Large diffs are not rendered by default.

@@ -0,0 +1,202 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_DECODER_A64_H_
#define V8_A64_DECODER_A64_H_

#include <list>

#include "globals.h"
#include "a64/instructions-a64.h"

namespace v8 {
namespace internal {


// List macro containing all visitors needed by the decoder class.
// Each entry names one instruction class. Expanding VISITOR_LIST(V) applies
// the macro V to every name in turn; it is used below to declare one
// Visit<Name>(Instruction*) method per instruction class.
#define VISITOR_LIST(V) \
  V(PCRelAddressing) \
  V(AddSubImmediate) \
  V(LogicalImmediate) \
  V(MoveWideImmediate) \
  V(Bitfield) \
  V(Extract) \
  V(UnconditionalBranch) \
  V(UnconditionalBranchToRegister) \
  V(CompareBranch) \
  V(TestBranch) \
  V(ConditionalBranch) \
  V(System) \
  V(Exception) \
  V(LoadStorePairPostIndex) \
  V(LoadStorePairOffset) \
  V(LoadStorePairPreIndex) \
  V(LoadStorePairNonTemporal) \
  V(LoadLiteral) \
  V(LoadStoreUnscaledOffset) \
  V(LoadStorePostIndex) \
  V(LoadStorePreIndex) \
  V(LoadStoreRegisterOffset) \
  V(LoadStoreUnsignedOffset) \
  V(LogicalShifted) \
  V(AddSubShifted) \
  V(AddSubExtended) \
  V(AddSubWithCarry) \
  V(ConditionalCompareRegister) \
  V(ConditionalCompareImmediate) \
  V(ConditionalSelect) \
  V(DataProcessing1Source) \
  V(DataProcessing2Source) \
  V(DataProcessing3Source) \
  V(FPCompare) \
  V(FPConditionalCompare) \
  V(FPConditionalSelect) \
  V(FPImmediate) \
  V(FPDataProcessing1Source) \
  V(FPDataProcessing2Source) \
  V(FPDataProcessing3Source) \
  V(FPIntegerConvert) \
  V(FPFixedPointConvert) \
  V(Unallocated) \
  V(Unimplemented)

// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
 public:
  // One pure virtual Visit<InstructionClass> method per entry in
  // VISITOR_LIST; subclasses must implement all of them.
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
#undef DECLARE

  virtual ~DecoderVisitor() {}

 private:
  // Visitors are registered in a list.
  // NOTE(review): this list lives in the visitor base class but is only
  // accessed through the friendship with Decoder below — confirm whether it
  // belongs in Decoder itself.
  std::list<DecoderVisitor*> visitors_;

  friend class Decoder;
};


class Decoder: public DecoderVisitor {
 public:
  explicit Decoder() {}

  // Top-level instruction decoder function. Decodes an instruction and calls
  // the visitor functions registered with the Decoder class.
  void Decode(Instruction *instr);

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in order, from the front of the list to the back.
  // A visitor can only be registered once.
  // Registering an already registered visitor will update its position.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);        // Move V2 at the start of the list.
  //   d.InsertVisitorBefore(V3, V2);
  //   d.AppendVisitor(V4);
  //   d.AppendVisitor(V4);         // No effect.
  //
  //   d.Decode(i);
  //
  // will call in order visitor methods in V3, V2, V1, V4.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
  void InsertVisitorBefore(DecoderVisitor* new_visitor,
                           DecoderVisitor* registered_visitor);
  void InsertVisitorAfter(DecoderVisitor* new_visitor,
                          DecoderVisitor* registered_visitor);

  // Remove a previously registered visitor class from the list of visitors
  // stored by the decoder.
  void RemoveVisitor(DecoderVisitor* visitor);

  // The decoder's own implementations of the visitor interface; these are
  // the leaf handlers reached by the Decode* functions below.
#define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

 private:
  // Decode the PC relative addressing instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x0.
  void DecodePCRelAddressing(Instruction* instr);

  // Decode the add/subtract immediate instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x1.
  void DecodeAddSubImmediate(Instruction* instr);

  // Decode the branch, system command, and exception generation parts of
  // the instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
  void DecodeBranchSystemException(Instruction* instr);

  // Decode the load and store parts of the instruction tree, and call
  // the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
  void DecodeLoadStore(Instruction* instr);

  // Decode the logical immediate and move wide immediate parts of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x2.
  void DecodeLogical(Instruction* instr);

  // Decode the bitfield and extraction parts of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x3.
  void DecodeBitfieldExtract(Instruction* instr);

  // Decode the data processing parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
  void DecodeDataProcessing(Instruction* instr);

  // Decode the floating point parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0xE, 0xF}.
  void DecodeFP(Instruction* instr);

  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 29:25 = 0x6.
  void DecodeAdvSIMDLoadStore(Instruction* instr);

  // Decode the Advanced SIMD (NEON) data processing part of the instruction
  // tree, and call the corresponding visitors.
  // On entry, instruction bits 27:25 = 0x7.
  void DecodeAdvSIMDDataProcessing(Instruction* instr);
};


} } // namespace v8::internal

#endif // V8_A64_DECODER_A64_H_
@@ -0,0 +1,376 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"


namespace v8 {
namespace internal {


int Deoptimizer::patch_size() {
  // Size of the code used to patch lazy bailout points.
  // Patching is done by Deoptimizer::DeoptimizeFunction.
  // Four instruction slots: ldr + blr plus a 64-bit (two-slot) inline
  // literal holding the deopt entry address (see
  // PatchCodeForDeoptimization).
  return 4 * kInstructionSize;
}



void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  Address code_start_address = code->instruction_start();
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif

  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    // A pc of -1 marks an entry with no lazy-bailout point to patch.
    if (deopt_data->Pc(i)->value() == -1) continue;

    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);

    // Emit: ldr ip0, <literal>; blr ip0; followed by the 64-bit literal
    // containing the deoptimization entry address.
    PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
    patcher.LoadLiteral(ip0, 2 * kInstructionSize);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

    // Check that patched regions do not overlap and stay inside the code
    // object. prev_call_address only exists in DEBUG builds; this compiles
    // in release builds because ASSERT expands to nothing there.
    ASSERT((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // JavaScript frames keep no live values in callee-saved registers, so the
  // exact register contents do not matter: zero every register, then set fp
  // and sp (via jssp) to their real values.
  for (int reg = 0; reg < Register::NumRegisters(); reg++) {
    input_->SetRegister(reg, 0);
  }

  // TODO(all): Do we also need to set a value to csp?
  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));

  // Likewise, the floating point register values are unimportant.
  for (int fp_reg = 0; fp_reg < DoubleRegister::NumAllocatableRegisters();
       fp_reg++) {
    input_->SetDoubleRegister(fp_reg, 0.0);
  }

  // Fill the frame content from the actual data on the frame, one
  // pointer-sized slot at a time starting at the top-of-stack address.
  for (unsigned slot = 0; slot < input_->GetFrameSize(); slot += kPointerSize) {
    input_->SetFrameSlot(slot, Memory::uint64_at(tos + slot));
  }
}


// Returns whether the input frame carries dynamic alignment padding that the
// deoptimizer must account for.
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on A64 in the input frame.
  return false;
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  // The stub failure path expects the handler parameter count in x0 and the
  // address of the deoptimization handler in x1.
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference handler_ref(&function, ExternalReference::BUILTIN_CALL,
                                isolate_);
  output_frame->SetRegister(x0.code(), descriptor->GetHandlerParameterCount());
  output_frame->SetRegister(
      x1.code(), reinterpret_cast<intptr_t>(handler_ref.address()));
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  // Transfer every FP register value from the input frame description into
  // the output frame description, unchanged.
  for (int reg = 0; reg < DoubleRegister::kMaxNumRegisters; ++reg) {
    output_frame->SetDoubleRegister(reg, input_->GetDoubleRegister(reg));
  }
}


// Returns the builtin used to notify stub failures. The SaveDoubles variant
// is used on this platform; presumably because FP registers must survive the
// notification — confirm against the builtin's definition.
Code* Deoptimizer::NotifyStubFailureBuiltin() {
  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}


#define __ masm()->

// Generates the common deoptimization entry code: saves the live register
// state, builds a Deoptimizer object via a C call, copies the input frame
// into it, computes the output frames, and finally materializes the last
// output frame's state into registers before jumping to the continuation.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
                                0, FPRegister::NumAllocatableRegisters() - 1);
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSizeInBytes) +
      (saved_fp_registers.Count() * kDRegSizeInBytes);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;

  // Get the bailout id from the stack. It was pushed before the registers,
  // so it sits just above the saved-registers area.
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // Get the address of the location in the code object. This is the return
  // address for lazy deoptimization.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, and correct one word for bailout id.
  __ Add(fp_to_sp, masm()->StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);

  // Allocate a new deoptimizer object with arguments:
  //  - x0: function, x1: deopt type, x5: isolate address.
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register x0.
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    int dst_offset = FrameDescription::double_registers_offset() +
        (i * kDoubleSize);
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  // x0 walks the array of FrameDescription pointers; x1 is its end.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  // Push the frame's contents from highest offset downwards, so the frame is
  // laid out in the right order on the stack.
  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

  // Reload the FP registers from the input frame description.
  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
  ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  int src_offset = FrameDescription::double_registers_offset();
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    __ Ldr(reg, MemOperand(x1, src_offset));
    src_offset += kDoubleSize;
  }

  // Push state from the last output frame.
  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
  __ Push(x6);

  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
  // stack, then pops it all into registers. Here, we try to load it directly
  // into the relevant registers. Is this correct? If so, we should improve the
  // ARM code.

  // TODO(all): This code needs to be revisited. We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of last output frame while
  // reloading the other registers.
  ASSERT(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
  __ InitializeRootRegister();
  __ Br(continuation);
}


// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry
// (a movz plus a branch).
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  {
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
    // a movz instruction to load the entry id.
    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      __ movz(masm()->Tmp0(), i);
      __ b(&done);
      // Each entry must be exactly table_entry_size_ bytes (movz + b).
      ASSERT(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  // TODO(all): We need to add some kind of assertion to verify that Tmp0()
  // is not clobbered by Push.
  __ Push(masm()->Tmp0());
}


// The caller's pc is stored as an ordinary frame slot on this platform.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


// The caller's fp is stored as an ordinary frame slot on this platform.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


#undef __

} } // namespace v8::internal

Large diffs are not rendered by default.

@@ -0,0 +1,115 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_DISASM_A64_H
#define V8_A64_DISASM_A64_H

#include "v8.h"

#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"

namespace v8 {
namespace internal {


// A DecoderVisitor that formats each visited instruction as text into an
// internal (or caller-provided) character buffer.
class Disassembler: public DecoderVisitor {
 public:
  Disassembler();
  // Use an externally provided buffer of the given size.
  Disassembler(char* text_buffer, int buffer_size);
  virtual ~Disassembler();
  // Returns the text produced for the most recently visited instruction.
  char* GetOutput();

  // Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

 protected:
  // Called once the text for an instruction is complete; subclasses
  // override this to forward the output (see PrintDisassembler).
  virtual void ProcessOutput(Instruction* instr);

  // Formatting helpers. Format() emits a mnemonic and expands the operand
  // format string; each Substitute*Field() handles one directive kind and
  // returns an int — presumably the number of format characters consumed;
  // confirm in the implementation file.
  void Format(Instruction* instr, const char* mnemonic, const char* format);
  void Substitute(Instruction* instr, const char* string);
  int SubstituteField(Instruction* instr, const char* format);
  int SubstituteRegisterField(Instruction* instr, const char* format);
  int SubstituteImmediateField(Instruction* instr, const char* format);
  int SubstituteLiteralField(Instruction* instr, const char* format);
  int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
  int SubstituteShiftField(Instruction* instr, const char* format);
  int SubstituteExtendField(Instruction* instr, const char* format);
  int SubstituteConditionField(Instruction* instr, const char* format);
  int SubstitutePCRelAddressField(Instruction* instr, const char* format);
  int SubstituteBranchTargetField(Instruction* instr, const char* format);
  int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
  int SubstitutePrefetchField(Instruction* instr, const char* format);
  int SubstituteBarrierField(Instruction* instr, const char* format);

  // Register code 31 (kZeroRegCode) encodes either zr or sp depending on
  // the instruction, hence the "ZROrSP" naming for a single code check.
  bool RdIsZROrSP(Instruction* instr) const {
    return (instr->Rd() == kZeroRegCode);
  }

  bool RnIsZROrSP(Instruction* instr) const {
    return (instr->Rn() == kZeroRegCode);
  }

  bool RmIsZROrSP(Instruction* instr) const {
    return (instr->Rm() == kZeroRegCode);
  }

  bool RaIsZROrSP(Instruction* instr) const {
    return (instr->Ra() == kZeroRegCode);
  }

  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

  void ResetOutput();
  void AppendToOutput(const char* string, ...);

  char* buffer_;         // Output buffer.
  uint32_t buffer_pos_;  // Current write position in buffer_.
  uint32_t buffer_size_;
  bool own_buffer_;      // Whether the destructor should free buffer_.
};


// A Disassembler which forwards each completed line of output to a FILE
// stream.
class PrintDisassembler: public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
  ~PrintDisassembler() { }

  virtual void ProcessOutput(Instruction* instr);

 private:
  // Not owned: the destructor does not close it.
  FILE *stream_;
};


} } // namespace v8::internal

#endif // V8_A64_DISASM_A64_H
@@ -25,30 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_V8_DEFAULTS_H_
#define V8_V8_DEFAULTS_H_

#include "v8.h"

/**
* Default configuration support for the V8 JavaScript engine.
*/
#if V8_TARGET_ARCH_A64

#include "assembler.h"
#include "assembler-a64.h"
#include "assembler-a64-inl.h"
#include "frames.h"

namespace v8 {
namespace internal {


// Frame-pointer and context registers for JavaScript frames on this platform.
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }


// Stub failure trampoline frames use the same fp and context registers.
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }

/**
* Configures the constraints with reasonable default values based on the
* capabilities of the current device the VM is running on.
*/
bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
ResourceConstraints* constraints);

Object*& ExitFrame::constant_pool_slot() const {
  // Exit frames have no constant pool slot on this platform; this accessor
  // must never be called. UNREACHABLE() aborts before the bogus NULL
  // reference below, which only exists to satisfy the return type.
  UNREACHABLE();
  return Memory::Object_at(NULL);
}

/**
* Convience function which performs SetResourceConstraints with the settings
* returned by ConfigureResourceConstraintsForCurrentPlatform.
*/
bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform();

} // namespace v8
} } // namespace v8::internal

#endif // V8_V8_DEFAULTS_H_
#endif // V8_TARGET_ARCH_A64
@@ -0,0 +1,131 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_FRAMES_A64_H_
#define V8_A64_FRAMES_A64_H_

// The include guard opens before any #includes so that on re-inclusion the
// entire header — includes and all — is skipped.
#include "a64/constants-a64.h"
#include "a64/assembler-a64.h"

namespace v8 {
namespace internal {

const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;  // Bits 0-17 set, one per x0-x17.
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];

// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;

// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
// No trailing semicolon here: the macro is used inside expressions, and a
// pasted ';' would break them.
#define kNumSafepointSavedRegisters \
  CPURegList::GetSafepointSavedRegisters().Count()

class EntryFrameConstants : public AllStatic {
 public:
  // FP-relative offset of the calling frame's fp, one slot below the fixed
  // part of the frame.
  static const int kCallerFPOffset =
      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};


class ExitFrameConstants : public AllStatic {
 public:
  static const int kFrameSize = 2 * kPointerSize;

  // FP-relative offsets: above fp sit the caller's sp displacement and pc;
  // fp itself holds the caller's fp; below fp are the saved sp and the code
  // object.
  static const int kCallerSPDisplacement = 2 * kPointerSize;
  static const int kCallerPCOffset = 1 * kPointerSize;
  static const int kCallerFPOffset = 0 * kPointerSize;  // <- fp
  static const int kSPOffset = -1 * kPointerSize;
  static const int kCodeOffset = -2 * kPointerSize;
  static const int kLastExitFrameField = kCodeOffset;
};


class JavaScriptFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;

  // There are two words on the stack (saved fp and saved lr) between fp and
  // the arguments.
  static const int kLastParameterOffset = 2 * kPointerSize;

  // The frame marker slot doubles as the function slot.
  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
};


class ArgumentsAdaptorFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;

  // Fixed frame plus one extra slot (the argument count).
  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + kPointerSize;
};


class ConstructFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
  static const int kLengthOffset = -4 * kPointerSize;
  static const int kConstructorOffset = -5 * kPointerSize;
  static const int kImplicitReceiverOffset = -6 * kPointerSize;

  // Fixed frame plus the four extra slots declared above.
  static const int kFrameSize =
      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};


class InternalFrameConstants : public AllStatic {
 public:
  // FP-relative offset of the code object for this internal frame.
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};


inline Object* JavaScriptFrame::function_slot_object() const {
  // The JSFunction lives in the frame's marker slot, addressed relative to
  // fp.
  return Memory::Object_at(fp() + JavaScriptFrameConstants::kFunctionOffset);
}


// Writes the frame pointer directly into the handler's fp slot in memory.
inline void StackHandler::SetFp(Address slot, Address fp) {
  Memory::Address_at(slot) = fp;
}


} } // namespace v8::internal

#endif // V8_A64_FRAMES_A64_H_

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,334 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#define A64_DEFINE_FP_STATICS

#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {


bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}

if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}


bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}

if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}


// Rotate 'value' right by 'rotate' bits within a field of 'width' bits.
// Bits above 'width' in the input are assumed clear by the callers.
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  ASSERT(width <= 64);
  rotate &= 63;
  if (rotate == 0) {
    // Shifting a 64-bit value by (width - 0) == 64 bits is undefined
    // behaviour in C++, so handle the no-rotation case explicitly.
    return value;
  }
  // Use an explicitly 64-bit one: '1UL' is only 32 bits wide on LLP64
  // platforms (e.g. Windows), where '1UL << rotate' would be undefined for
  // rotate >= 32.
  uint64_t low_mask = (static_cast<uint64_t>(1) << rotate) - 1;
  return ((value & low_mask) << (width - rotate)) | (value >> rotate);
}


// Replicate the low 'width' bits of 'value' across a register of
// 'reg_size' bits (32 or 64) and return the result.
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  // Use an explicitly 64-bit one: with width == 32, '1UL << width' is
  // undefined where 'unsigned long' is 32 bits (e.g. Windows/LLP64).
  uint64_t result = value & ((static_cast<uint64_t>(1) << width) - 1);
  // Double the replicated span until it fills the register.
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    // 64-bit pattern: a run of (imm_s + 1) set bits, rotated right by imm_r.
    if (imm_s == 0x3F) {
      // All bits set is not encodable (it cannot represent zero).
      return 0;
    }
    // Use an explicitly 64-bit one: imm_s can be up to 0x3E, and shifting
    // '1UL' by up to 63 is undefined where 'unsigned long' is 32 bits
    // (e.g. Windows/LLP64).
    uint64_t bits = (static_cast<uint64_t>(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      // 11111x is a reserved encoding.
      return 0;
    }
    // The leading-ones prefix of imm_s selects the element size; find the
    // first zero bit, scanning from bit 5 downwards.
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          // All s bits set within the element is not encodable.
          return 0;
        }
        uint64_t bits =
            (static_cast<uint64_t>(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


// Expand the instruction's 8-bit FP immediate into a single-precision value.
float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  // (32 - bit6) builds B and the five replicated copies of b in one term:
  // it is 0b100000 when bit6 == 0 and 0b011111 when bit6 == 1, placing B at
  // bit 30 and b at bits 29-25.
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


// Expand the instruction's 8-bit FP immediate into a double-precision value.
double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  // (256 - bit6) builds B and the eight replicated copies of b in one term:
  // it is 0b1.0000.0000 when bit6 == 0 and 0b0.1111.1111 when bit6 == 1,
  // placing B at bit 62 and b at bits 61-54.
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


// Return the per-register transfer size of a load/store pair operation.
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  // 64-bit integer pairs and double-precision FP pairs move double words;
  // all other pair operations move words.
  bool is_double_word =
      (op == STP_x) || (op == LDP_x) || (op == STP_d) || (op == LDP_d);
  return is_double_word ? LSDoubleWord : LSWord;
}


// Return the PC-relative offset encoded in this instruction, in bytes
// (branch and literal immediates are converted back from instruction
// counts by shifting left by kInstructionSizeLog2).
ptrdiff_t Instruction::ImmPCOffset() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    ASSERT(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bits
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


// Return the instruction this PC-relative instruction refers to.
// Note: the pointer arithmetic below is a byte offset — elsewhere in this
// file '(target - this)' is masked with 3 and shifted by
// kInstructionSizeLog2, which implies sizeof(Instruction) == 1.
Instruction* Instruction::ImmPCOffsetTarget() {
  return this + ImmPCOffset();
}


// Check that 'offset' fits in the immediate field used by branches of
// 'branch_type'. The offset is measured in the units of the immediate field
// itself (instruction counts — see SetBranchImmTarget).
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


// Check whether 'target' is close enough for this branch to reach.
// NOTE(review): 'target - this' is a byte offset (see ImmPCOffsetTarget),
// while IsValidImmPCOffset() measures against the instruction-count
// immediate field width, so this check is conservative by
// kInstructionSizeLog2 bits — confirm this is intended.
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  int offset = target - this;
  return IsValidImmPCOffset(BranchType(), offset);
}


// Patch this instruction's PC-relative immediate so that it refers to
// 'target'. Dispatches on the instruction class: ADR, PC-relative branch,
// or load literal.
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


// Patch the ADR immediate field so that the instruction addresses 'target'.
void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm_field = Assembler::ImmPCRelAddress(target - this);
  SetInstructionBits(Mask(~ImmPCRel_mask) | imm_field);
}


// Patch the immediate field of a PC-relative branch so that it targets
// 'target'. The target must be instruction-aligned.
void Instruction::SetBranchImmTarget(Instruction* target) {
  ASSERT(((target - this) & 3) == 0);
  // Branch immediates count instructions, not bytes.
  int branch_offset = (target - this) >> kInstructionSizeLog2;
  Instr imm_field;
  uint32_t field_mask;
  switch (BranchType()) {
    case CondBranchType:
      imm_field = Assembler::ImmCondBranch(branch_offset);
      field_mask = ImmCondBranch_mask;
      break;
    case UncondBranchType:
      imm_field = Assembler::ImmUncondBranch(branch_offset);
      field_mask = ImmUncondBranch_mask;
      break;
    case CompareBranchType:
      imm_field = Assembler::ImmCmpBranch(branch_offset);
      field_mask = ImmCmpBranch_mask;
      break;
    case TestBranchType:
      imm_field = Assembler::ImmTestBranch(branch_offset);
      field_mask = ImmTestBranch_mask;
      break;
    default:
      imm_field = 0;
      field_mask = 0;
      UNREACHABLE();
  }
  SetInstructionBits(Mask(~field_mask) | imm_field);
}


// Patch the load-literal immediate so that it refers to the literal pool
// entry at 'source'. The entry must be word-aligned relative to this
// instruction.
void Instruction::SetImmLLiteral(Instruction* source) {
  ASSERT(((source - this) & 3) == 0);
  int entry_offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm_field = Assembler::ImmLLiteral(entry_offset);
  Instr field_mask = ImmLLiteral_mask;
  SetInstructionBits(Mask(~field_mask) | imm_field);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
// Return true if this instruction sequence encodes inline data rather than
// real code.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31). Such a write is architecturally a no-op, so it can never be real
  // generated code.
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
// Extract the payload of an inline-data sequence (see IsInlineData): the
// 16-bit immediate of the encoding movz instruction.
uint64_t InstructionSequence::InlineData() const {
  ASSERT(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


} } // namespace v8::internal

#endif // V8_TARGET_ARCH_A64

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,108 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_INSTRUMENT_A64_H_
#define V8_A64_INSTRUMENT_A64_H_

#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "a64/constants-a64.h"
#include "a64/instrument-a64.h"

namespace v8 {
namespace internal {

// Maximum length of a counter name, including the terminating '\0'.
const int kCounterNameMaxLength = 256;
// Default sampling period for Instrument (2^22); presumably measured in
// visited instructions — confirm in instrument-a64.cc.
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;


// Whether instrumentation counting is currently active.
enum InstrumentState {
  InstrumentStateDisable = 0,
  InstrumentStateEnable = 1
};


// Behaviour of a counter's value when it is read.
enum CounterType {
  Gauge = 0,      // Gauge counters reset themselves after reading.
  Cumulative = 1  // Cumulative counters keep their value after reading.
};


// A single named event counter, used by Instrument to tally decoded
// instructions.
class Counter {
 public:
  // 'type' selects the read behaviour described by CounterType above.
  Counter(const char* name, CounterType type = Gauge);

  void Increment();
  void Enable();
  void Disable();
  bool IsEnabled();
  uint64_t count();
  const char* name();
  CounterType type();

 private:
  // Name stored inline in a fixed-size buffer.
  char name_[kCounterNameMaxLength];
  uint64_t count_;
  bool enabled_;
  CounterType type_;
};


// A DecoderVisitor that maintains per-category instruction counters and
// periodically writes them to an output stream.
class Instrument: public DecoderVisitor {
 public:
  // 'datafile' names the output file; 'sample_period' is the interval
  // between counter dumps (see kDefaultInstrumentationSamplingPeriod).
  // NOTE(review): behaviour for datafile == NULL is defined in the
  // implementation — confirm before relying on it.
  explicit Instrument(const char* datafile = NULL,
    uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
  ~Instrument();

  // Declare all Visitor functions.
  #define DECLARE(A) void Visit##A(Instruction* instr);
  VISITOR_LIST(DECLARE)
  #undef DECLARE

 private:
  void Update();
  void Enable();
  void Disable();
  void DumpCounters();
  void DumpCounterNames();
  void DumpEventMarker(unsigned marker);
  void HandleInstrumentationEvent(unsigned event);
  // Look up a counter by name.
  Counter* GetCounter(const char* name);

  // Shared helpers for the load/store visitors.
  void InstrumentLoadStore(Instruction* instr);
  void InstrumentLoadStorePair(Instruction* instr);

  std::list<Counter*> counters_;

  FILE *output_stream_;
  uint64_t sample_period_;
};

} } // namespace v8::internal

#endif // V8_A64_INSTRUMENT_A64_H_

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,326 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "a64/lithium-gap-resolver-a64.h"
#include "a64/lithium-codegen-a64.h"

namespace v8 {
namespace internal {

// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list and using
// the root register has two advantages:
// - It is not in crankshaft allocatable registers list, so it can't interfere
// with any of the moves we are resolving.
// - We don't need to push it on the stack, as we can reload it with its value
// once we have resolved a cycle.
#define kSavedValue root

// Construct a resolver bound to 'owner'; moves_ gets an initial capacity of
// 32 entries allocated in the code generator's zone.
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL), need_to_restore_root_(false) { }


#define __ ACCESS_MASM(cgen_->masm())

// Emit code performing all the moves in 'parallel_move', resolving
// dependencies between them (including cycles). Non-constant moves are
// emitted first; constant moves last, so they cannot participate in cycles.
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    if (!move.IsEliminated()) {
      ASSERT(move.source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  // If the root register was used as a scratch (kSavedValue) to break a
  // cycle or to build a constant, reload its expected root value.
  if (need_to_restore_root_) {
    ASSERT(kSavedValue.Is(root));
    __ InitializeRootRegister();
    need_to_restore_root_ = false;
  }

  moves_.Rewind(0);
}


// Populate moves_ with the moves from 'parallel_move' that actually need to
// be performed: moves whose source equals their destination, whose
// destination is ignored or unallocated, or that were already eliminated
// are dropped.
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  const ZoneList<LMoveOperands>* candidates = parallel_move->move_operands();
  int count = candidates->length();
  for (int pos = 0; pos < count; ++pos) {
    LMoveOperands candidate = candidates->at(pos);
    if (candidate.IsRedundant()) continue;
    moves_.Add(candidate, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.
  LMoveOperands& current_move = moves_[index];

  ASSERT(!current_move.IsPending());
  ASSERT(!current_move.IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack allocated local. Multiple moves can
  // be pending because this function is recursive.
  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
  LOperand* destination = current_move.destination();
  current_move.set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  current_move.set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


// Sanity-check the move list: no operand may be the destination of more
// than one move. Only active in slow-assert builds.
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  for (int first = 0; first < moves_.length(); ++first) {
    LOperand* dst = moves_[first].destination();
    for (int second = first + 1; second < moves_.length(); ++second) {
      SLOW_ASSERT(!dst->Equals(moves_[second].destination()));
    }
  }
#endif
}


// Break the cycle rooted at moves_[root_index_]: save the value that
// moves_[index] would overwrite into the scratch register (kSavedValue) and
// eliminate the move; RestoreValue() completes it later.
void LGapResolver::BreakCycle(int index) {
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);

  // We use a register which is not allocatable by crankshaft to break the cycle
  // to be sure it doesn't interfere with the moves we are resolving.
  ASSERT(!kSavedValue.IsAllocatable());
  need_to_restore_root_ = true;

  // We save in a register the source of that move and we remember its
  // destination. Then we mark this move as resolved so the cycle is
  // broken and we can perform the other moves.
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();

  if (source->IsRegister()) {
    __ Mov(kSavedValue, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    // TODO(all): We should use a double register to store the value to avoid
    // the penalty of the mov across register banks. We are going to reserve
    // d31 to hold 0.0 value. We could clobber this register while breaking the
    // cycle and restore it after like we do with the root register.
    // LGapResolver::RestoreValue() will need to be updated as well when we'll
    // do that.
    __ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }

  // Mark this move as resolved.
  // This move will be actually performed by moving the saved value to this
  // move's destination in LGapResolver::RestoreValue().
  moves_[index].Eliminate();
}


// Complete a broken cycle: copy the value saved by BreakCycle() from
// kSavedValue to the destination recorded in saved_destination_.
void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
  } else if (saved_destination_->IsStackSlot()) {
    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    // Move back across register banks (see the TODO in BreakCycle()).
    __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


// Emit the code for the move at 'index' and mark it as eliminated.
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ Str(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ Ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      EmitStackSlotMove(index);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(dst, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      __ Fmov(result, cgen_->ToDouble(constant_source));
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      // Materialize the constant in the root register (kSavedValue) and
      // store it; Resolve() restores the root register at the end.
      need_to_restore_root_ = true;
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
      }
      __ Str(kSavedValue, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ Fmov(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ Str(src, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand src = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ Ldr(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      EmitStackSlotMove(index);
    }

  } else {
    UNREACHABLE();
  }

  // The move has been emitted, we can eliminate it.
  moves_[index].Eliminate();
}


// Emit a stack-slot-to-stack-slot move through a scratch register.
void LGapResolver::EmitStackSlotMove(int index) {
  // We need a temp register to perform a stack slot to stack slot move, and
  // the register must not be involved in breaking cycles.

  // Use the Crankshaft double scratch register as the temporary.
  DoubleRegister temp = crankshaft_fp_scratch;

  LOperand* src = moves_[index].source();
  LOperand* dst = moves_[index].destination();

  // EmitMove() dispatches here for both untagged stack slot moves and
  // double stack slot moves; accept either, as long as the slot kinds
  // match. (The previous asserts required plain stack slots and would fire
  // on the double-stack-slot path.)
  ASSERT((src->IsStackSlot() && dst->IsStackSlot()) ||
         (src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
  __ Ldr(temp, cgen_->ToMemOperand(src));
  __ Str(temp, cgen_->ToMemOperand(dst));
}

} } // namespace v8::internal
@@ -0,0 +1,90 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_

#include "v8.h"

#include "lithium.h"

namespace v8 {
namespace internal {

class LCodeGen;
class LGapResolver;

// Resolves a set of parallel (gap) moves produced by the Lithium register
// allocator, ordering them so that no move clobbers the source of another
// and breaking cycles through a scratch register.
class LGapResolver BASE_EMBEDDED {
 public:
  explicit LGapResolver(LCodeGen* owner);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(LParallelMove* parallel_move);

 private:
  // Build the initial list of moves.
  void BuildInitialMoveList(LParallelMove* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // If a cycle is found in the series of moves, save the blocking value to
  // a scratch register. The cycle must be found by hitting the root of the
  // depth-first search.
  void BreakCycle(int index);

  // After a cycle has been resolved, restore the value from the scratch
  // register to its proper destination.
  void RestoreValue();

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Emit a move from one stack slot to another.
  void EmitStackSlotMove(int index);

  // Verify the move list before performing moves.
  void Verify();

  LCodeGen* cgen_;

  // List of moves not yet resolved.
  ZoneList<LMoveOperands> moves_;

  // Index of the move treated as the cycle root during the current
  // depth-first traversal.
  int root_index_;
  bool in_cycle_;
  LOperand* saved_destination_;

  // We use the root register as a scratch in a few places. When that happens,
  // this flag is set to indicate that it needs to be restored.
  bool need_to_restore_root_;
};

} } // namespace v8::internal

#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,315 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_

#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {


#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerA64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(unsigned c, Label* on_equal);
virtual void CheckCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);

// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();

// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
const byte** input_start,
const byte** input_end);

private:
// Above the frame pointer - Stored registers and stack passed parameters.
// Callee-saved registers x19-x29, where x29 is the old frame pointer.
static const int kCalleeSavedRegisters = 0;
// Return address.
// It is placed above the 11 callee-saved registers.
static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameter placed by caller.
static const int kIsolate = kSecondaryReturnAddress + kPointerSize;

// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
static const int kStackBase = kDirectCall - kPointerSize;
static const int kOutputSize = kStackBase - kPointerSize;
static const int kInput = kOutputSize - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
// A capture is a 64 bit value holding two position.
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;

// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;

// When initializing registers to a non-position value we can unroll
// the loop. Set the limit of registers to unroll.
static const int kNumRegistersToUnroll = 16;

// We are using x0 to x7 as a register cache. Each hardware register must
// contain one capture, that is two 32 bit registers. We can cache at most
// 16 registers.
static const int kNumCachedRegisters = 16;

// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);

// Check whether preemption has been requested.
void CheckPreemption();

// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();

// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);

// Returns the stack location of a 32 bit position register.
MemOperand register_location(int register_index);

// Returns the stack location of a 64 bit capture, which combines two
// consecutive 32 bit position registers.
MemOperand capture_location(int register_index, Register scratch);

// Register holding the current input position as a negative offset from
// the end of the string.
Register current_input_offset() { return w21; }

// The register containing the current character after LoadCurrentCharacter.
Register current_character() { return w22; }

// Register holding the address of the end of the input string.
Register input_end() { return x25; }

// Register holding the address of the start of the input string.
Register input_start() { return x26; }

// Register holding the offset from the start of the string where matching
// should begin.
Register start_offset() { return w27; }

// Pointer to the output array's first element.
Register output_array() { return x28; }

// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
Register frame_pointer() { return fp; }

// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
Register backtrack_stackpointer() { return x23; }

// Register holding a pointer to the current code object.
Register code_pointer() { return x20; }

// Register holding the value used for clearing capture registers.
Register non_position_value() { return w24; }
// The same clearing value, stored in both the low and the high 32 bits of
// this X register (w24 aliases its low word), so that more than one
// position register can be cleared at a time.
Register twice_non_position_value() { return x24; }

// Byte size of chars in the string to match (decided by the Mode argument)
int char_size() { return static_cast<int>(mode_); }

// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);

// Compares reg against immmediate before calling BranchOrBacktrack.
// It makes use of the Cbz and Cbnz instructions.
void CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to);

inline void CallIf(Label* to, Condition condition);

// Save and restore the link register on the stack in a way that
// is GC-safe.
inline void SaveLinkRegister();
inline void RestoreLinkRegister();

// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);

// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);

// This state indicates where the register actually is.
// Only the first kNumCachedRegisters regexp registers are cached, packed
// two 32 bit positions per 64 bit hardware register; all others are kept
// in memory.
enum RegisterState {
STACKED, // Resides in memory.
CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
CACHED_MSW // Most Significant Word of a 64 bit hardware register.
};

// Returns where the value of the given regexp register currently resides.
RegisterState GetRegisterState(int register_index) {
  ASSERT(register_index >= 0);
  // Registers beyond the cache limit always live on the stack.
  if (register_index >= kNumCachedRegisters) return STACKED;
  // Cached registers are packed in pairs: the even-indexed one occupies
  // the low word of a hardware register, the odd-indexed one the high word.
  return ((register_index % 2) == 0) ? CACHED_LSW : CACHED_MSW;
}

// Store helper that takes the state of the register into account.
inline void StoreRegister(int register_index, Register source);

// Returns a hardware W register that holds the value of the capture
// register.
//
// This function will try to use an existing cache register (w0-w7) for the
// result. Otherwise, it will load the value into maybe_result.
//
// If the returned register is anything other than maybe_result, calling code
// must not write to it.
inline Register GetRegister(int register_index, Register maybe_result);

// Returns the harware register (x0-x7) holding the value of the capture
// register.
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);

Isolate* isolate() const { return masm_->isolate(); }

MacroAssembler* masm_;

// Which mode to generate code for (ASCII or UC16).
Mode mode_;

// One greater than maximal register index actually used.
int num_registers_;

// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;

// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};

#endif // V8_INTERPRETED_REGEXP


}} // namespace v8::internal

#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,112 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if V8_TARGET_ARCH_A64

#include "a64/utils-a64.h"


namespace v8 {
namespace internal {

#define __ assm->


// Returns the number of consecutive zero bits at the most-significant end
// of 'value', considering only the low 'width' bits. Returns 'width' when
// value is zero (within that width). 'width' must be 32 or 64.
int CountLeadingZeros(uint64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  int count = 0;
  // Use a 64-bit '1' so the shift is well-defined for width == 64 even on
  // LLP64 targets where 'unsigned long' is only 32 bits wide: there,
  // '1UL << 63' would be undefined behaviour.
  uint64_t bit_test = 1ULL << (width - 1);
  // Walk down from the top bit until a set bit (or the width limit) is hit.
  while ((count < width) && ((bit_test & value) == 0)) {
    count++;
    bit_test >>= 1;
  }
  return count;
}


// Returns the number of redundant copies of the sign bit at the top of
// 'value' (the sign bit itself is not counted). 'width' must be 32 or 64.
int CountLeadingSignBits(int64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  // For a negative number, the leading sign bits mirror the leading zeros
  // of its bitwise complement.
  int64_t magnitude = (value >= 0) ? value : ~value;
  return CountLeadingZeros(magnitude, width) - 1;
}


// Returns the number of consecutive zero bits at the least-significant end
// of 'value'. Returns 'width' when no bit is set. 'width' must be 32 or 64.
int CountTrailingZeros(uint64_t value, int width) {
  // TODO(jbramley): Optimize this for A64 hosts.
  ASSERT((width == 32) || (width == 64));
  // Shift right one bit at a time until a set bit (or the width limit) is
  // reached.
  uint64_t remaining = value;
  int count = 0;
  while ((count < width) && ((remaining & 1) == 0)) {
    remaining >>= 1;
    count++;
  }
  return count;
}


// Returns the number of set bits in the low 'width' bits of 'value'
// (population count). 'width' must be 32 or 64.
int CountSetBits(uint64_t value, int width) {
  // TODO(jbramley): Would it be useful to allow other widths? The
  // implementation already supports them.
  ASSERT((width == 32) || (width == 64));

  // Mask out unused bits to ensure that they are not counted. Use a ULL
  // literal: 0xffffffffffffffffUL does not fit in 'unsigned long' on LLP64
  // targets (e.g. 64-bit Windows), where 'long' is only 32 bits wide.
  value &= (0xffffffffffffffffULL >> (64 - width));

  // Add up the set bits.
  // The algorithm works by adding pairs of bit fields together iteratively,
  // where the size of each bit field doubles each time.
  // An example for an 8-bit value:
  // Bits: h g f e d c b a
  // \ | \ | \ | \ |
  // value = h+g f+e d+c b+a
  // \ | \ |
  // value = h+g+f+e d+c+b+a
  // \ |
  // value = h+g+f+e+d+c+b+a
  value = ((value >> 1) & 0x5555555555555555ULL) +
          (value & 0x5555555555555555ULL);
  value = ((value >> 2) & 0x3333333333333333ULL) +
          (value & 0x3333333333333333ULL);
  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0fULL) +
          (value & 0x0f0f0f0f0f0f0f0fULL);
  value = ((value >> 8) & 0x00ff00ff00ff00ffULL) +
          (value & 0x00ff00ff00ff00ffULL);
  value = ((value >> 16) & 0x0000ffff0000ffffULL) +
          (value & 0x0000ffff0000ffffULL);
  value = ((value >> 32) & 0x00000000ffffffffULL) +
          (value & 0x00000000ffffffffULL);

  // The popcount of a 64-bit value is at most 64, so the narrowing
  // conversion is safe; make it explicit to avoid compiler warnings.
  return static_cast<int>(value);
}


// Returns the bit position (counting from the least significant bit) of
// the single set bit in 'mask'. 'mask' must contain exactly one set bit.
int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}


} } // namespace v8::internal

#endif // V8_TARGET_ARCH_A64
@@ -0,0 +1,109 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_UTILS_A64_H_
#define V8_A64_UTILS_A64_H_

#include <cmath>
#include "v8.h"
#include "a64/constants-a64.h"

// Expands the list macro R(n) once for each of the 32 A64 register codes
// (0 through 31).
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

namespace v8 {
namespace internal {

// Floating point representation.
// Returns the raw IEEE-754 bit pattern of a single-precision float.
static inline uint32_t float_to_rawbits(float value) {
  // memcpy is the portable way to reinterpret the storage without
  // violating strict-aliasing rules.
  uint32_t bits = 0;
  memcpy(&bits, &value, sizeof(bits));
  return bits;
}


// Returns the raw IEEE-754 bit pattern of a double-precision float.
static inline uint64_t double_to_rawbits(double value) {
  // memcpy is the portable way to reinterpret the storage without
  // violating strict-aliasing rules.
  uint64_t bits = 0;
  memcpy(&bits, &value, sizeof(bits));
  return bits;
}


// Builds a single-precision float from a raw IEEE-754 bit pattern.
static inline float rawbits_to_float(uint32_t bits) {
  float value = 0.0;
  // memcpy avoids strict-aliasing violations when reinterpreting storage.
  memcpy(&value, &bits, sizeof(value));
  return value;
}


// Builds a double-precision float from a raw IEEE-754 bit pattern.
static inline double rawbits_to_double(uint64_t bits) {
  double value = 0.0;
  // memcpy avoids strict-aliasing violations when reinterpreting storage.
  memcpy(&value, &bits, sizeof(value));
  return value;
}


// Bits counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int MaskToBit(uint64_t mask);


// NaN tests.
// Returns true if 'num' is a signalling NaN: an IEEE-754 double NaN whose
// quiet bit (bit 51 of the raw representation) is clear.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
  uint64_t raw = double_to_rawbits(num);
  return std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0);
}


// Returns true if 'num' is a signalling NaN: an IEEE-754 single-precision
// NaN whose quiet bit (bit 22 of the raw representation) is clear.
inline bool IsSignallingNaN(float num) {
  const uint64_t kFP32QuietNaNMask = 0x00400000UL;
  uint32_t raw = float_to_rawbits(num);
  return std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0);
}


// Returns true if 'num' is a quiet NaN, i.e. any NaN that is not a
// signalling NaN. Works for both float and double.
template <typename T>
inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}

} } // namespace v8::internal

#endif // V8_A64_UTILS_A64_H_
@@ -28,6 +28,7 @@
#include "v8.h"
#include "accessors.h"

#include "compiler.h"
#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
@@ -90,10 +91,22 @@ static V8_INLINE bool CheckForName(Handle<String> name,
}


bool Accessors::IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
int* object_offset) {
Isolate* isolate = map->GetIsolate();
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
template <class T>
bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
int* object_offset) {
Isolate* isolate = name->GetIsolate();

if (type->Is(T::String())) {
return CheckForName(name, isolate->heap()->length_string(),
String::kLengthOffset, object_offset);
}

if (!type->IsClass()) return false;
Handle<Map> map = type->AsClass();

switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
@@ -121,18 +134,24 @@ bool Accessors::IsJSObjectFieldAccessor(
JSDataView::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSDataView::kBufferOffset, object_offset);
default: {
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return
CheckForName(name, isolate->heap()->length_string(),
String::kLengthOffset, object_offset);
}
default:
return false;
}
}
}


template
bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
Handle<String> name,
int* object_offset);


template
bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
Handle<String> name,
int* object_offset);


//
// Accessors::ArrayLength
//
@@ -148,45 +167,49 @@ MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,


// The helper function will 'flatten' Number objects.
Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
if (wrapper->map() ==
isolate->context()->native_context()->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}

return value;
}


MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
JSObject* object,
Object* value,
JSObject* object_raw,
Object* value_raw,
void*) {
HandleScope scope(isolate);
Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);

// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributesTrampoline(
isolate->heap()->length_string(), value, NONE);
Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
isolate->factory()->length_string(), value, NONE);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}

value = FlattenNumber(isolate, value);

// Need to call methods that may trigger GC.
HandleScope scope(isolate);

// Protect raw pointers.
Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<Object> value_handle(value, isolate);
Handle<JSArray> array_handle = Handle<JSArray>::cast(object);

bool has_exception;
Handle<Object> uint32_v =
Execution::ToUint32(isolate, value_handle, &has_exception);
Execution::ToUint32(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
Handle<Object> number_v =
Execution::ToNumber(isolate, value_handle, &has_exception);
Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();

if (uint32_v->Number() == number_v->Number()) {
@@ -578,26 +601,28 @@ MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,


MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
JSObject* object,
JSObject* object_raw,
Object* value_raw,
void*) {
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributesTrampoline(
heap->prototype_string(), value_raw, NONE);
}
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw);
if (function_raw == NULL) return isolate->heap()->undefined_value();

HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
isolate->factory()->prototype_string(), value, NONE);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}

Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
*function == object &&
*function == *object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
@@ -611,7 +636,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,

if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
function, "updated", isolate->factory()->prototype_string(), old_value);
function, "update", isolate->factory()->prototype_string(), old_value);
}

return *function;
@@ -642,9 +667,9 @@ MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
HandleScope scope(isolate);
Handle<JSFunction> handle(function);
if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Smi::FromInt(handle->shared()->length());
Handle<JSFunction> function_handle(function);
if (Compiler::EnsureCompiled(function_handle, KEEP_EXCEPTION)) {
return Smi::FromInt(function_handle->shared()->length());
}
return Failure::Exception();
}
@@ -699,21 +724,22 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
inlined_frame_index,
inlined_function->shared()->formal_parameter_count());
int args_count = args_slots.length();
SlotRefValueBuilder slot_refs(
frame,
inlined_frame_index,
inlined_function->shared()->formal_parameter_count());

int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue(isolate);
Handle<Object> value = slot_refs.GetNext(isolate, 0);
array->set(i, *value);
}
slot_refs.Finish(isolate);
arguments->set_elements(*array);
args_slots.Dispose();

// Return the freshly allocated arguments object.
return *arguments;
@@ -88,9 +88,10 @@ class Accessors : public AllStatic {

// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
static bool IsJSObjectFieldAccessor(
Handle<Map> map, Handle<String> name,
int* object_offset);
template <class T>
static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
int* object_offset);


private:
@@ -149,7 +150,7 @@ class Accessors : public AllStatic {
void*);

// Helper functions.
static Object* FlattenNumber(Isolate* isolate, Object* value);
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
@@ -83,26 +83,20 @@ void AllocationSiteCreationContext::ExitScope(
}


Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::GetMode(object->GetElementsKind()) ==
TRACK_ALLOCATION_SITE) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return true;
}
}
return Handle<AllocationSite>(*current(), isolate());
}


void AllocationSiteUsageContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
return false;
}

} } // namespace v8::internal
@@ -41,31 +41,22 @@ namespace internal {
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
AllocationSiteContext(Isolate* isolate, bool activated) {
explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
activated_ = activated;
};
virtual ~AllocationSiteContext() {}

Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }

// If activated, then recursively create mementos
bool activated() const { return activated_; }
bool ShouldCreateMemento(Handle<JSObject> object) { return false; }

// Returns the AllocationSite that matches this scope.
virtual Handle<AllocationSite> EnterNewScope() = 0;

// scope_site should be the handle returned by the matching EnterNewScope()
virtual void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) = 0;
Isolate* isolate() { return isolate_; }

protected:
void update_current_site(AllocationSite* site) {
*(current_.location()) = site;
}

Isolate* isolate() { return isolate_; }
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
current_ = Handle<AllocationSite>(*top_, isolate());
@@ -75,7 +66,6 @@ class AllocationSiteContext {
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
bool activated_;
};


@@ -84,11 +74,10 @@ class AllocationSiteContext {
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
: AllocationSiteContext(isolate, true) { }
: AllocationSiteContext(isolate) { }

virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
Handle<AllocationSite> EnterNewScope();
void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
};


@@ -98,15 +87,35 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
: AllocationSiteContext(isolate, activated),
top_site_(site) { }
: AllocationSiteContext(isolate),
top_site_(site),
activated_(activated) { }

inline Handle<AllocationSite> EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
}

inline void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
}

virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
virtual void ExitScope(Handle<AllocationSite> site,
Handle<JSObject> object) V8_OVERRIDE;
bool ShouldCreateMemento(Handle<JSObject> object);

private:
Handle<AllocationSite> top_site_;
bool activated_;
};


@@ -46,6 +46,7 @@ AllocationTraceNode::AllocationTraceNode(


AllocationTraceNode::~AllocationTraceNode() {
for (int i = 0; i < children_.length(); i++) delete children_[i];
}


@@ -155,6 +156,11 @@ AllocationTracker::AllocationTracker(

AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
for (HashMap::Entry* p = id_to_function_info_.Start();
p != NULL;
p = id_to_function_info_.Next(p)) {
delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
}
}


@@ -169,7 +175,7 @@ void AllocationTracker::PrepareForSerialization() {
}


void AllocationTracker::NewObjectEvent(Address addr, int size) {
void AllocationTracker::AllocationEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
Heap* heap = ids_->heap();

@@ -185,7 +191,8 @@ void AllocationTracker::NewObjectEvent(Address addr, int size) {
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindEntry(shared->address());
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
allocation_trace_buffer_[length++] = id;
AddFunctionInfo(shared, id);
it.Advance();
@@ -245,34 +252,33 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
GlobalHandles::MakeWeak(
reinterpret_cast<Object**>(script_.location()),
this, &HandleWeakScript);
GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
this,
&HandleWeakScript);
}


AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
script_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(script_.location()));
GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
}
}


void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}


void AllocationTracker::UnresolvedLocation::HandleWeakScript(
v8::Isolate* isolate,
v8::Persistent<v8::Value>* obj,
void* data) {
UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
location->script_ = Handle<Script>::null();
obj->Dispose();
const v8::WeakCallbackData<v8::Value, void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
loc->script_ = Handle<Script>::null();
}