Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Upgrade v8 to 1.3.3

  • Loading branch information...
commit 79ff085c4a7e4951158912ee502eef807233540c 1 parent 3b0408e
@ry ry authored
Showing with 7,229 additions and 1,232 deletions.
  1. +21 −0 deps/v8/.gitignore
  2. +18 −0 deps/v8/ChangeLog
  3. +6 −0 deps/v8/LICENSE
  4. +6 −4 deps/v8/SConstruct
  5. +11 −0 deps/v8/include/v8.h
  6. +33 −3 deps/v8/src/api.cc
  7. +185 −8 deps/v8/src/arm/cfg-arm.cc
  8. +31 −0 deps/v8/src/arm/codegen-arm-inl.h
  9. +59 −242 deps/v8/src/arm/codegen-arm.cc
  10. +99 −18 deps/v8/src/arm/codegen-arm.h
  11. +125 −211 deps/v8/src/arm/jump-target-arm.cc
  12. +32 −94 deps/v8/src/arm/virtual-frame-arm.cc
  13. +31 −42 deps/v8/src/arm/virtual-frame-arm.h
  14. +353 −75 deps/v8/src/cfg.cc
  15. +550 −64 deps/v8/src/cfg.h
  16. +11 −5 deps/v8/src/d8.cc
  17. +1 −0  deps/v8/src/d8.h
  18. +8 −0 deps/v8/src/execution.cc
  19. +5 −5 deps/v8/src/heap.cc
  20. +6 −8 deps/v8/src/ia32/builtins-ia32.cc
  21. +186 −8 deps/v8/src/ia32/cfg-ia32.cc
  22. +12 −60 deps/v8/src/ia32/codegen-ia32.cc
  23. +51 −0 deps/v8/src/ia32/codegen-ia32.h
  24. +13 −0 deps/v8/src/ia32/cpu-ia32.cc
  25. +64 −0 deps/v8/src/ia32/jump-target-ia32.cc
  26. +27 −21 deps/v8/src/ia32/macro-assembler-ia32.cc
  27. +11 −9 deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
  28. +1 −0  deps/v8/src/ia32/regexp-macro-assembler-ia32.h
  29. +1 −0  deps/v8/src/ia32/virtual-frame-ia32.cc
  30. +0 −61 deps/v8/src/jump-target.cc
  31. +2 −2 deps/v8/src/log.cc
  32. +19 −0 deps/v8/src/objects-inl.h
  33. +0 −18 deps/v8/src/objects.cc
  34. +3 −3 deps/v8/src/objects.h
  35. +17 −19 deps/v8/src/platform-macos.cc
  36. +9 −10 deps/v8/src/spaces-inl.h
  37. +12 −7 deps/v8/src/spaces.cc
  38. +7 −4 deps/v8/src/spaces.h
  39. +3,924 −0 deps/v8/src/third_party/valgrind/valgrind.h
  40. +1 −1  deps/v8/src/version.cc
  41. +20 −0 deps/v8/src/x64/assembler-x64.cc
  42. +2 −0  deps/v8/src/x64/assembler-x64.h
  43. +150 −3 deps/v8/src/x64/builtins-x64.cc
  44. +186 −9 deps/v8/src/x64/cfg-x64.cc
  45. +560 −181 deps/v8/src/x64/codegen-x64.cc
  46. +60 −0 deps/v8/src/x64/codegen-x64.h
  47. +162 −0 deps/v8/src/x64/ic-x64.cc
  48. +64 −0 deps/v8/src/x64/jump-target-x64.cc
  49. +26 −19 deps/v8/src/x64/macro-assembler-x64.cc
  50. +1 −0  deps/v8/src/x64/virtual-frame-x64.cc
  51. +47 −0 deps/v8/test/cctest/test-api.cc
  52. +0 −18 deps/v8/test/mozilla/mozilla.status
View
21 deps/v8/.gitignore
@@ -0,0 +1,21 @@
+*.a
+*.exe
+*.lib
+*.log
+*.map
+*.mk
+*.ncb
+*.pyc
+*.scons*
+*.suo
+*.user
+*.xcodeproj
+d8
+d8_g
+shell
+shell_g
+/obj/
+/tools/visual_studio/Debug
+/tools/visual_studio/Release
+/xcodebuild/
+TAGS
View
18 deps/v8/ChangeLog
@@ -1,3 +1,21 @@
+2009-08-12: Version 1.3.3
+
+ Fix issue 417: incorrect %t placeholder expansion.
+
+ Add .gitignore file similar to Chromium's one.
+
+ Fix SConstruct file to build with new logging code for Android.
+
+ API: added function to find instance of template in prototype
+ chain. Inlined Object::IsInstanceOf.
+
+ Land change to notify valgrind when we modify code on x86.
+
+ Add api call to determine whether a string can be externalized.
+
+ Add a write() command to d8.
+
+
2009-08-05: Version 1.3.2
Started new compiler infrastructure for two-pass compilation using a
View
6 deps/v8/LICENSE
@@ -20,6 +20,12 @@ are:
copyrighted by Douglas Crockford and Baruch Even and released under
an MIT license.
+ - Valgrind client API header, located at third_party/valgrind/valgrind.h
+ This is release under the BSD license.
+
+ - Valgrind client API header, located at third_party/valgrind/valgrind.h
+ This is release under the BSD license.
+
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
View
10 deps/v8/SConstruct
@@ -79,7 +79,9 @@ ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
ANDROID_TOP + '/bionic/libm/include',
ANDROID_TOP + '/bionic/libm/include/arch/arm',
- ANDROID_TOP + '/bionic/libthread_db/include']
+ ANDROID_TOP + '/bionic/libthread_db/include',
+ ANDROID_TOP + '/frameworks/base/include',
+ ANDROID_TOP + '/system/core/include']
ANDROID_LINKFLAGS = ['-nostdlib',
'-Bdynamic',
@@ -331,7 +333,7 @@ CCTEST_EXTRA_FLAGS = {
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
@@ -382,7 +384,7 @@ SAMPLE_FLAGS = {
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
@@ -470,7 +472,7 @@ D8_FLAGS = {
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
View
11 deps/v8/include/v8.h
@@ -901,6 +901,11 @@ class V8EXPORT String : public Primitive {
*/
bool MakeExternal(ExternalAsciiStringResource* resource);
+ /**
+ * Returns true if this string can be made external.
+ */
+ bool CanMakeExternal();
+
/** Creates an undetectable string from the supplied ascii or utf-8 data.*/
static Local<String> NewUndetectable(const char* data, int length = -1);
@@ -1099,6 +1104,12 @@ class V8EXPORT Object : public Value {
Local<Value> GetPrototype();
/**
+ * Finds an instance of the given function template in the prototype
+ * chain.
+ */
+ Local<Object> FindInstanceInPrototypeChain(Handle<FunctionTemplate> tmpl);
+
+ /**
* Call builtin Object.prototype.toString on this object.
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
View
36 deps/v8/src/api.cc
@@ -1928,6 +1928,22 @@ Local<Value> v8::Object::GetPrototype() {
}
+Local<Object> v8::Object::FindInstanceInPrototypeChain(
+ v8::Handle<FunctionTemplate> tmpl) {
+ ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
+ return Local<v8::Object>());
+ ENTER_V8;
+ i::JSObject* object = *Utils::OpenHandle(this);
+ i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
+ while (!object->IsInstanceOf(tmpl_info)) {
+ i::Object* prototype = object->GetPrototype();
+ if (!prototype->IsJSObject()) return Local<Object>();
+ object = i::JSObject::cast(prototype);
+ }
+ return Utils::ToLocal(i::Handle<i::JSObject>(object));
+}
+
+
Local<Array> v8::Object::GetPropertyNames() {
ON_BAILOUT("v8::Object::GetPropertyNames()", return Local<v8::Array>());
ENTER_V8;
@@ -2573,9 +2589,12 @@ Persistent<Context> v8::Context::New(
i::Handle<i::Context> env;
{
ENTER_V8;
+#if defined(ANDROID)
+ // On mobile devices, full GC is expensive.
+#else
// Give the heap a chance to cleanup if we've disposed contexts.
i::Heap::CollectAllGarbageIfContextDisposed();
-
+#endif
v8::Handle<ObjectTemplate> proxy_template = global_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
@@ -2967,7 +2986,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (IsDeadCheck("v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
- i::Handle <i::String> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
// Operation was successful and the string is not a symbol. In this case
@@ -3003,7 +3022,7 @@ bool v8::String::MakeExternal(
if (IsDeadCheck("v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
- i::Handle <i::String> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
// Operation was successful and the string is not a symbol. In this case
@@ -3018,6 +3037,17 @@ bool v8::String::MakeExternal(
}
+bool v8::String::CanMakeExternal() {
+ if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
+ int size = obj->Size(); // Byte size of the original string.
+ if (size < i::ExternalString::kSize)
+ return false;
+ i::StringShape shape(*obj);
+ return !shape.IsExternal();
+}
+
+
Local<v8::Object> v8::Object::New() {
EnsureInitialized("v8::Object::New()");
LOG_API("Object::New");
View
193 deps/v8/src/arm/cfg-arm.cc
@@ -29,6 +29,7 @@
#include "cfg.h"
#include "codegen-inl.h"
+#include "codegen-arm.h" // Include after codegen-inl.h.
#include "macro-assembler-arm.h"
namespace v8 {
@@ -42,6 +43,14 @@ void InstructionBlock::Compile(MacroAssembler* masm) {
{
Comment cmt(masm, "[ InstructionBlock");
for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
instructions_[i]->Compile(masm);
}
}
@@ -91,31 +100,199 @@ void ExitNode::Compile(MacroAssembler* masm) {
}
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Discard key and receiver.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ } else {
+ key()->Get(masm, r2);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ pop(); // Discard receiver.
+ }
+ location()->Set(masm, r0);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Move left to r1 and right to r0.
+ left()->Get(masm, r1);
+ right()->Get(masm, r0);
+ GenericBinaryOpStub stub(op(), mode);
+ __ CallStub(&stub);
+ location()->Set(masm, r0);
+}
+
+
void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
Comment cmnt(masm, "[ ReturnInstr");
- value_->ToRegister(masm, r0);
+ value()->Get(masm, r0);
}
-void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+void Constant::Get(MacroAssembler* masm, Register reg) {
__ mov(reg, Operand(handle_));
}
-void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
- switch (type_) {
+void Constant::Push(MacroAssembler* masm) {
+ __ mov(ip, Operand(handle_));
+ __ push(ip);
+}
+
+
+static MemOperand ToMemOperand(SlotLocation* loc) {
+ switch (loc->type()) {
case Slot::PARAMETER: {
int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ldr(reg, MemOperand(fp, (1 + count - index_) * kPointerSize));
- break;
+ return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
}
case Slot::LOCAL: {
const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- __ ldr(reg, MemOperand(fp, kOffset - index_ * kPointerSize));
- break;
+ return MemOperand(fp, kOffset - loc->index() * kPointerSize);
}
default:
UNREACHABLE();
+ return MemOperand(r0);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ mov(ip, Operand(handle_));
+ __ str(ip, ToMemOperand(loc));
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ ldr(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ str(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ ldr(ip, ToMemOperand(this));
+ __ push(ip); // Push will not destroy ip.
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // Double dispatch.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ ldr(ip, ToMemOperand(this));
+ __ str(ip, ToMemOperand(loc));
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(r0)) __ mov(reg, r0);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(r0)) __ mov(r0, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(r0);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+ value->Get(masm, r0);
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ str(r0, ToMemOperand(loc));
+ case STACK:
+ __ pop(ip);
+ __ str(ip, ToMemOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
}
}
View
31 deps/v8/src/arm/codegen-arm-inl.h
@@ -34,6 +34,37 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+void CodeGenerator::LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control) {
+ LoadCondition(expression, typeof_state, true_target, false_target,
+ force_control);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+ TypeofState typeof_state) {
+ Load(expression, typeof_state);
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ VisitStatements(statements);
+}
+
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+ GetValue(typeof_state);
+}
+
+
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
View
301 deps/v8/src/arm/codegen-arm.cc
@@ -133,8 +133,7 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
allocator_(NULL),
cc_reg_(al),
state_(NULL),
- function_return_is_shadowed_(false),
- in_spilled_code_(false) {
+ function_return_is_shadowed_(false) {
}
@@ -156,7 +155,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
ASSERT(frame_ == NULL);
frame_ = new VirtualFrame();
cc_reg_ = al;
- set_in_spilled_code(false);
{
CodeGenState state(this);
@@ -423,22 +421,6 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
}
-void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- LoadCondition(expression, typeof_state, true_target, false_target,
- force_control);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
@@ -450,7 +432,6 @@ void CodeGenerator::LoadCondition(Expression* x,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
- ASSERT(!in_spilled_code());
ASSERT(!has_cc());
int original_height = frame_->height();
@@ -484,21 +465,10 @@ void CodeGenerator::LoadCondition(Expression* x,
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression, typeof_state);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
JumpTarget true_target;
JumpTarget false_target;
LoadCondition(x, typeof_state, &true_target, &false_target, false);
@@ -697,96 +667,6 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
}
-class GenericBinaryOpStub : public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- int constant_rhs = CodeGenerator::kUnknownIntValue)
- : op_(op),
- mode_(mode),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- int constant_rhs_;
- bool specialized_on_rhs_;
-
- static const int kMaxKnownRhs = 0x40000000;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class KnownIntBits: public BitField<int, 8, 8> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt());
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- return key;
- }
-
- const char* GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::MOD: return "GenericBinaryOpStub_MOD";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
- }
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int constant_rhs) {
@@ -1156,11 +1036,7 @@ void CodeGenerator::Comparison(Condition cc,
// We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict);
frame_->CallStub(&stub, 0);
-
- Result result = allocator_->Allocate(r0);
- ASSERT(result.is_valid());
- __ cmp(result.reg(), Operand(0));
- result.Unuse();
+ __ cmp(r0, Operand(0));
exit.Jump();
// Do smi comparisons by pointer comparison.
@@ -1236,28 +1112,6 @@ void CodeGenerator::CheckStack() {
}
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -1854,7 +1708,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
@@ -1892,9 +1745,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0));
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
jsobject.Bind();
@@ -1975,15 +1827,10 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_register = allocator_->Allocate(r0);
- ASSERT(arg_count_register.is_valid());
- __ mov(arg_count_register.reg(), Operand(1));
- Result result = frame_->InvokeBuiltin(Builtins::FILTER_KEY,
- CALL_JS,
- &arg_count_register,
- 2);
- __ mov(r3, Operand(result.reg()));
- result.Unuse();
+ Result arg_count_reg(r0);
+ __ mov(r0, Operand(1));
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ __ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
__ cmp(r3, Operand(Factory::null_value()));
@@ -2576,9 +2423,8 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name = allocator_->Allocate(r2);
- ASSERT(name.is_valid()); // We are in spilled code.
- __ mov(name.reg(), Operand(slot->var()->name()));
+ Result name(r2);
+ __ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
@@ -2912,16 +2758,14 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
LoadAndSpill(node->key());
LoadAndSpill(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->EmitPush(result.reg());
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->EmitPush(r0);
ASSERT(frame_->height() == original_height + 1);
}
@@ -3261,24 +3105,22 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
// r0: the number of arguments.
- Result num_args = allocator_->Allocate(r0);
- ASSERT(num_args.is_valid());
- __ mov(num_args.reg(), Operand(arg_count));
+ Result num_args(r0);
+ __ mov(r0, Operand(arg_count));
// Load the function into r1 as per calling convention.
- Result function = allocator_->Allocate(r1);
- ASSERT(function.is_valid());
- __ ldr(function.reg(), frame_->ElementAt(arg_count + 1));
+ Result function(r1);
+ __ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- Result result = frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic,
+ RelocInfo::CONSTRUCT_CALL,
+ &num_args,
+ &function,
+ arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3621,9 +3463,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (variable != NULL) {
@@ -3632,9 +3473,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -3647,9 +3487,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else {
@@ -3700,9 +3539,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
continue_label.Jump();
@@ -3725,9 +3563,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
continue_label.Bind();
break;
@@ -3813,9 +3650,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0));
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
}
if (is_postfix) {
@@ -4192,14 +4028,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
- Result result = frame_->InvokeBuiltin(Builtins::IN,
- CALL_JS,
- &arg_count,
- 2);
- frame_->EmitPush(result.reg());
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->EmitPush(r0);
break;
}
@@ -4207,9 +4039,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
LoadAndSpill(left);
LoadAndSpill(right);
InstanceofStub stub;
- Result result = frame_->CallStub(&stub, 2);
+ frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
- __ tst(result.reg(), Operand(result.reg()));
+ __ tst(r0, Operand(r0));
cc_reg_ = eq;
break;
}
@@ -4248,17 +4080,7 @@ Handle<String> Reference::GetName() {
}
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
- ASSERT(cgen_->in_spilled_code());
- cgen_->set_in_spilled_code(false);
- GetValue(typeof_state);
- cgen_->frame()->SpillAll();
- cgen_->set_in_spilled_code(true);
-}
-
-
void Reference::GetValue(TypeofState typeof_state) {
- ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4289,15 +4111,14 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg = cgen_->allocator()->Allocate(r2);
- ASSERT(name_reg.is_valid());
- __ mov(name_reg.reg(), Operand(name));
+ Result name_reg(r2);
+ __ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
- frame->EmitPush(answer.reg());
+ frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->EmitPush(r0);
break;
}
@@ -4316,8 +4137,8 @@ void Reference::GetValue(TypeofState typeof_state) {
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame->CallCodeObject(ic, rmode, 0);
- frame->EmitPush(answer.reg());
+ frame->CallCodeObject(ic, rmode, 0);
+ frame->EmitPush(r0);
break;
}
@@ -4426,20 +4247,18 @@ void Reference::SetValue(InitState init_state) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value = cgen_->allocator()->Allocate(r0);
- ASSERT(value.is_valid());
- frame->EmitPop(value.reg());
+ Result value(r0);
+ frame->EmitPop(r0);
// Setup the name register.
- Result property_name = cgen_->allocator()->Allocate(r2);
- ASSERT(property_name.is_valid());
- __ mov(property_name.reg(), Operand(name));
- Result answer = frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
- frame->EmitPush(answer.reg());
+ Result property_name(r2);
+ __ mov(r2, Operand(name));
+ frame->CallCodeObject(ic,
+ RelocInfo::CODE_TARGET,
+ &value,
+ &property_name,
+ 0);
+ frame->EmitPush(r0);
break;
}
@@ -4452,12 +4271,10 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value = cgen_->allocator()->Allocate(r0);
- ASSERT(value.is_valid());
- frame->EmitPop(value.reg()); // value
- Result result =
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
- frame->EmitPush(result.reg());
+ Result value(r0);
+ frame->EmitPop(r0); // value
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->EmitPush(r0);
break;
}
View
117 deps/v8/src/arm/codegen-arm.h
@@ -183,9 +183,6 @@ class CodeGenerator: public AstVisitor {
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
static const int kUnknownIntValue = -1;
private:
@@ -222,11 +219,11 @@ class CodeGenerator: public AstVisitor {
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
+ inline void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+ inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void GenCode(FunctionLiteral* fun);
@@ -263,17 +260,17 @@ class CodeGenerator: public AstVisitor {
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ inline void LoadAndSpill(Expression* expression,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
- void LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control);
+ inline void LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -405,12 +402,6 @@ class CodeGenerator: public AstVisitor {
// to some unlinking code).
bool function_return_is_shadowed_;
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame function are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
@@ -421,6 +412,96 @@ class CodeGenerator: public AstVisitor {
};
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ int constant_rhs = CodeGenerator::kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class KnownIntBits: public BitField<int, 8, 8> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt());
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ return key;
+ }
+
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+ }
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
View
336 deps/v8/src/arm/jump-target-arm.cc
@@ -47,23 +47,29 @@ void JumpTarget::DoJump() {
ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
+ // Backward jump. There is already a frame expectation at the target.
ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
- __ jmp(&entry_label_);
} else {
- // Preconfigured entry frame is not used on ARM.
- ASSERT(entry_frame_ == NULL);
- // Forward jump. The current frame is added to the end of the list
- // of frames reaching the target block and a jump to the merge code
- // is emitted.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
+ // Use the current frame as the expected one at the target if necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = cgen()->frame();
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ } else {
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ }
+
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
}
+ __ jmp(&entry_label_);
}
@@ -74,56 +80,21 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(direction_ == BIDIRECTIONAL);
// Backward branch. We have an expected frame to merge to on the
// backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ b(cc, &entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ b(cc, &merge_labels_[i]);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ b(NegateCondition(cc), &original_fall_through);
cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ b(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
} else {
- // Preconfigured entry frame is not used on ARM.
- ASSERT(entry_frame_ == NULL);
- // Forward branch. A copy of the current frame is added to the end
- // of the list of frames reaching the target block and a branch to
- // the merge code is emitted.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- __ b(cc, &merge_labels_.last());
+ // Clone the current frame to use as the expected one at the target if
+ // necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ }
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
}
+ __ b(cc, &entry_label_);
}
@@ -139,13 +110,19 @@ void JumpTarget::Call() {
ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
- cgen()->frame()->SpillAll();
+ // Calls are always 'forward' so we use a copy of the current frame (plus
+ // one for a return address) as the expected frame.
+ ASSERT(entry_frame_ == NULL);
VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ bl(&merge_labels_.last());
+ entry_frame_ = target_frame;
+
+ // The predicate is_linked() should now be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+
+ __ bl(&entry_label_);
}
@@ -156,168 +133,105 @@ void JumpTarget::DoBind() {
// block.
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
- if (direction_ == FORWARD_ONLY) {
- // A simple case: no forward jumps and no possible backward jumps.
- if (!is_linked()) {
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- ASSERT(cgen()->has_valid_frame());
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
- __ bind(&entry_label_);
- return;
- }
-
- // Another simple case: no fall through, a single forward jump,
- // and no possible backward jumps.
- if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
- // Pick up the only reaching frame, take ownership of it, and
- // use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
- __ bind(&entry_label_);
- return;
- }
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
+ // If there is a current frame we can use it on the fall through.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ } else {
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ }
+ } else {
+ // If there is no current frame we must have an entry frame which we can
+ // copy.
+ ASSERT(entry_frame_ != NULL);
RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
- // Compute the frame to use for entry to the block.
- if (entry_frame_ == NULL) {
- ComputeEntryFrame();
+ // The predicate is_linked() should be made false. Its implementation
+ // detects the presence (or absence) of frame pointers in the
+ // reaching_frames_ list. If we inserted a bogus frame to make
+ // is_linked() true, remove it now.
+ if (is_linked()) {
+ reaching_frames_.Clear();
}
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
+ __ bind(&entry_label_);
+}
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // and possible fall through to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ b(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through, and it didn't need merge
- // code, we need to pick up the frame so we can jump around
- // subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
+void BreakTarget::Jump() {
+ // On ARM we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ DoJump();
+}
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
+void BreakTarget::Jump(Result* arg) {
+ // On ARM we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ cgen()->frame()->Push(arg);
+ DoJump();
+}
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On ARM we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->Drop(count);
}
- __ bind(&entry_label_);
+ DoBind();
}
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_ + 1);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On ARM we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ }
+ DoBind();
+ *arg = cgen()->frame()->Pop();
+}
+
+
#undef __
View
126 deps/v8/src/arm/virtual-frame-arm.cc
@@ -76,72 +76,23 @@ void VirtualFrame::SyncRange(int begin, int end) {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ sub(sp, sp, Operand(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Fix any sync bit problems from the bottom-up, stopping when we
- // hit the stack pointer or the top of the frame if the stack
- // pointer is floating above the frame.
- int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
- for (int i = 0; i <= limit; i++) {
- FrameElement source = elements_[i];
- FrameElement target = expected->elements_[i];
- if (source.is_synced() && !target.is_synced()) {
- elements_[i].clear_sync();
- } else if (!source.is_synced() && target.is_synced()) {
- SyncElementAt(i);
- }
- }
-
- // Adjust the stack point downard if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
+ // ARM frames are currently always in memory.
ASSERT(Equals(expected));
}
void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- // On ARM, all elements are in memory.
-
-#ifdef DEBUG
- int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
- for (int i = start; i >= 0; i--) {
- ASSERT(elements_[i].is_memory());
- ASSERT(expected->elements_[i].is_memory());
- }
-#endif
+ UNREACHABLE();
}
void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+ UNREACHABLE();
}
void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+ UNREACHABLE();
}
@@ -235,76 +186,62 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
+void VirtualFrame::RawCallStub(CodeStub* stub) {
ASSERT(cgen()->HasValidEntryRegisters());
__ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
PrepareForCall(0, 0);
arg->Unuse();
- return RawCallStub(stub);
+ RawCallStub(stub);
}
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
PrepareForCall(0, 0);
arg0->Unuse();
arg1->Unuse();
- return RawCallStub(stub);
+ RawCallStub(stub);
}
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- Result* arg_count_register,
- int arg_count) {
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ Result* arg_count_register,
+ int arg_count) {
ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
- Result result = cgen()->allocator()->Allocate(r0);
- return result;
}
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
@@ -325,14 +262,14 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
break;
}
PrepareForCall(spilled_args, dropped_args);
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::LOAD_IC:
@@ -353,15 +290,15 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
}
PrepareForCall(spilled_args, dropped_args);
arg->Unuse();
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args) {
int spilled_args = 1;
switch (code->kind()) {
case Code::STORE_IC:
@@ -385,11 +322,12 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
PrepareForCall(spilled_args, dropped_args);
arg0->Unuse();
arg1->Unuse();
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
void VirtualFrame::Drop(int count) {
+ ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
View
73 deps/v8/src/arm/virtual-frame-arm.h
@@ -52,20 +52,7 @@ class VirtualFrame : public ZoneObject {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ SpilledScope() {}
};
// An illegal index into the virtual frame.
@@ -125,12 +112,14 @@ class VirtualFrame : public ZoneObject {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
- ForgetElements(count);
+ // On ARM, all elements are in memory, so there is no extra bookkeeping
+ // (registers, copies, etc.) beyond dropping the elements.
+ elements_.Rewind(stack_pointer_ + 1);
}
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
+ // Forget count elements from the top of the frame and adjust the stack
+ // pointer downward. This is used, for example, before merging frames at
+ // break, continue, and return targets.
void ForgetElements(int count);
// Spill all values from the frame to memory.
@@ -294,46 +283,46 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count) {
+ void CallStub(CodeStub* stub, int arg_count) {
PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
+ RawCallStub(stub);
}
// Call stub that expects its argument in r0. The argument is given
// as a result which must be the register r0.
- Result CallStub(CodeStub* stub, Result* arg);
+ void CallStub(CodeStub* stub, Result* arg);
// Call stub that expects its arguments in r1 and r0. The arguments
// are given as results which must be the appropriate registers.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+ void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
+ void CallRuntime(Runtime::Function* f, int arg_count);
+ void CallRuntime(Runtime::FunctionId id, int arg_count);
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- Result* arg_count_register,
- int arg_count);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flag,
+ Result* arg_count_register,
+ int arg_count);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments are passed as results and
// consumed by the call.
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
@@ -517,11 +506,11 @@ class VirtualFrame : public ZoneObject {
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
- Result RawCallStub(CodeStub* stub);
+ void RawCallStub(CodeStub* stub);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+ void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
bool Equals(VirtualFrame* other);
View
428 deps/v8/src/cfg.cc
@@ -42,8 +42,10 @@ CfgGlobals* CfgGlobals::top_ = NULL;
CfgGlobals::CfgGlobals(FunctionLiteral* fun)
: global_fun_(fun),
global_exit_(new ExitNode()),
+ nowhere_(new Nowhere()),
#ifdef DEBUG
node_counter_(0),
+ temp_counter_(0),
#endif
previous_(top_) {
top_ = this;
@@ -58,6 +60,12 @@ Cfg* Cfg::Build() {
if (fun->scope()->num_heap_slots() > 0) {
BAILOUT("function has context slots");
}
+ if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
+ BAILOUT("function has too many locals");
+ }
+ if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
+ BAILOUT("function has too many parameters");
+ }
if (fun->scope()->arguments() != NULL) {
BAILOUT("function uses .arguments");
}
@@ -67,17 +75,20 @@ Cfg* Cfg::Build() {
BAILOUT("empty function body");
}
- StatementBuilder builder;
+ StatementCfgBuilder builder;
builder.VisitStatements(body);
- Cfg* cfg = builder.cfg();
- if (cfg == NULL) {
+ Cfg* graph = builder.graph();
+ if (graph == NULL) {
BAILOUT("unsupported statement type");
}
- if (cfg->has_exit()) {
+ if (graph->is_empty()) {
+ BAILOUT("function body produces empty cfg");
+ }
+ if (graph->has_exit()) {
BAILOUT("control path without explicit return");
}
- cfg->PrependEntryNode();
- return cfg;
+ graph->PrependEntryNode();
+ return graph;
}
#undef BAILOUT
@@ -90,8 +101,10 @@ void Cfg::PrependEntryNode() {
void Cfg::Append(Instruction* instr) {
- ASSERT(has_exit());
- ASSERT(!is_empty());
+ ASSERT(is_empty() || has_exit());
+ if (is_empty()) {
+ entry_ = exit_ = new InstructionBlock();
+ }
InstructionBlock::cast(exit_)->Append(instr);
}
@@ -104,6 +117,27 @@ void Cfg::AppendReturnInstruction(Value* value) {
}
+void Cfg::Concatenate(Cfg* other) {
+ ASSERT(is_empty() || has_exit());
+ if (other->is_empty()) return;
+
+ if (is_empty()) {
+ entry_ = other->entry();
+ exit_ = other->exit();
+ } else {
+ // We have a pair of nonempty fragments and this has an available exit.
+ // Destructively glue the fragments together.
+ InstructionBlock* first = InstructionBlock::cast(exit_);
+ InstructionBlock* second = InstructionBlock::cast(other->entry());
+ first->instructions()->AddAll(*second->instructions());
+ if (second->successor() != NULL) {
+ first->set_successor(second->successor());
+ exit_ = other->exit();
+ }
+ }
+}
+
+
void InstructionBlock::Unmark() {
if (is_marked_) {
is_marked_ = false;
@@ -166,11 +200,45 @@ Handle<Code> Cfg::Compile(Handle<Script> script) {
}
+void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where(TempLocation::STACK);
+}
+
+
+void OneOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where((temp == value_)
+ ? TempLocation::ACCUMULATOR
+ : TempLocation::STACK);
+}
+
+
+void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where((temp == value0_ || temp == value1_)
+ ? TempLocation::ACCUMULATOR
+ : TempLocation::STACK);
+}
+
+
+void PositionInstr::Compile(MacroAssembler* masm) {
+ if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
+ masm->RecordStatementPosition(pos_);
+ masm->RecordPosition(pos_);
+ }
+}
+
+
+void MoveInstr::Compile(MacroAssembler* masm) {
+ location()->Move(masm, value());
+}
+
+
// The expression builder should not be used for declarations or statements.
-void ExpressionBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
+void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
#define DEFINE_VISIT(type) \
- void ExpressionBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
+ void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
STATEMENT_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
@@ -178,35 +246,32 @@ STATEMENT_NODE_LIST(DEFINE_VISIT)
// Macros (temporarily) handling unsupported expression types.
#define BAILOUT(reason) \
do { \
- value_ = NULL; \
+ graph_ = NULL; \
return; \
} while (false)
-#define CHECK_BAILOUT() \
- if (value_ == NULL) { return; } else {}
-
-void ExpressionBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
BAILOUT("FunctionLiteral");
}
-void ExpressionBuilder::VisitFunctionBoilerplateLiteral(
+void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* expr) {
BAILOUT("FunctionBoilerplateLiteral");
}
-void ExpressionBuilder::VisitConditional(Conditional* expr) {
+void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
BAILOUT("Conditional");
}
-void ExpressionBuilder::VisitSlot(Slot* expr) {
+void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
BAILOUT("Slot");
}
-void ExpressionBuilder::VisitVariableProxy(VariableProxy* expr) {
+void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL || rewrite->AsSlot() == NULL) {
BAILOUT("unsupported variable (not a slot)");
@@ -215,201 +280,332 @@ void ExpressionBuilder::VisitVariableProxy(VariableProxy* expr) {
if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
BAILOUT("unsupported slot type (not a parameter or local)");
}
+ // Ignore the passed destination.
value_ = new SlotLocation(slot->type(), slot->index());
}
-void ExpressionBuilder::VisitLiteral(Literal* expr) {
+void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
+ // Ignore the passed destination.
value_ = new Constant(expr->handle());
}
-void ExpressionBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
BAILOUT("RegExpLiteral");
}
-void ExpressionBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
BAILOUT("ObjectLiteral");
}
-void ExpressionBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
BAILOUT("ArrayLiteral");
}
-void ExpressionBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+void ExpressionCfgBuilder::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
BAILOUT("CatchExtensionObject");
}
-void ExpressionBuilder::VisitAssignment(Assignment* expr) {
- BAILOUT("Assignment");
+void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
+ if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
+ BAILOUT("unsupported compound assignment");
+ }
+ Expression* lhs = expr->target();
+ if (lhs->AsProperty() != NULL) {
+ BAILOUT("unsupported property assignment");
+ }
+
+ Variable* var = lhs->AsVariableProxy()->AsVariable();
+ if (var == NULL) {
+ BAILOUT("unsupported invalid left-hand side");
+ }
+ if (var->is_global()) {
+ BAILOUT("unsupported global variable");
+ }
+ Slot* slot = var->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
+ BAILOUT("unsupported slot lhs (not a parameter or local)");
+ }
+
+ // Parameter and local slot assignments.
+ ExpressionCfgBuilder builder;
+ SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
+ builder.Build(expr->value(), loc);
+ if (builder.graph() == NULL) {
+ BAILOUT("unsupported expression in assignment");
+ }
+ // If the expression did not come back in the slot location, append
+ // a move to the CFG.
+ graph_ = builder.graph();
+ if (builder.value() != loc) {
+ graph()->Append(new MoveInstr(loc, builder.value()));
+ }
+ // Record the assignment.
+ assigned_vars_.AddElement(loc);
+ // Ignore the destination passed to us.
+ value_ = loc;
}
-void ExpressionBuilder::VisitThrow(Throw* expr) {
+void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
BAILOUT("Throw");
}
-void ExpressionBuilder::VisitProperty(Property* expr) {
- BAILOUT("Property");
+void ExpressionCfgBuilder::VisitProperty(Property* expr) {
+ ExpressionCfgBuilder object, key;
+ object.Build(expr->obj(), NULL);
+ if (object.graph() == NULL) {
+ BAILOUT("unsupported object subexpression in propload");
+ }
+ key.Build(expr->key(), NULL);
+ if (key.graph() == NULL) {
+ BAILOUT("unsupported key subexpression in propload");
+ }
+
+ if (destination_ == NULL) destination_ = new TempLocation();
+
+ graph_ = object.graph();
+ // Insert a move to a fresh temporary if the object value is in a slot
+ // that's assigned in the key.
+ Location* temp = NULL;
+ if (object.value()->is_slot() &&
+ key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
+ temp = new TempLocation();
+ graph()->Append(new MoveInstr(temp, object.value()));
+ }