Upgrade V8 to 2.2.20

commit 5a25338ac08ff9ab7656f91a67f06491e5cf8431 1 parent 67f7fe5
ry authored
Showing with 1,473 additions and 1,012 deletions.
  1. +12 −0 deps/v8/ChangeLog
  2. +4 −0 deps/v8/src/api.cc
  3. +1 −0  deps/v8/src/arm/assembler-arm.h
  4. +180 −207 deps/v8/src/arm/codegen-arm.cc
  5. +3 −1 deps/v8/src/arm/codegen-arm.h
  6. +154 −176 deps/v8/src/arm/ic-arm.cc
  7. +16 −0 deps/v8/src/arm/jump-target-arm.cc
  8. +13 −0 deps/v8/src/arm/macro-assembler-arm.cc
  9. +44 −0 deps/v8/src/arm/virtual-frame-arm.cc
  10. +11 −2 deps/v8/src/arm/virtual-frame-arm.h
  11. +2 −2 deps/v8/src/array.js
  12. +3 −1 deps/v8/src/ast-inl.h
  13. +1 −0  deps/v8/src/builtins.cc
  14. +11 −0 deps/v8/src/factory.cc
  15. +4 −0 deps/v8/src/factory.h
  16. +56 −3 deps/v8/src/heap.cc
  17. +50 −0 deps/v8/src/heap.h
  18. +11 −4 deps/v8/src/ia32/codegen-ia32.cc
  19. +1 −1  deps/v8/src/ia32/full-codegen-ia32.cc
  20. +141 −195 deps/v8/src/ia32/ic-ia32.cc
  21. +6 −4 deps/v8/src/ic.cc
  22. +0 −4 deps/v8/src/ic.h
  23. +1 −1  deps/v8/src/json.js
  24. +2 −0  deps/v8/src/jump-target-heavy.h
  25. +4 −0 deps/v8/src/jump-target-light-inl.h
  26. +4 −0 deps/v8/src/jump-target-light.h
  27. +5 −5 deps/v8/src/log.cc
  28. +3 −0  deps/v8/src/objects-debug.cc
  29. +35 −0 deps/v8/src/objects-inl.h
  30. +41 −26 deps/v8/src/objects.cc
  31. +26 −1 deps/v8/src/objects.h
  32. +4 −1 deps/v8/src/regexp.js
  33. +15 −4 deps/v8/src/runtime.cc
  34. +1 −1  deps/v8/src/utils.h
  35. +4 −0 deps/v8/src/v8-counters.h
  36. +14 −3 deps/v8/src/v8natives.js
  37. +1 −1  deps/v8/src/version.cc
  38. +104 −18 deps/v8/src/x64/assembler-x64.cc
  39. +19 −8 deps/v8/src/x64/assembler-x64.h
  40. +45 −55 deps/v8/src/x64/codegen-x64.cc
  41. +24 −6 deps/v8/src/x64/disasm-x64.cc
  42. +4 −3 deps/v8/src/x64/full-codegen-x64.cc
  43. +171 −263 deps/v8/src/x64/ic-x64.cc
  44. +3 −4 deps/v8/src/x64/macro-assembler-x64.cc
  45. +2 −1  deps/v8/src/x64/macro-assembler-x64.h
  46. +28 −8 deps/v8/src/x64/virtual-frame-x64.cc
  47. +1 −1  deps/v8/src/x64/virtual-frame-x64.h
  48. +25 −0 deps/v8/test/cctest/test-api.cc
  49. +2 −2 deps/v8/test/cctest/test-profile-generator.cc
  50. +35 −0 deps/v8/test/mjsunit/for-in.js
  51. +46 −0 deps/v8/test/mjsunit/regress/regress-45469.js
  52. +36 −0 deps/v8/test/mjsunit/regress/regress-752.js
  53. +39 −0 deps/v8/test/mjsunit/regress/regress-754.js
  54. +5 −0 deps/v8/test/mjsunit/smi-ops.js
12 deps/v8/ChangeLog
@@ -1,3 +1,15 @@
+2010-06-28: Version 2.2.20
+ Fix bug with for-in on x64 platform (issue 748).
+
+ Fix crash bug on x64 platform (issue 756).
+
+ Fix bug in Object.getOwnPropertyNames. (chromium issue 41243).
+
+ Fix a bug on ARM that caused the result of 1 << x to be
+ miscalculated for some inputs.
+
+ Performance improvements on all platforms.
+
2010-06-23: Version 2.2.19
Fix bug that causes the build to break when profillingsupport=off
4 deps/v8/src/api.cc
@@ -2606,6 +2606,8 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+ self->set_map(
+ *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
self->set_elements(*pixels);
}
@@ -2659,6 +2661,8 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
}
i::Handle<i::ExternalArray> array =
i::Factory::NewExternalArray(length, array_type, data);
+ self->set_map(
+ *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
self->set_elements(*array);
}
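Both API entry points above now switch the receiver to a slow-elements map (via Factory::GetSlowElementsMap) before installing pixel or external array elements, so code that relies on the map implying fast elements (for example the inlined keyed load in codegen-arm.cc later in this commit, which under FLAG_debug_code only asserts that a fast-elements map comes with a FixedArray backing store) will not treat such objects as fast. As a loose JavaScript-level illustration of slow (dictionary) elements only, assuming that a very sparse index switches an ordinary object's backing store to dictionary mode:

// Illustrative sketch only, not part of this commit's tests. A very sparse
// index typically forces dictionary ("slow") elements; keyed loads must
// still return the right values for such objects.
var o = {};
o[0] = "dense";
o[1 << 28] = "sparse";
if (o[0] !== "dense" || o[1 << 28] !== "sparse") {
  throw new Error("keyed load on slow-elements object broken");
}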
1  deps/v8/src/arm/assembler-arm.h
@@ -1110,6 +1110,7 @@ class Assembler : public Malloced {
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
+ bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
private:
// Code buffer:
387 deps/v8/src/arm/codegen-arm.cc
@@ -157,6 +157,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
state_(NULL),
loop_nesting_(0),
type_info_(NULL),
+ function_return_(JumpTarget::BIDIRECTIONAL),
function_return_is_shadowed_(false) {
}
@@ -218,7 +219,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// for stack overflow.
frame_->AllocateStackSlots();
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->AssertIsSpilled();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
@@ -257,6 +258,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
+ frame_->AssertIsSpilled();
for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i);
Slot* slot = par->slot();
@@ -282,8 +284,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
@@ -510,7 +511,6 @@ void CodeGenerator::LoadCondition(Expression* x,
has_valid_frame() &&
!has_cc() &&
frame_->height() == original_height) {
- frame_->SpillAll();
true_target->Jump();
}
}
@@ -535,22 +535,18 @@ void CodeGenerator::Load(Expression* expr) {
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
- VirtualFrame::SpilledScope scope(frame_);
JumpTarget loaded;
JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
loaded.Jump();
materialize_true.Bind();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
loaded.Bind();
cc_reg_ = al;
}
if (true_target.is_linked() || false_target.is_linked()) {
- VirtualFrame::SpilledScope scope(frame_);
// We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly.
JumpTarget loaded;
@@ -561,8 +557,7 @@ void CodeGenerator::Load(Expression* expr) {
// Load "true" if necessary.
if (true_target.is_linked()) {
true_target.Bind();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
}
// If both "true" and "false" need to be loaded jump across the code for
// "false".
@@ -572,8 +567,7 @@ void CodeGenerator::Load(Expression* expr) {
// Load "false" if necessary.
if (false_target.is_linked()) {
false_target.Bind();
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
}
// A value is loaded on all paths reaching this point.
loaded.Bind();
@@ -592,11 +586,11 @@ void CodeGenerator::LoadGlobal() {
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch,
- FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
- frame_->EmitPush(scratch);
+ Register reg = frame_->GetTOSRegister();
+ __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(reg,
+ FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->EmitPush(reg);
}
@@ -613,8 +607,6 @@ ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
void CodeGenerator::StoreArgumentsObject(bool initial) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
ArgumentsAllocationMode mode = ArgumentsMode();
ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
@@ -623,9 +615,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- frame_->EmitPush(ip);
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
} else {
+ frame_->SpillAll();
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the
@@ -649,9 +641,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// already been written to. This can happen if the a function
// has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- frame_->EmitPop(r0);
+ Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(arguments, ip);
done.Branch(ne);
}
StoreToSlot(arguments->slot(), NOT_CONST_INIT);
@@ -754,36 +746,35 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
- frame_->EmitPop(r0);
+ Register tos = frame_->PopToRegister();
// Fast case checks
// Check if the value is 'false'.
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
false_target->Branch(eq);
// Check if the value is 'true'.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
true_target->Branch(eq);
// Check if the value is 'undefined'.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
false_target->Branch(eq);
// Check if the value is a smi.
- __ cmp(r0, Operand(Smi::FromInt(0)));
+ __ cmp(tos, Operand(Smi::FromInt(0)));
false_target->Branch(eq);
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq);
// Slow case: call the runtime.
- frame_->EmitPush(r0);
+ frame_->EmitPush(tos);
frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code.
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
@@ -935,7 +926,15 @@ class DeferredInlineSmiOperation: public DeferredCode {
};
+
+// On entry the non-constant side of the binary operation is in tos_register_
+// and the constant smi side is nowhere. The tos_register_ is not used by the
+// virtual frame. On exit the answer is in the tos_register_ and the virtual
+// frame is unchanged.
void DeferredInlineSmiOperation::Generate() {
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
Register lhs = r1;
Register rhs = r0;
switch (op_) {
@@ -969,45 +968,20 @@ void DeferredInlineSmiOperation::Generate() {
case Token::MOD:
case Token::BIT_OR:
case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (reversed_) {
- if (tos_register_.is(r0)) {
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r1));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- lhs = r0;
- rhs = r1;
- }
- } else {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- lhs = r0;
- rhs = r1;
- }
- }
- break;
- }
-
+ case Token::BIT_AND:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
- if (!reversed_) {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- lhs = r0;
- rhs = r1;
- }
+ if (tos_register_.is(r1)) {
+ __ mov(r0, Operand(Smi::FromInt(value_)));
} else {
- ASSERT(op_ == Token::SHL);
+ ASSERT(tos_register_.is(r0));
__ mov(r1, Operand(Smi::FromInt(value_)));
}
+ if (reversed_ == tos_register_.is(r1)) {
+ lhs = r0;
+ rhs = r1;
+ }
break;
}
@@ -1019,11 +993,17 @@ void DeferredInlineSmiOperation::Generate() {
GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
__ CallStub(&stub);
+
// The generic stub returns its value in r0, but that's not
// necessarily what we want. We want whatever the inlined code
// expected, which is that the answer is in the same register as
// the operand was.
__ Move(tos_register_, r0);
+
+ // The tos register was not in use for the virtual frame that we
+ // came into this function with, so we can merge back to that frame
+ // without trashing it.
+ copied_frame.MergeTo(frame_state()->frame());
}
@@ -1124,12 +1104,6 @@ void CodeGenerator::SmiOperation(Token::Value op,
// We move the top of stack to a register (normally no move is invoved).
Register tos = frame_->PopToRegister();
- // All other registers are spilled. The deferred code expects one argument
- // in a register and all other values are flushed to the stack. The
- // answer is returned in the same register that the top of stack argument was
- // in.
- frame_->SpillAll();
-
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
@@ -1448,8 +1422,6 @@ void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags,
int position) {
- frame_->AssertIsSpilled();
-
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
@@ -1482,7 +1454,6 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand
// and y the receiver.
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
@@ -1500,6 +1471,15 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ // At this point the top two stack elements are probably in registers
+ // since they were just loaded. Ensure they are in regs and get the
+ // regs.
+ Register receiver_reg = frame_->Peek2();
+ Register arguments_reg = frame_->Peek();
+
+ // From now on the frame is spilled.
+ frame_->SpillAll();
+
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
@@ -1513,32 +1493,30 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced.
- __ ldr(r0, MemOperand(sp, 0));
-
- Label slow, done;
+ JumpTarget slow;
+ Label done;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, r0);
- __ b(ne, &slow);
+ __ cmp(ip, arguments_reg);
+ slow.Branch(ne);
Label build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Stack now has 3 elements on it.
// Contents of stack at this point:
- // sp[0]: receiver
+ // sp[0]: receiver - in the receiver_reg register.
// sp[1]: applicand.apply
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
- __ ldr(r0, MemOperand(sp, 0));
- __ BranchOnSmi(r0, &build_args);
+ __ BranchOnSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args);
// Check that applicand.apply is Function.prototype.apply.
@@ -1627,7 +1605,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
StoreArgumentsObject(false);
// Stack and frame now have 4 elements.
- __ bind(&slow);
+ slow.Bind();
// Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so
@@ -1652,7 +1630,6 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
@@ -1661,7 +1638,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
void CodeGenerator::CheckStack() {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->SpillAll();
Comment cmnt(masm_, "[ check stack");
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
// Put the lr setup instruction in the delay slot. kInstrSize is added to
@@ -1683,7 +1660,6 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
Visit(statements->at(i));
}
@@ -1695,7 +1671,6 @@ void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -1713,7 +1688,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(Operand(pairs));
frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// The result is discarded.
}
@@ -1754,7 +1728,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
frame_->EmitPush(Operand(0));
}
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
@@ -1899,7 +1872,6 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
node->target()->continue_target()->Jump();
@@ -1907,7 +1879,6 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
node->target()->break_target()->Jump();
@@ -1915,7 +1886,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->SpillAll();
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
@@ -1926,7 +1897,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
} else {
// Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge.
- frame_->EmitPop(r0);
+ frame_->PopToR0();
frame_->PrepareForReturn();
if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the
@@ -1986,7 +1957,6 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
Load(node->expression());
@@ -2012,7 +1982,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
@@ -2027,7 +1996,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -2055,8 +2023,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
next_test.Bind();
next_test.Unuse();
// Duplicate TOS.
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Comparison(eq, NULL, clause->label(), true);
Branch(false, &next_test);
@@ -2094,7 +2061,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
default_entry.Bind();
VisitStatements(default_clause->statements());
// If control flow can fall out of the default and there is a case after
- // it, jup to that case's body.
+ // it, jump to that case's body.
if (frame_ != NULL && default_exit.is_bound()) {
default_exit.Jump();
}
@@ -2116,7 +2083,6 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -2191,7 +2157,6 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WhileStatement");
CodeForStatementPosition(node);
@@ -2209,7 +2174,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
node->continue_target()->Bind();
if (info == DONT_KNOW) {
- JumpTarget body;
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
@@ -2242,7 +2207,6 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->init() != NULL) {
@@ -2931,7 +2895,6 @@ void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
@@ -2972,10 +2935,8 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
&done);
slow.Bind();
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@@ -2990,16 +2951,17 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Register scratch = VirtualFrame::scratch0();
TypeInfo info = type_info(slot);
frame_->EmitPush(SlotOperand(slot, scratch), info);
+
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
Comment cmnt(masm_, "[ Unhole const");
- frame_->EmitPop(scratch);
+ Register tos = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(scratch);
+ __ cmp(tos, ip);
+ __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
+ frame_->EmitPush(tos);
}
}
}
@@ -3007,6 +2969,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) {
+ VirtualFrame::RegisterAllocationScope scope(this);
LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
@@ -3015,17 +2978,15 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // Load the loaded value from the stack into r0 but leave it on the
+ // Load the loaded value from the stack into a register but leave it on the
// stack.
- __ ldr(r0, MemOperand(sp, 0));
+ Register tos = frame_->Peek();
// If the loaded value is the sentinel that indicates that we
// haven't loaded the arguments object yet, we need to do it now.
JumpTarget exit;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
exit.Branch(ne);
frame_->Drop();
StoreArgumentsObject(false);
@@ -3035,14 +2996,13 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL);
+ VirtualFrame::RegisterAllocationScope scope(this);
if (slot->type() == Slot::LOOKUP) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
frame_->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(slot->var()->name()));
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
@@ -3071,7 +3031,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
} else {
ASSERT(!slot->var()->is_dynamic());
Register scratch = VirtualFrame::scratch0();
- VirtualFrame::RegisterAllocationScope scope(this);
+ Register scratch2 = VirtualFrame::scratch1();
// The frame must be spilled when branching to this target.
JumpTarget exit;
@@ -3085,7 +3045,6 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
__ ldr(scratch, SlotOperand(slot, scratch));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
- frame_->SpillAll();
exit.Branch(ne);
}
@@ -3104,18 +3063,18 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// Skip write barrier if the written value is a smi.
__ tst(tos, Operand(kSmiTagMask));
// We don't use tos any more after here.
- VirtualFrame::SpilledScope spilled_scope(frame_);
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, Operand(offset), r3, r1);
+ // We need an extra register. Until we have a way to do that in the
+ // virtual frame we will cheat and ask for a free TOS register.
+ Register scratch3 = frame_->GetTOSRegister();
+ __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- frame_->SpillAll();
exit.Bind();
}
}
@@ -3289,42 +3248,51 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ RexExp Literal");
+ Register tmp = VirtualFrame::scratch0();
+ // Free up a TOS register that can be used to push the literal.
+ Register literal = frame_->GetTOSRegister();
+
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
+ __ ldr(tmp, frame_->Function());
// Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
+ __ ldr(literal, FieldMemOperand(tmp, literal_offset));
JumpTarget done;
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, ip);
+ __ cmp(literal, ip);
+ // This branch locks the virtual frame at the done label to match the
+ // one we have here, where the literal register is not on the stack and
+ // nothing is spilled.
done.Branch(ne);
- // If the entry is undefined we call the runtime system to computed
+ // If the entry is undefined we call the runtime system to compute
// the literal.
- frame_->EmitPush(r1); // literal array (0)
- __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
- frame_->EmitPush(r0); // literal index (1)
- __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
- frame_->EmitPush(r0);
- __ mov(r0, Operand(node->flags())); // RegExp flags (3)
- frame_->EmitPush(r0);
+ // literal array (0)
+ frame_->EmitPush(tmp);
+ // literal index (1)
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+ // RegExp pattern (2)
+ frame_->EmitPush(Operand(node->pattern()));
+ // RegExp flags (3)
+ frame_->EmitPush(Operand(node->flags()));
frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r2, Operand(r0));
+ __ Move(literal, r0);
+ // This call to bind will get us back to the virtual frame we had before
+ // where things are not spilled and the literal register is not on the stack.
done.Bind();
// Push the literal.
- frame_->EmitPush(r2);
+ frame_->EmitPush(literal);
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3333,20 +3301,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ObjectLiteral");
+ Register literal = frame_->GetTOSRegister();
// Load the function of this activation.
- __ ldr(r3, frame_->Function());
+ __ ldr(literal, frame_->Function());
// Literal array.
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
+ frame_->EmitPush(literal);
// Literal index.
- __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
// Constant properties.
- __ mov(r1, Operand(node->constant_properties()));
+ frame_->EmitPush(Operand(node->constant_properties()));
// Should the object literal have fast elements?
- __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
- frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
+ frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@@ -3369,37 +3337,33 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
if (key->handle()->IsSymbol()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Load(value);
- frame_->EmitPop(r0);
+ frame_->PopToR0();
+ // Fetch the object literal.
+ frame_->SpillAllButCopyTOSToR1();
__ mov(r2, Operand(key->handle()));
- __ ldr(r1, frame_->Top()); // Load the receiver.
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
break;
}
// else fall through
case ObjectLiteral::Property::PROTOTYPE: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0); // dup the result
+ frame_->Dup();
Load(key);
Load(value);
frame_->CallRuntime(Runtime::kSetProperty, 3);
break;
}
case ObjectLiteral::Property::SETTER: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Load(key);
- __ mov(r0, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(1)));
Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
case ObjectLiteral::Property::GETTER: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Load(key);
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(0)));
Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
@@ -3414,16 +3378,16 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ArrayLiteral");
+ Register tos = frame_->GetTOSRegister();
// Load the function of this activation.
- __ ldr(r2, frame_->Function());
+ __ ldr(tos, frame_->Function());
// Load the literals array of the function.
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
- __ mov(r0, Operand(node->constant_elements()));
- frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
+ frame_->EmitPush(tos);
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+ frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length();
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
@@ -3450,10 +3414,10 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// The property must be set by generated code.
Load(value);
- frame_->EmitPop(r0);
-
+ frame_->PopToR0();
// Fetch the object literal.
- __ ldr(r1, frame_->Top());
+ frame_->SpillAllButCopyTOSToR1();
+
// Get the elements array.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -3863,7 +3827,6 @@ void CodeGenerator::VisitCall(Call* node) {
// ------------------------------------------------------------------------
if (var != NULL && var->is_possibly_eval()) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript example: 'eval(arg)' // eval is not known to be shadowed
// ----------------------------------
@@ -3877,8 +3840,7 @@ void CodeGenerator::VisitCall(Call* node) {
Load(function);
// Allocate a frame slot for the receiver.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2);
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
// Load the arguments.
int arg_count = args->length();
@@ -3886,6 +3848,8 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
@@ -5201,7 +5165,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -5273,8 +5236,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
+ frame_->PopToR0();
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
frame_->EmitPush(r0); // r0 has result
@@ -5282,23 +5244,28 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
case Token::BIT_NOT: {
- // smi check
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
- JumpTarget smi_label;
+ Register tos = frame_->PopToRegister();
+ JumpTarget not_smi_label;
JumpTarget continue_label;
- __ tst(r0, Operand(kSmiTagMask));
- smi_label.Branch(eq);
+ // Smi check.
+ __ tst(tos, Operand(kSmiTagMask));
+ not_smi_label.Branch(ne);
+
+ __ mvn(tos, Operand(tos));
+ __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
+ frame_->EmitPush(tos);
+ // The fast case is the first to jump to the continue label, so it gets
+ // to decide the virtual frame layout.
+ continue_label.Jump();
+ not_smi_label.Bind();
+ frame_->SpillAll();
+ __ Move(r0, tos);
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
frame_->CallStub(&stub, 0);
- continue_label.Jump();
+ frame_->EmitPush(r0);
- smi_label.Bind();
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
continue_label.Bind();
- frame_->EmitPush(r0); // r0 has result
break;
}
@@ -5308,16 +5275,16 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::ADD: {
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
+ Register tos = frame_->Peek();
// Smi check.
JumpTarget continue_label;
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
continue_label.Branch(eq);
- frame_->EmitPush(r0);
+
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
+ frame_->EmitPush(r0);
+
continue_label.Bind();
- frame_->EmitPush(r0); // r0 has result
break;
}
default:
@@ -5335,6 +5302,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
int original_height = frame_->height();
#endif
Comment cmnt(masm_, "[ CountOperation");
+ VirtualFrame::RegisterAllocationScope scope(this);
bool is_postfix = node->is_postfix();
bool is_increment = node->op() == Token::INC;
@@ -5478,7 +5446,6 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// after evaluating the left hand side (due to the shortcut
// semantics), but the compiler must (statically) know if the result
// of compiling the binary operation is materialized or not.
- VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) {
JumpTarget is_true;
LoadCondition(node->left(), &is_true, false_target(), false);
@@ -5663,8 +5630,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
if (left_is_null || right_is_null) {
Load(left_is_null ? right : left);
Register tos = frame_->PopToRegister();
- // JumpTargets can't cope with register allocation yet.
- frame_->SpillAll();
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos, ip);
@@ -5707,9 +5672,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
LoadTypeofExpression(operation->expression());
Register tos = frame_->PopToRegister();
- // JumpTargets can't cope with register allocation yet.
- frame_->SpillAll();
-
Register scratch = VirtualFrame::scratch0();
if (check->Equals(Heap::number_symbol())) {
@@ -5830,7 +5792,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
break;
case Token::IN: {
- VirtualFrame::SpilledScope scope(frame_);
Load(left);
Load(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
@@ -5839,7 +5800,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
case Token::INSTANCEOF: {
- VirtualFrame::SpilledScope scope(frame_);
Load(left);
Load(right);
InstanceofStub stub;
@@ -5937,10 +5897,15 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
};
+// Takes key and register in r0 and r1 or vice versa. Returns result
+// in r0.
void DeferredReferenceGetKeyedValue::Generate() {
ASSERT((key_.is(r0) && receiver_.is(r1)) ||
(key_.is(r1) && receiver_.is(r0)));
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
@@ -5961,6 +5926,13 @@ void DeferredReferenceGetKeyedValue::Generate() {
// keyed load has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
+ // Now go back to the frame that we entered with. This will not overwrite
+ // the receiver or key registers since they were not in use when we came
+ // in. The instructions emitted by this merge are skipped over by the
+ // inline load patching mechanism when looking for the branch instruction
+ // that tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
@@ -6114,7 +6086,6 @@ void CodeGenerator::EmitKeyedLoad() {
bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
- VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects key and receiver in registers.
DeferredReferenceGetKeyedValue* deferred =
@@ -6152,10 +6123,12 @@ void CodeGenerator::EmitKeyedLoad() {
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch2, ip);
- deferred->Branch(ne);
+ if (FLAG_debug_code) {
+ __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch2, ip);
+ __ Assert(eq, "JSObject with fast elements map has slow elements");
+ }
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
@@ -6176,7 +6149,7 @@ void CodeGenerator::EmitKeyedLoad() {
__ mov(r0, scratch1);
// Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
+ ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
@@ -6204,9 +6177,9 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Load the value, key and receiver from the stack.
Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value);
+ VirtualFrame::SpilledScope spilled(frame_);
Register receiver = r2;
frame_->EmitPop(receiver);
- VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
4 deps/v8/src/arm/codegen-arm.h
@@ -276,7 +276,9 @@ class CodeGenerator: public AstVisitor {
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
- static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ return FLAG_debug_code ? 27 : 13;
+ }
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private:
330 deps/v8/src/arm/ic-arm.cc
@@ -47,71 +47,97 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ b(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register t0,
+ Register t1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+ // t0: used to holds the receiver map.
+ // t1: used to holds the receiver instance type, receiver bit mask and
+ // elements map.
+
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, t1, miss);
+
+ // Check that the global object does not require access checks.
+ __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ b(nz, miss);
+
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(t1, ip);
+ __ b(nz, miss);
+}
+
+
// Helper function used from LoadIC/CallIC GenerateNormal.
-// receiver: Receiver. It is not clobbered if a jump to the miss label is
-// done
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done
// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as receiver or name clobbering
+// label is not done. Can be the same as elements or name clobbering
// one of these in the case of not jumping to the miss label.
-// The three scratch registers need to be different from the receiver, name and
+// The two scratch registers need to be different from elements, name and
// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
- Register receiver,
+ Register elements,
Register name,
Register result,
Register scratch1,
- Register scratch2,
- Register scratch3,
- DictionaryCheck check_dictionary) {
+ Register scratch2) {
// Main use of the scratch registers.
- // scratch1: Used to hold the property dictionary.
- // scratch2: Used as temporary and to hold the capacity of the property
+ // scratch1: Used as temporary and to hold the capacity of the property
// dictionary.
- // scratch3: Used as temporary.
+ // scratch2: Used as temporary.
Label done;
- // Check for the absence of an interceptor.
- // Load the map into scratch1.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
-
- // Bail out if the receiver has a named interceptor.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
- __ b(nz, miss);
-
- // Bail out if we have a JS global proxy object.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, miss);
-
- // Possible work-around for http://crbug.com/16276.
- // See also: http://codereview.chromium.org/155418.
- __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, miss);
- __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, miss);
-
- // Load the properties array.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- if (check_dictionary == CHECK_DICTIONARY) {
- __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(scratch2, ip);
- __ b(ne, miss);
- }
-
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
- __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch2, scratch2, Operand(1));
+ __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(scratch1, scratch1, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
@@ -122,26 +148,26 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
+ __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
- __ add(scratch3, scratch3, Operand(
+ __ add(scratch2, scratch2, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
+ __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
- // scratch3 = scratch3 * 3.
- __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
+ // scratch2 = scratch2 * 3.
+ __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
- __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
+ __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+ __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
@@ -151,15 +177,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Check that the value is a normal property.
- __ bind(&done); // scratch3 == scratch1 + 4 * index
- __ ldr(scratch2,
- FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
- __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ bind(&done); // scratch2 == elements + 4 * index
+ __ ldr(scratch1,
+ FieldMemOperand(scratch2, kElementsStartOffset + 2 * kPointerSize));
+ __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
__ ldr(result,
- FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
@@ -310,6 +336,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
+ int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
@@ -317,8 +344,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- __ b(ne, slow);
+ __ tst(scratch2,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ b(nz, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@@ -502,13 +530,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
}
-static void GenerateNormalHelper(MacroAssembler* masm,
- int argc,
- bool is_global_object,
- Label* miss,
- Register scratch) {
- // Search dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss,
+ Register scratch) {
+ // r1: function
// Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -518,13 +544,6 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss);
- // Patch the receiver with the global proxy if necessary.
- if (is_global_object) {
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r0, MemOperand(sp, argc * kPointerSize));
- }
-
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
@@ -536,53 +555,18 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
// -- r2 : name
// -- lr : return address
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label miss;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the receiver is a valid JS object. Put the map in r3.
- __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &miss);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object.
- __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, &global_object);
- __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &non_global_object);
+ GenerateDictionaryLoadReceiverCheck(masm, r1, r0, r3, r4, &miss);
- // Accessing global object: Load and invoke.
- __ bind(&global_object);
- // Check that the global object does not require access checks.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
- GenerateNormalHelper(masm, argc, true, &miss, r4);
-
- // Accessing non-global object: Check for access to global proxy.
- Label global_proxy, invoke;
- __ bind(&non_global_object);
- __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, &global_proxy);
- // Check that the non-global, non-global-proxy object does not
- // require access checks.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
- __ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss, r4);
+ // r0: elements
+ // Search the dictionary - put result in register r1.
+ GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
- // Global object access: Check access rights.
- __ bind(&global_proxy);
- __ CheckAccessGlobalProxy(r1, r0, &miss);
- __ b(&invoke);
+ GenerateFunctionTailCall(masm, argc, &miss, r4);
__ bind(&miss);
}
@@ -594,6 +578,12 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// -- lr : return address
// -----------------------------------
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
+ } else {
+ __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
+ }
+
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
@@ -614,23 +604,26 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &invoke);
+ __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
+ __ b(eq, &global);
+ __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(ne, &invoke);
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc * kPointerSize));
+ __ bind(&invoke);
+ }
// Invoke the function.
ParameterCount actual(argc);
- __ bind(&invoke);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
}
@@ -698,7 +691,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
+ GenerateKeyedLoadReceiverCheck(
+ masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
@@ -708,14 +702,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// receiver in r1 is not used after this point.
// r2: key
// r1: function
-
- // Check that the value in r1 is a JSFunction.
- __ BranchOnSmi(r1, &slow_call);
- __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
- __ b(ne, &slow_call);
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+ GenerateFunctionTailCall(masm, argc, &slow_call, r0);
__ bind(&check_number_dictionary);
// r2: key
@@ -751,16 +738,16 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
+ GenerateKeyedLoadReceiverCheck(
+ masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &lookup_monomorphic_cache);
- GenerateDictionaryLoad(
- masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
__ jmp(&do_call);
@@ -826,36 +813,14 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
- Label miss, probe, global;
-
- // Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the receiver is a valid JS object. Put the map in r3.
- __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &miss);
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label miss;
- // Check for access to global object (unlikely).
- __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, &global);
+ GenerateDictionaryLoadReceiverCheck(masm, r0, r1, r3, r4, &miss);
- // Check for non-global object that requires access check.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
-
- __ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
+ // r1: elements
+ GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
__ Ret();
- // Global object access: Check access rights.
- __ bind(&global);
- __ CheckAccessGlobalProxy(r0, r1, &miss);
- __ b(&probe);
-
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -870,6 +835,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- sp[0] : receiver
// -----------------------------------
+ __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
+
__ mov(r3, r0);
__ Push(r3, r2);
@@ -963,7 +930,7 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address -
- (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+ (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
@@ -1013,6 +980,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r1 : receiver
// -----------------------------------
+ __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
+
__ Push(r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
@@ -1045,14 +1014,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register key = r0;
Register receiver = r1;
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
-
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
+
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
@@ -1095,12 +1065,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_string);
GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
+
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
+ __ cmp(r4, ip);
__ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
@@ -1148,9 +1121,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
+ // r1: receiver
+ // r0: key
+ // r3: elements
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
- GenerateDictionaryLoad(
- masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
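
The keyed load and keyed call changes above defer the receiver check until the key has been classified, testing only Map::kHasIndexedInterceptor on the smi path and Map::kHasNamedInterceptor on the string path. Below is a minimal standalone model of that split; the FakeMap type and the bit masks are made up for illustration and are not V8's actual map layout.

// Standalone model (not V8 code): a smi key only has to rule out indexed
// interceptors and a string key only named interceptors, so the receiver
// check can wait until the key has been classified.
#include <cassert>
#include <cstdint>

enum MapBit : uint8_t {
  kHasNamedInterceptor   = 1 << 0,  // hypothetical bit masks
  kHasIndexedInterceptor = 1 << 1,
  kIsAccessCheckNeeded   = 1 << 2,
};

struct FakeMap {
  uint8_t bit_field;
  bool is_js_object;
};

// Mirrors the shape of GenerateKeyedLoadReceiverCheck: go to the slow path if
// the receiver is not a plain JS object, needs an access check, or has the
// interceptor kind that matters for this key.
bool ReceiverStaysOnFastPath(const FakeMap& map, uint8_t interceptor_bit) {
  if (!map.is_js_object) return false;
  if (map.bit_field & kIsAccessCheckNeeded) return false;
  if (map.bit_field & interceptor_bit) return false;
  return true;
}

int main() {
  FakeMap indexed_interceptor{kHasIndexedInterceptor, true};
  // A smi key must reject this receiver...
  assert(!ReceiverStaysOnFastPath(indexed_interceptor, kHasIndexedInterceptor));
  // ...but the very same receiver is still fine for a string key.
  assert(ReceiverStaysOnFastPath(indexed_interceptor, kHasNamedInterceptor));
  return 0;
}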
16 deps/v8/src/arm/jump-target-arm.cc
@@ -61,9 +61,17 @@ void JumpTarget::DoJump() {
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
+ // Zap the fall-through frame since the jump was unconditional.
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
}
+ if (entry_label_.is_bound()) {
+ // You can't jump backwards to an already bound label unless you admitted
+ // up front that this was a bidirectional jump target. Bidirectional jump
+ // targets will zap their type info when bound in case some later virtual
+ // frame with less precise type info branches to them.
+ ASSERT(direction_ != FORWARD_ONLY);
+ }
__ jmp(&entry_label_);
}
@@ -83,6 +91,13 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
+ if (entry_label_.is_bound()) {
+ // You can't branch backwards to an already bound label unless you admitted
+ // up front that this was a bidirectional jump target. Bidirectional jump
+ // targets will zap their type info when bound in case some later virtual
+ // frame with less precise type info branches to them.
+ ASSERT(direction_ != FORWARD_ONLY);
+ }
__ b(cc, &entry_label_);
if (cc == al) {
cgen()->DeleteFrame();
@@ -121,6 +136,7 @@ void JumpTarget::DoBind() {
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) {
+ if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
// If there is a current frame we can use it on the fall through.
if (!entry_frame_set_) {
entry_frame_ = *cgen()->frame();
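
The new asserts and the ForgetTypeInfo call formalize the rule that a backward-branch (BIDIRECTIONAL) target must not carry type info that later branches might lack; the compatibility test on tos_known_smi_map_ appears in the header diff further below. A rough standalone model of that invariant follows, with an illustrative Target type rather than V8's JumpTarget.

// Standalone sketch (not V8 code) of the invariant asserted above: a label
// that can be reached by a backward branch must not rely on type info that
// later call sites might not have.
#include <cassert>
#include <cstdint>

enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };

struct FrameTypeInfo {
  uint32_t tos_known_smi_map;  // bit per stack slot: "known to be a smi"
};

struct Target {
  Directionality direction;
  bool bound = false;
  FrameTypeInfo expected{0};

  // Binding the target records the current frame's type info. A bidirectional
  // target forgets everything it "knows", mirroring ForgetTypeInfo().
  void Bind(FrameTypeInfo current) {
    expected = current;
    if (direction != FORWARD_ONLY) expected.tos_known_smi_map = 0;
    bound = true;
  }

  // A later branch merges its frame into the expected one; this is only safe
  // after binding if the target was declared BIDIRECTIONAL up front.
  void Branch(FrameTypeInfo incoming) {
    if (bound) {
      assert(direction != FORWARD_ONLY);
      // The expected frame claims nothing, so any incoming frame is compatible.
      assert((expected.tos_known_smi_map & ~incoming.tos_known_smi_map) == 0);
    }
  }
};

int main() {
  Target loop_head{BIDIRECTIONAL};
  loop_head.Bind({0b11});   // the frame knew two smis, but the target forgets them
  loop_head.Branch({0b00}); // a less precise frame can still branch back
  return 0;
}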
13 deps/v8/src/arm/macro-assembler-arm.cc
@@ -1548,6 +1548,8 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
void MacroAssembler::Abort(const char* msg) {
+ Label abort_start;
+ bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
@@ -1571,6 +1573,17 @@ void MacroAssembler::Abort(const char* msg) {
push(r0);
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ if (is_const_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
}
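
The nop padding keeps Abort() a fixed number of instructions when the constant pool is blocked, so surrounding code can rely on exact instruction counts. Here is a standalone sketch of the same padding idea, using a FakeAssembler that records strings instead of the real MacroAssembler.

// Standalone sketch (not V8 code) of the padding added to Abort() above:
// a variable-length sequence is padded with nops up to a known constant size,
// so code that counts instructions around it keeps its offsets.
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct FakeAssembler {
  std::vector<std::string> instrs;
  void Emit(const std::string& s) { instrs.push_back(s); }
  int InstructionsGeneratedSince(std::size_t mark) const {
    return static_cast<int>(instrs.size() - mark);
  }
};

// Emits an abort-like sequence and pads it to exactly kExpectedInstructions,
// mirroring the nop loop in MacroAssembler::Abort.
void EmitFixedSizeAbort(FakeAssembler* masm, int message_words) {
  const int kExpectedInstructions = 10;   // illustrative constant
  const std::size_t start = masm->instrs.size();
  masm->Emit("mov r0, #msg");
  for (int i = 0; i < message_words; i++) masm->Emit("push r0");
  masm->Emit("call Runtime::kAbort");
  int generated = masm->InstructionsGeneratedSince(start);
  assert(generated <= kExpectedInstructions);
  while (generated++ < kExpectedInstructions) masm->Emit("nop");
}

int main() {
  FakeAssembler a, b;
  EmitFixedSizeAbort(&a, 2);
  EmitFixedSizeAbort(&b, 5);
  // Both sequences end up the same length regardless of the message size.
  assert(a.instrs.size() == b.instrs.size());
  return 0;
}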
44 deps/v8/src/arm/virtual-frame-arm.cc
@@ -482,6 +482,32 @@ void VirtualFrame::SpillAllButCopyTOSToR0() {
}
+void VirtualFrame::SpillAllButCopyTOSToR1() {
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ __ ldr(r1, MemOperand(sp, 0));
+ break;
+ case R0_TOS:
+ __ push(r0);
+ __ mov(r1, r0);
+ break;
+ case R1_TOS:
+ __ push(r1);
+ break;
+ case R0_R1_TOS:
+ __ Push(r1, r0);
+ __ mov(r1, r0);
+ break;
+ case R1_R0_TOS:
+ __ Push(r0, r1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
@@ -524,6 +550,24 @@ Register VirtualFrame::Peek() {
}
+Register VirtualFrame::Peek2() {
+ AssertIsNotSpilled();
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ case R0_TOS:
+ case R0_R1_TOS:
+ MergeTOSTo(R0_R1_TOS);
+ return r1;
+ case R1_TOS:
+ case R1_R0_TOS:
+ MergeTOSTo(R1_R0_TOS);
+ return r0;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
void VirtualFrame::Dup() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0));
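
SpillAllButCopyTOSToR1 and Peek2 both key off the frame's top-of-stack state, which records which of r0/r1 hold the top one or two values. Below is a standalone model of the Peek2 case analysis only; in the real code MergeTOSTo is also responsible for emitting the moves that put the values into those registers.

// Standalone model (not V8 code) of the TOS states used above: at most the
// top two stack values live in r0/r1, and the state says which is which.
#include <cassert>
#include <string>

enum TopOfStackState {
  NO_TOS_REGISTERS,  // everything is in memory
  R0_TOS,            // top value in r0
  R1_TOS,            // top value in r1
  R0_R1_TOS,         // top in r0, next in r1
  R1_R0_TOS          // top in r1, next in r0
};

// Which register aliases the value just below the top, mirroring Peek2():
// the caller must copy it before modifying it, since it still aliases the slot.
std::string PeekSecond(TopOfStackState state) {
  switch (state) {
    case NO_TOS_REGISTERS:
    case R0_TOS:
    case R0_R1_TOS:
      return "r1";   // after merging to R0_R1_TOS, the second value is in r1
    case R1_TOS:
    case R1_R0_TOS:
      return "r0";   // after merging to R1_R0_TOS, the second value is in r0
  }
  return "";
}

int main() {
  assert(PeekSecond(R0_TOS) == "r1");
  assert(PeekSecond(R1_R0_TOS) == "r0");
  return 0;
}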
13 deps/v8/src/arm/virtual-frame-arm.h
@@ -189,12 +189,15 @@ class VirtualFrame : public ZoneObject {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
+ inline void ForgetTypeInfo() {
+ tos_known_smi_map_ = 0;
+ }
+
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
- AssertIsSpilled();
}
// (Re)attach a frame to its code generator. This informs the register
@@ -202,7 +205,6 @@ class VirtualFrame : public ZoneObject {
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
- AssertIsSpilled();
}
// Emit code for the physical JS entry and exit frame sequences. After
@@ -330,6 +332,10 @@ class VirtualFrame : public ZoneObject {
// must be copied to a scratch register before modification.
Register Peek();
+ // Look at the value beneath the top of the stack. The register returned is
+ // aliased and must be copied to a scratch register before modification.
+ Register Peek2();
+
// Duplicate the top of stack.
void Dup();
@@ -339,6 +345,9 @@ class VirtualFrame : public ZoneObject {
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
+ // Flushes all registers, but it puts a copy of the top-of-stack in r1.
+ void SpillAllButCopyTOSToR1();
+
// Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0();
4 deps/v8/src/array.js
@@ -954,7 +954,7 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
var length = this.length;
- if (index == null) {
+ if (IS_UNDEFINED(index)) {
index = 0;
} else {
index = TO_INTEGER(index);
@@ -981,7 +981,7 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
var length = this.length;
- if (index == null) {
+ if (%_ArgumentsLength() < 2) {
index = length - 1;
} else {
index = TO_INTEGER(index);
4 deps/v8/src/ast-inl.h
@@ -45,7 +45,9 @@ SwitchStatement::SwitchStatement(ZoneStringList* labels)
IterationStatement::IterationStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+ body_(NULL),
+ continue_target_(JumpTarget::BIDIRECTIONAL) {
}
1  deps/v8/src/builtins.cc
@@ -195,6 +195,7 @@ BUILTIN(ArrayCodeGeneric) {
}
// 'array' now contains the JSArray we should initialize.
+ ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
11 deps/v8/src/factory.cc
@@ -274,11 +274,22 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
return copy;
}
+
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
}
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
+}
+
+
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
}
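
GetFastElementsMap and GetSlowElementsMap expose handle-level access to the fast/slow elements map pair, and the heap.cc changes below set Map::kHasFastElements on newly allocated maps by default, which suggests the map must be switched in step with the elements backing store. A rough standalone sketch of that coupling follows; FakeMap and FakeObject are illustrative, not V8 types.

// Standalone sketch (not V8 code): an object's map carries a "has fast
// elements" bit, so changing the elements representation also means moving
// the object to a sibling map with that bit flipped.
#include <cassert>
#include <memory>

struct FakeMap {
  bool has_fast_elements;
  // Returns a map like this one but with the bit cleared, standing in for the
  // transition produced by GetSlowElementsMap().
  std::shared_ptr<FakeMap> SlowElementsVersion() const {
    return std::make_shared<FakeMap>(FakeMap{false});
  }
};

struct FakeObject {
  std::shared_ptr<FakeMap> map;
  bool elements_are_dictionary = false;

  // Switching away from a plain fast-elements array keeps the map's bit in
  // sync, so fast-elements code paths never see the new backing store.
  void UseSlowElements() {
    map = map->SlowElementsVersion();
    elements_are_dictionary = true;
  }
};

int main() {
  FakeObject o{std::make_shared<FakeMap>(FakeMap{true})};
  assert(o.map->has_fast_elements);
  o.UseSlowElements();
  assert(!o.map->has_fast_elements && o.elements_are_dictionary);
  return 0;
}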
4 deps/v8/src/factory.h
@@ -180,6 +180,10 @@ class Factory : public AllStatic {
static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ static Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+ static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (eg, literals) are pretenured by the parser.
59 deps/v8/src/heap.cc
@@ -126,6 +126,12 @@ int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;
+int Heap::young_survivors_after_last_gc_ = 0;
+int Heap::high_survival_rate_period_length_ = 0;
+double Heap::survival_rate_ = 0;
+Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
+Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
@@ -582,6 +588,29 @@ static void VerifyPageWatermarkValidity(PagedSpace* space,
}
#endif
+void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+ double survival_rate =
+ (static_cast<double>(young_survivors_after_last_gc_) * 100) /
+ start_new_space_size;
+
+ if (survival_rate > kYoungSurvivalRateThreshold) {
+ high_survival_rate_period_length_++;
+ } else {
+ high_survival_rate_period_length_ = 0;
+ }
+
+ double survival_rate_diff = survival_rate_ - survival_rate;
+
+ if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(DECREASING);
+ } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(INCREASING);
+ } else {
+ set_survival_rate_trend(STABLE);
+ }
+
+ survival_rate_ = survival_rate;
+}
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
@@ -604,6 +633,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted();
+ int start_new_space_size = Heap::new_space()->Size();
+
if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
@@ -613,16 +644,36 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
+ bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
+ IsStableOrIncreasingSurvivalTrend();
+
+ UpdateSurvivalRateTrend(start_new_space_size);
+
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+
+ if (high_survival_rate_during_scavenges &&
+ IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collections indicate that the mutator is either building or
+      // modifying a structure with a long lifetime.
+      // In this case we aggressively raise old generation memory limits to
+      // postpone the subsequent mark-sweep collection and thus trade memory
+      // space for mutation speed.
+ old_gen_promotion_limit_ *= 2;
+ old_gen_allocation_limit_ *= 2;
+ }
+
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
Scavenge();
tracer_ = NULL;
+
+ UpdateSurvivalRateTrend(start_new_space_size);
}
Counters::objs_since_last_young.Set(0);
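
UpdateSurvivalRateTrend above is plain arithmetic: the survival rate is the surviving new-space bytes as a percentage of the new-space size before collection, a streak of high rates is counted against kYoungSurvivalRateThreshold, and the trend compares consecutive rates against kYoungSurvivalRateAllowedDeviation. The standalone sketch below reproduces that arithmetic and the doubling of the old-generation limits; the threshold and deviation values and the IsHighSurvivalRate definition are assumptions, since the real constants live in heap.h and are not shown in this hunk.

// Standalone sketch (not V8 code) of the survival-rate heuristic above,
// with assumed constants where the real ones are defined in heap.h.
#include <cstdio>

enum Trend { DECREASING, STABLE, INCREASING };

struct SurvivalTracker {
  // Assumed stand-ins for kYoungSurvivalRateThreshold and
  // kYoungSurvivalRateAllowedDeviation.
  static constexpr double kThreshold = 90.0;
  static constexpr double kAllowedDeviation = 10.0;

  double survival_rate = 0;
  int high_rate_period_length = 0;
  Trend trend = STABLE;

  // Same arithmetic as UpdateSurvivalRateTrend: rate as a percentage of the
  // pre-collection new-space size, streak counting, and trend detection.
  void Update(int survivors_bytes, int start_new_space_size) {
    double rate = 100.0 * survivors_bytes / start_new_space_size;
    high_rate_period_length = (rate > kThreshold) ? high_rate_period_length + 1 : 0;
    double diff = survival_rate - rate;
    trend = diff > kAllowedDeviation ? DECREASING
          : diff < -kAllowedDeviation ? INCREASING
          : STABLE;
    survival_rate = rate;
  }
  // Assumed definition; the real predicate is declared in heap.h.
  bool IsHighSurvivalRate() const { return high_rate_period_length > 0; }
};

int main() {
  SurvivalTracker t;
  // Two scavenges where almost everything in the young generation survives.
  t.Update(960 * 1024, 1024 * 1024);
  t.Update(950 * 1024, 1024 * 1024);

  int old_gen_promotion_limit = 8 * 1024 * 1024;    // illustrative numbers
  int old_gen_allocation_limit = 12 * 1024 * 1024;
  if (t.IsHighSurvivalRate() && t.trend != DECREASING) {
    // Mirrors the mark-compact branch: trade memory for mutator speed by
    // postponing the next full collection.
    old_gen_promotion_limit *= 2;
    old_gen_allocation_limit *= 2;
  }
  std::printf("rate=%.1f%% promo=%d alloc=%d\n",
              t.survival_rate, old_gen_promotion_limit, old_gen_allocation_limit);
  return 0;
}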
@@ -1217,7 +1268,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -2545,6 +2596,7 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
+ ASSERT(map->has_fast_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@@ -2598,8 +2650,8 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
- // Both types of globla objects should be allocated using
- // AllocateGloblaObject to be properly initialized.
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);