
deps: update v8 to 3.22.24.17

indutny committed Jan 22, 2014
1 parent 17712dd commit 25e66ce5cbb7d5c8559c15af99feaf2ad703a973
Showing with 585 additions and 147 deletions.
  1. +8 −0 deps/v8/src/arguments.cc
  2. +11 −0 deps/v8/src/arguments.h
  3. +13 −2 deps/v8/src/arm/builtins-arm.cc
  4. +5 −0 deps/v8/src/arm/deoptimizer-arm.cc
  5. +45 −22 deps/v8/src/arm/lithium-codegen-arm.cc
  6. +3 −0 deps/v8/src/arm/lithium-codegen-arm.h
  7. +4 −2 deps/v8/src/arm/macro-assembler-arm.h
  8. +1 −0 deps/v8/src/ast.cc
  9. +3 −0 deps/v8/src/builtins.h
  10. +13 −5 deps/v8/src/code-stubs-hydrogen.cc
  11. +1 −1 deps/v8/src/date.js
  12. +1 −2 deps/v8/src/deoptimizer.cc
  13. +4 −0 deps/v8/src/deoptimizer.h
  14. +18 −2 deps/v8/src/ia32/builtins-ia32.cc
  15. +7 −0 deps/v8/src/ia32/deoptimizer-ia32.cc
  16. +40 −22 deps/v8/src/ia32/lithium-codegen-ia32.cc
  17. +3 −0 deps/v8/src/ia32/lithium-codegen-ia32.h
  18. +4 −2 deps/v8/src/ia32/macro-assembler-ia32.h
  19. +1 −0 deps/v8/src/mark-compact.cc
  20. +3 −5 deps/v8/src/mips/assembler-mips-inl.h
  21. +15 −10 deps/v8/src/mips/builtins-mips.cc
  22. +13 −12 deps/v8/src/mips/codegen-mips.cc
  23. +5 −0 deps/v8/src/mips/deoptimizer-mips.cc
  24. +45 −22 deps/v8/src/mips/lithium-codegen-mips.cc
  25. +3 −0 deps/v8/src/mips/lithium-codegen-mips.h
  26. +8 −8 deps/v8/src/mips/macro-assembler-mips.cc
  27. +4 −2 deps/v8/src/mips/macro-assembler-mips.h
  28. +6 −1 deps/v8/src/mips/simulator-mips.cc
  29. +1 −0 deps/v8/src/mips/simulator-mips.h
  30. +12 −0 deps/v8/src/objects.cc
  31. +1 −0 deps/v8/src/objects.h
  32. +1 −1 deps/v8/src/store-buffer.cc
  33. +1 −1 deps/v8/src/version.cc
  34. +13 −2 deps/v8/src/x64/builtins-x64.cc
  35. +5 −0 deps/v8/src/x64/deoptimizer-x64.cc
  36. +43 −21 deps/v8/src/x64/lithium-codegen-x64.cc
  37. +4 −0 deps/v8/src/x64/lithium-codegen-x64.h
  38. +4 −2 deps/v8/src/x64/macro-assembler-x64.h
  39. +32 −0 deps/v8/test/mjsunit/regress/regress-280531.js
  40. +42 −0 deps/v8/test/mjsunit/regress/regress-3027.js
  41. +46 −0 deps/v8/test/mjsunit/regress/regress-318420.js
  42. +44 −0 deps/v8/test/mjsunit/regress/regress-331444.js
  43. +49 −0 deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
deps/v8/src/arguments.cc
@@ -117,4 +117,12 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
#undef WRITE_CALL_2_VOID
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+ // TODO(ulan): This clobbers only a subset of registers depending on the
+ // compiler. Rewrite this in assembly to really clobber all registers.
+ // GCC for ia32 uses the FPU and does not touch XMM registers.
+ return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
} } // namespace v8::internal
deps/v8/src/arguments.h
@@ -289,12 +289,23 @@ class FunctionCallbackArguments
};
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
deps/v8/src/arm/builtins-arm.cc
@@ -859,7 +859,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -868,7 +869,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -877,6 +878,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
deps/v8/src/arm/deoptimizer-arm.cc
@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
deps/v8/src/arm/lithium-codegen-arm.cc
@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
}
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
// Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
__ mov(pc, ip);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
@@ -783,7 +811,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (condition == al && frame_is_built_) {
+ // Go through the jump table if we need to handle a condition, build a
+ // frame, or restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
@@ -2853,16 +2884,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
@@ -3434,7 +3456,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ jmp(&receiver_ok);
__ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
+ __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
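
The DoWrapReceiver hunk above stops loading the global object from the current context register and instead goes through the frame's context slot. For reference, a minimal JavaScript sketch of the sloppy-mode rule this path implements (illustrative only, run as a plain script):

    // A null or undefined receiver passed via call/apply is replaced with the
    // global receiver, which is the value the &global_object path computes.
    function whoAmI() { return this; }

    var g = whoAmI();                            // undefined receiver -> global
    console.log(whoAmI.apply(null) === g);       // true
    console.log(whoAmI.call(undefined) === g);   // true
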
deps/v8/src/arm/lithium-codegen-arm.h
@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
deps/v8/src/arm/macro-assembler-arm.h
@@ -1045,8 +1045,10 @@ class MacroAssembler: public Assembler {
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
deps/v8/src/ast.cc
@@ -554,6 +554,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+ JSObject::TryMigrateInstance(holder_);
type = Handle<Map>(holder()->map());
}
}
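
The added TryMigrateInstance call ensures each holder on the prototype chain is moved off a deprecated map before its map is recorded for the call target. A hypothetical sketch of how a prototype instance can end up needing migration (names illustrative, not from this commit):

    // Changing a field's representation deprecates the old map; objects still
    // on the old map are migrated lazily. Here p sits on o's prototype chain.
    function Proto() { this.x = 1; }            // x starts as a smi field
    Proto.prototype.m = function() { return this.x; };

    var p = new Proto();
    function C() {}
    C.prototype = p;
    var o = new C();

    var p2 = new Proto();
    p2.x = 1.5;                                 // deprecates the map p still uses

    function call(obj) { return obj.m(); }      // walks o -> p -> Proto.prototype
    for (var i = 0; i < 100000; i++) call(o);   // optimize while p may be stale
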
deps/v8/src/builtins.h
@@ -111,6 +111,8 @@ enum BuiltinExtraArguments {
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
@@ -400,6 +402,7 @@ class Builtins {
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
+ static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
deps/v8/src/code-stubs-hydrogen.cc
@@ -721,15 +721,23 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
JSArrayBuilder* array_builder, ElementsKind kind) {
+ // Insert a bounds check because the number of arguments might exceed
+ // the kInitialMaxFastElementArray limit. This cannot happen for code
+ // that was parsed, but calling via Array.apply(thisArg, [...]) might
+ // trigger it.
+ HValue* length = GetArgumentsLength();
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
// We need to fill with the hole if it's a smi array in the multi-argument
// case because we might have to bail out while copying arguments into
// the array because they aren't compatible with a smi array.
// If it's a double array, no problem, and if it's fast then no
// problem either because doubles are boxed.
- HValue* length = GetArgumentsLength();
bool fill_with_hole = IsFastSmiElementsKind(kind);
- HValue* new_object = array_builder->AllocateArray(length,
- length,
+ HValue* new_object = array_builder->AllocateArray(checked_length,
+ checked_length,
fill_with_hole);
HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
@@ -739,10 +747,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
context(),
LoopBuilder::kPostIncrement);
HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, length, Token::LT);
+ HValue* key = builder.BeginBody(start, checked_length, Token::LT);
HInstruction* argument_elements = Add<HArgumentsElements>(false);
HInstruction* argument = Add<HAccessArgumentsAt>(
- argument_elements, length, key);
+ argument_elements, checked_length, key);
Add<HStoreKeyed>(elements, key, argument, kind);
builder.EndBody();
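
As the comment in this hunk notes, a parsed call site has a fixed, small argument count, so only apply-style calls can exceed the fast-allocation limit. A quick illustration of the two cases:

    // Parsed call site: the argument count is fixed at compile time.
    var few = Array(1, 2, 3);

    // Array.apply: the argument count is the runtime length of an arbitrary
    // array, so it can exceed kInitialMaxFastElementArray and must be checked.
    var many = Array.apply(null, new Array(100000));
    console.log(few.length, many.length);       // 3 100000
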
deps/v8/src/date.js
@@ -132,7 +132,7 @@ function TimeClip(time) {
// strings over and over again.
var Date_cache = {
// Cached time value.
- time: NAN,
+ time: 0,
// String input for which the cached time is valid.
string: null
};
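
For context, this literal backs a one-entry cache keyed by the input string, so parsing the same date string twice should only do the work once. A sketch of the case it serves (illustrative, not a test from this commit):

    var a = new Date("2014-01-22");
    var b = new Date("2014-01-22");             // can be served from Date_cache
    console.log(a.getTime() === b.getTime());   // true
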
deps/v8/src/deoptimizer.cc
@@ -1574,8 +1574,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_failure->entry()));
}
deps/v8/src/deoptimizer.h
@@ -412,6 +412,10 @@ class Deoptimizer : public Malloced {
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);
+ // Select the version of NotifyStubFailure builtin that either saves or
+ // doesn't save the double registers depending on CPU features.
+ Code* NotifyStubFailureBuiltin();
+
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
deps/v8/src/ia32/builtins-ia32.cc
@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -620,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ } else {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ }
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+ Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+ return isolate_->builtins()->builtin(name);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {