Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Version 3.18.2

OS::MemMove/OS::MemCopy: Don't call through to generated code when size == 0 to avoid prefetching invalid memory (Chromium issue 233500)

Removed heap snapshot size limit. (Chromium issue 232305)

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@14368 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
  • Loading branch information...
commit e69e1542b71a49e4194382f03e6754b1e6c13807 1 parent b522a29
ulan@chromium.org authored
Showing with 1,340 additions and 600 deletions.
  1. +1 −0  .gitignore
  2. +10 −0 ChangeLog
  3. +7 −0 src/accessors.cc
  4. +2 −0  src/accessors.h
  5. +4 −4 src/api.cc
  6. +29 −16 src/arm/code-stubs-arm.cc
  7. +2 −22 src/arm/code-stubs-arm.h
  8. +54 −0 src/arm/full-codegen-arm.cc
  9. +12 −2 src/arm/lithium-arm.cc
  10. +28 −17 src/arm/lithium-codegen-arm.cc
  11. +3 −2 src/arm/lithium-codegen-arm.h
  12. +18 −17 src/arm/macro-assembler-arm.cc
  13. +7 −0 src/arm/macro-assembler-arm.h
  14. +5 −5 src/arm/simulator-arm.cc
  15. +8 −5 src/assembler.cc
  16. +3 −3 src/ast.cc
  17. +13 −6 src/ast.h
  18. +1 −1  src/bignum-dtoa.cc
  19. +1 −1  src/cached-powers.cc
  20. +80 −12 src/code-stubs-hydrogen.cc
  21. +11 −1 src/code-stubs.cc
  22. +76 −37 src/code-stubs.h
  23. +10 −0 src/compiler.h
  24. +3 −3 src/conversions-inl.h
  25. +6 −1 src/conversions.cc
  26. +1 −1  src/dtoa.cc
  27. +1 −1  src/fixed-dtoa.cc
  28. +0 −24 src/full-codegen.cc
  29. +50 −39 src/heap-inl.h
  30. +0 −36 src/heap-snapshot-generator.cc
  31. +0 −1  src/heap-snapshot-generator.h
  32. +1 −1  src/heap.cc
  33. +1 −1  src/heap.h
  34. +14 −8 src/hydrogen-instructions.cc
  35. +32 −30 src/hydrogen-instructions.h
  36. +17 −19 src/hydrogen.cc
  37. +0 −4 src/hydrogen.h
  38. +25 −0 src/ia32/code-stubs-ia32.cc
  39. +2 −0  src/ia32/codegen-ia32.cc
  40. +54 −0 src/ia32/full-codegen-ia32.cc
  41. +69 −44 src/ia32/lithium-codegen-ia32.cc
  42. +3 −2 src/ia32/lithium-codegen-ia32.h
  43. +37 −12 src/ia32/lithium-ia32.cc
  44. +0 −3  src/ia32/macro-assembler-ia32.cc
  45. +21 −10 src/ic.cc
  46. +1 −0  src/ic.h
  47. +1 −1  src/isolate.cc
  48. +1 −1  src/json-stringifier.h
  49. +7 −1 src/log.cc
  50. +24 −0 src/mips/code-stubs-mips.cc
  51. +54 −0 src/mips/full-codegen-mips.cc
  52. +8 −10 src/mips/lithium-codegen-mips.cc
  53. +3 −2 src/mips/lithium-codegen-mips.h
  54. +10 −3 src/mips/lithium-mips.cc
  55. +9 −6 src/mips/simulator-mips.cc
  56. +1 −1  src/objects-debug.cc
  57. +3 −3 src/objects-inl.h
  58. +34 −7 src/objects.cc
  59. +26 −2 src/objects.h
  60. +27 −9 src/parser.cc
  61. +1 −1  src/platform-cygwin.cc
  62. +1 −1  src/platform-freebsd.cc
  63. +1 −1  src/platform-linux.cc
  64. +1 −1  src/platform-macos.cc
  65. +1 −1  src/platform-openbsd.cc
  66. +2 −1  src/platform-posix.cc
  67. +3 −1 src/platform-solaris.cc
  68. +3 −2 src/platform-win32.cc
  69. +2 −0  src/platform.h
  70. +8 −4 src/preparser.cc
  71. +1 −1  src/profile-generator.cc
  72. +80 −10 src/runtime.cc
  73. +6 −0 src/runtime.h
  74. +10 −1 src/sampler.cc
  75. +7 −2 src/sampler.h
  76. +1 −1  src/strtod.cc
  77. +1 −1  src/version.cc
  78. +10 −9 src/win32-math.cc
  79. +5 −5 src/win32-math.h
  80. +25 −0 src/x64/code-stubs-x64.cc
  81. +54 −0 src/x64/full-codegen-x64.cc
  82. +72 −47 src/x64/lithium-codegen-x64.cc
  83. +3 −2 src/x64/lithium-codegen-x64.h
  84. +29 −6 src/x64/lithium-x64.cc
  85. +4 −4 test/cctest/test-api.cc
  86. +9 −9 test/cctest/test-conversions.cc
  87. +2 −0  test/cctest/test-cpu-profiler.cc
  88. +1 −1  test/cctest/test-heap.cc
  89. +2 −2 test/cctest/test-log-stack-tracer.cc
  90. +1 −1  test/cctest/test-log.cc
  91. +3 −0  test/cctest/test-profile-generator.cc
  92. +6 −0 test/mjsunit/harmony/generators-parsing.js
  93. +3 −0  test/mjsunit/mjsunit.status
  94. +3 −3 test/mjsunit/tools/tickprocessor-test-func-info.log
  95. +13 −13 test/mjsunit/tools/tickprocessor-test.log
  96. +13 −19 tools/gcmole/gcmole.lua
  97. +7 −7 tools/gyp/v8.gyp
  98. +15 −6 tools/tickprocessor.js
View
1  .gitignore
@@ -19,6 +19,7 @@
*~
.cpplint-cache
.d8_history
+bsuite
d8
d8_g
shell
View
10 ChangeLog
@@ -1,3 +1,13 @@
+2013-04-22: Version 3.18.2
+
+ OS::MemMove/OS::MemCopy: Don't call through to generated code when size
+ == 0 to avoid prefetching invalid memory (Chromium issue 233500)
+
+ Removed heap snapshot size limit. (Chromium issue 232305)
+
+ Performance and stability improvements on all platforms.
+
+
2013-04-18: Version 3.18.1
Removed SCons related files and deprecated test suite configurations.
View
7 src/accessors.cc
@@ -441,6 +441,13 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
//
+Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate, Accessors::FunctionGetPrototype(*object, 0), Object);
+}
+
+
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Isolate* isolate = Isolate::Current();
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
View
2  src/accessors.h
@@ -79,6 +79,8 @@ class Accessors : public AllStatic {
// Accessor functions called directly from the runtime system.
MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
void*);
+ static Handle<Object> FunctionGetPrototype(Handle<Object> object);
+
MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
Object* value,
void*);
View
8 src/api.cc
@@ -27,8 +27,8 @@
#include "api.h"
-#include <math.h> // For isnan.
#include <string.h> // For memcpy, strlen.
+#include <cmath> // For isnan.
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
@@ -2984,7 +2984,7 @@ bool Value::StrictEquals(Handle<Value> that) const {
double x = obj->Number();
double y = other->Number();
// Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !isnan(x) && !isnan(y);
+ return x == y && !std::isnan(x) && !std::isnan(y);
} else if (*obj == *other) { // Also covers Booleans.
return true;
} else if (obj->IsSmi()) {
@@ -5568,7 +5568,7 @@ Local<v8::Value> v8::Date::New(double time) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Date::New()");
LOG_API(isolate, "Date::New");
- if (isnan(time)) {
+ if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = i::OS::nan_value();
}
@@ -5772,7 +5772,7 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
Local<Number> v8::Number::New(double value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- if (isnan(value)) {
+ if (std::isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
value = i::OS::nan_value();
}
View
45 src/arm/code-stubs-arm.cc
@@ -161,6 +161,30 @@ static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
}
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ r0.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler_;
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
Label check_heap_number, call_builtin;
@@ -1627,14 +1651,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const Register scratch = r1;
if (save_doubles_ == kSaveFPRegs) {
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
+ __ SaveFPRegs(sp, scratch);
}
const int argument_count = 1;
const int fp_argument_count = 0;
@@ -1646,14 +1663,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
+ __ RestoreFPRegs(sp, scratch);
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
@@ -7170,6 +7180,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
stub1.GetCode(isolate)->set_is_pregenerated(true);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
View
24 src/arm/code-stubs-arm.h
@@ -469,34 +469,14 @@ class RecordWriteStub: public PlatformCodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
- // Save all VFP registers except d0.
- // TODO(hans): We should probably save d0 too. And maybe use vstm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
+ masm->SaveFPRegs(sp, scratch0_);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- // Restore all VFP registers except d0.
- // TODO(hans): We should probably restore d0 too. And maybe use vldm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
+ masm->RestoreFPRegs(sp, scratch0_);
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
View
54 src/arm/full-codegen-arm.cc
@@ -1922,6 +1922,60 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::INITIAL:
+ case Yield::SUSPEND: {
+ VisitForStackValue(expr->generator_object());
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ Label resume;
+ __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ __ b(ne, &resume);
+ __ pop(result_register());
+ if (expr->yield_kind() == Yield::SUSPEND) {
+ // TODO(wingo): Box into { value: VALUE, done: false }.
+ }
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ str(r1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ __ pop(result_register());
+ // TODO(wingo): Box into { value: VALUE, done: true }.
+
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING:
+ UNIMPLEMENTED();
+ }
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
View
14 src/arm/lithium-arm.cc
@@ -989,12 +989,14 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new(zone()) LArgumentsLength(value));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
return DefineAsRegister(new(zone()) LArgumentsElements);
}
@@ -2456,9 +2458,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = Use(instr->index());
+ }
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
View
45 src/arm/lithium-codegen-arm.cc
@@ -238,7 +238,12 @@ bool LCodeGen::GeneratePrologue() {
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
__ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, GetLinkRegisterState(), kSaveFPRegs);
+ cp,
+ target.offset(),
+ r0,
+ r3,
+ GetLinkRegisterState(),
+ kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -2170,17 +2175,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
+int LCodeGen::GetNextEmittedBlock() {
+ for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
+ int next_block = GetNextEmittedBlock();
right_block = chunk_->LookupDestination(right_block);
left_block = chunk_->LookupDestination(left_block);
@@ -2317,10 +2321,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ int destination = chunk_->LookupDestination(block);
+ if (destination != GetNextEmittedBlock()) {
+ __ jmp(chunk_->GetAssemblyLabel(destination));
}
}
@@ -3274,14 +3277,22 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them add one more.
+ __ sub(length, length, index);
+ __ add(length, length, Operand(1));
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
}
View
5 src/arm/lithium-codegen-arm.h
@@ -83,7 +83,8 @@ class LCodeGen BASE_EMBEDDED {
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
- !info()->IsStub();
+ !info()->IsStub() ||
+ info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -200,7 +201,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
- int GetNextEmittedBlock(int block);
+ int GetNextEmittedBlock();
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
View
35 src/arm/macro-assembler-arm.cc
@@ -837,14 +837,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- vstm(db_w, sp, d16, d31, ne);
- sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- vstm(db_w, sp, d0, d15);
+ SaveFPRegs(sp, ip);
// Note that d0 will be accessible at
// fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
@@ -905,15 +898,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
const int offset = 2 * kPointerSize;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Pop registers d0-d15, and possibly d16-d31, from r3.
- // If d16-d31 are not popped, increase r3 instead.
- vldm(ia_w, r3, d0, d15);
- vldm(ia_w, r3, d16, d31, ne);
- add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
+ RestoreFPRegs(r3, ip);
}
// Clear top frame.
@@ -3183,6 +3168,22 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) {
}
+void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vstm(db_w, location, d16, d31, ne);
+ sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, location, d0, d15);
+}
+
+
+void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+ CheckFor32DRegs(scratch);
+ vldm(ia_w, location, d0, d15);
+ vldm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
+
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first,
Register second,
View
7 src/arm/macro-assembler-arm.h
@@ -997,6 +997,13 @@ class MacroAssembler: public Assembler {
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
+ // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
+ // values to location, saving [d0..(d15|d31)].
+ void SaveFPRegs(Register location, Register scratch);
+
+ // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
+ // values to location, restoring [d0..(d15|d31)].
+ void RestoreFPRegs(Register location, Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
View
10 src/arm/simulator-arm.cc
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
-#include <math.h>
+#include <cmath>
#include <cstdarg>
#include "v8.h"
@@ -331,7 +331,7 @@ void ArmDebugger::Debug() {
PrintF("\n");
}
}
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = BitCast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
@@ -1297,7 +1297,7 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- if (isnan(val1) || isnan(val2)) {
+ if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
@@ -1866,7 +1866,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
double Simulator::canonicalizeNaN(double value) {
- return (FPSCR_default_NaN_mode_ && isnan(value)) ?
+ return (FPSCR_default_NaN_mode_ && std::isnan(value)) ?
FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
}
@@ -2947,7 +2947,7 @@ void Simulator::DecodeVCMP(Instruction* instr) {
// Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
+ if (std::isnan(dd_value)) {
inv_op_vfp_flag_ = true;
}
}
View
13 src/assembler.cc
@@ -34,7 +34,7 @@
#include "assembler.h"
-#include <math.h> // For cos, log, pow, sin, tan, etc.
+#include <cmath>
#include "api.h"
#include "builtins.h"
#include "counters.h"
@@ -1459,10 +1459,11 @@ double power_helper(double x, double y) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
- return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ return (std::isinf(x)) ? V8_INFINITY
+ : fast_sqrt(x + 0.0); // Convert -0 to +0.
}
if (y == -0.5) {
- return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
}
return power_double_double(x, y);
}
@@ -1492,7 +1493,7 @@ double power_double_double(double x, double y) {
(!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
// MinGW64 has a custom implementation for pow. This handles certain
// special cases that are different.
- if ((x == 0.0 || isinf(x)) && isfinite(y)) {
+ if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
double f;
if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
}
@@ -1505,7 +1506,9 @@ double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
+ if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+ return OS::nan_value();
+ }
return pow(x, y);
}
View
6 src/ast.cc
@@ -27,7 +27,7 @@
#include "ast.h"
-#include <math.h> // For isfinite.
+#include <cmath> // For isfinite.
#include "builtins.h"
#include "code-stubs.h"
#include "conversions.h"
@@ -241,8 +241,8 @@ bool IsEqualNumber(void* first, void* second) {
if (h2->IsSmi()) return false;
Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(isfinite(n1->value()));
- ASSERT(isfinite(n2->value()));
+ ASSERT(std::isfinite(n1->value()));
+ ASSERT(std::isfinite(n2->value()));
return n1->value() == n2->value();
}
View
19 src/ast.h
@@ -1964,27 +1964,34 @@ class Yield: public Expression {
public:
DECLARE_NODE_TYPE(Yield)
+ enum Kind {
+ INITIAL, // The initial yield that returns the unboxed generator object.
+ SUSPEND, // A normal yield: { value: EXPRESSION, done: false }
+ DELEGATING, // A yield*.
+ FINAL // A return: { value: EXPRESSION, done: true }
+ };
+
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
- bool is_delegating_yield() const { return is_delegating_yield_; }
+ Kind yield_kind() const { return yield_kind_; }
virtual int position() const { return pos_; }
protected:
Yield(Isolate* isolate,
Expression* generator_object,
Expression* expression,
- bool is_delegating_yield,
+ Kind yield_kind,
int pos)
: Expression(isolate),
generator_object_(generator_object),
expression_(expression),
- is_delegating_yield_(is_delegating_yield),
+ yield_kind_(yield_kind),
pos_(pos) { }
private:
Expression* generator_object_;
Expression* expression_;
- bool is_delegating_yield_;
+ Kind yield_kind_;
int pos_;
};
@@ -2966,10 +2973,10 @@ class AstNodeFactory BASE_EMBEDDED {
Yield* NewYield(Expression *generator_object,
Expression* expression,
- bool is_delegating_yield,
+ Yield::Kind yield_kind,
int pos) {
Yield* yield = new(zone_) Yield(
- isolate_, generator_object, expression, is_delegating_yield, pos);
+ isolate_, generator_object, expression, yield_kind, pos);
VISIT_AND_RETURN(Yield, yield)
}
View
2  src/bignum-dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
View
2  src/cached-powers.cc
@@ -26,8 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <math.h>
#include <limits.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "globals.h"
View
92 src/code-stubs-hydrogen.cc
@@ -61,11 +61,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
arguments_length_(NULL),
info_(stub, isolate),
context_(NULL) {
- int major_key = stub->MajorKey();
- descriptor_ = isolate->code_stub_interface_descriptor(major_key);
- if (descriptor_->register_param_count_ < 0) {
- stub->InitializeInterfaceDescriptor(isolate, descriptor_);
- }
+ descriptor_ = stub->GetInterfaceDescriptor(isolate);
parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
}
virtual bool BuildGraph();
@@ -96,6 +92,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
bool CodeStubGraphBuilderBase::BuildGraph() {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
if (FLAG_trace_hydrogen) {
const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
PrintF("-----------------------------------------------------------\n");
@@ -176,16 +175,87 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
: CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
protected:
- virtual HValue* BuildCodeStub();
+ virtual HValue* BuildCodeStub() {
+ if (casted_stub()->IsMiss()) {
+ return BuildCodeInitializedStub();
+ } else {
+ return BuildCodeUninitializedStub();
+ }
+ }
+
+ virtual HValue* BuildCodeInitializedStub() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual HValue* BuildCodeUninitializedStub() {
+ // Force a deopt that falls back to the runtime.
+ HValue* undefined = graph()->GetConstantUndefined();
+ CheckBuilder builder(this);
+ builder.CheckNotUndefined(undefined);
+ builder.End();
+ return undefined;
+ }
+
Stub* casted_stub() { return static_cast<Stub*>(stub()); }
};
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ GenerateLightweightMiss(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ GetCodeKind(),
+ GetICState(),
+ GetExtraICState(),
+ GetStubType(), -1);
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
template <class Stub>
static Handle<Code> DoGenerateCode(Stub* stub) {
- CodeStubGraphBuilder<Stub> builder(stub);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen();
+ Isolate* isolate = Isolate::Current();
+ CodeStub::Major major_key =
+ static_cast<HydrogenCodeStub*>(stub)->MajorKey();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate->code_stub_interface_descriptor(major_key);
+ if (descriptor->register_param_count_ < 0) {
+ stub->InitializeInterfaceDescriptor(isolate, descriptor);
+ }
+ // The miss case without stack parameters can use a light-weight stub to enter
+ // the runtime that is significantly faster than using the standard
+ // stub-failure deopt mechanism.
+ if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) {
+ return stub->GenerateLightweightMissCode(isolate);
+ } else {
+ CodeStubGraphBuilder<Stub> builder(stub);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen();
+ }
}
@@ -248,9 +318,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
- CodeStubGraphBuilder<FastCloneShallowArrayStub> builder(this);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen();
+ return DoGenerateCode(this);
}
View
12 src/code-stubs.cc
@@ -37,6 +37,16 @@
namespace v8 {
namespace internal {
+
+CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
+ : register_param_count_(-1),
+ stack_parameter_count_(NULL),
+ function_mode_(NOT_JS_FUNCTION_STUB_MODE),
+ register_params_(NULL),
+ deoptimization_handler_(NULL),
+ miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { }
+
+
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
@@ -557,7 +567,7 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) {
ASSERT(!object->IsUndetectableObject());
Add(HEAP_NUMBER);
double value = HeapNumber::cast(*object)->value();
- return value != 0 && !isnan(value);
+ return value != 0 && !std::isnan(value);
} else {
// We should never see an internal object at runtime here!
UNREACHABLE();
View
113 src/code-stubs.h
@@ -29,6 +29,7 @@
#define V8_CODE_STUBS_H_
#include "allocation.h"
+#include "assembler.h"
#include "globals.h"
#include "codegen.h"
@@ -260,17 +261,15 @@ class PlatformCodeStub : public CodeStub {
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
+
struct CodeStubInterfaceDescriptor {
- CodeStubInterfaceDescriptor()
- : register_param_count_(-1),
- stack_parameter_count_(NULL),
- function_mode_(NOT_JS_FUNCTION_STUB_MODE),
- register_params_(NULL) { }
+ CodeStubInterfaceDescriptor();
int register_param_count_;
const Register* stack_parameter_count_;
StubFunctionMode function_mode_;
Register* register_params_;
Address deoptimization_handler_;
+ ExternalReference miss_handler_;
int environment_length() const {
if (stack_parameter_count_ != NULL) {
@@ -283,8 +282,14 @@ struct CodeStubInterfaceDescriptor {
class HydrogenCodeStub : public CodeStub {
public:
- // Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode() = 0;
+ enum InitializationState {
+ CODE_STUB_IS_NOT_MISS,
+ CODE_STUB_IS_MISS
+ };
+
+ explicit HydrogenCodeStub(InitializationState state) {
+ is_miss_ = (state == CODE_STUB_IS_MISS);
+ }
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -292,9 +297,36 @@ class HydrogenCodeStub : public CodeStub {
return isolate->code_stub_interface_descriptor(MajorKey());
}
+ bool IsMiss() { return is_miss_; }
+
+ template<class SubClass>
+ static Handle<Code> GetUninitialized(Isolate* isolate) {
+ SubClass::GenerateAheadOfTime(isolate);
+ return SubClass().GetCode(isolate);
+ }
+
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) = 0;
+
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode() = 0;
+
+ virtual int NotMissMinorKey() = 0;
+
+ Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
+
+ private:
+ class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
+ class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
+
+ void GenerateLightweightMiss(MacroAssembler* masm);
+ virtual int MinorKey() {
+ return IsMissBits::encode(is_miss_) |
+ MinorKeyBits::encode(NotMissMinorKey());
+ }
+
+ bool is_miss_;
};
@@ -467,7 +499,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
FastCloneShallowArrayStub(Mode mode,
AllocationSiteMode allocation_site_mode,
int length)
- : mode_(mode),
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
+ mode_(mode),
allocation_site_mode_(allocation_site_mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT_GE(length_, 0);
@@ -513,7 +546,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
STATIC_ASSERT(kFastCloneModeCount < 16);
STATIC_ASSERT(kMaximumClonedLength < 16);
Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() {
+ int NotMissMinorKey() {
return AllocationSiteModeBits::encode(allocation_site_mode_)
| ModeBits::encode(mode_)
| LengthBits::encode(length_);
@@ -526,7 +559,9 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
- explicit FastCloneShallowObjectStub(int length) : length_(length) {
+ explicit FastCloneShallowObjectStub(int length)
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
+ length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
@@ -543,7 +578,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
int length_;
Major MajorKey() { return FastCloneShallowObject; }
- int MinorKey() { return length_; }
+ int NotMissMinorKey() { return length_; }
DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
};
@@ -1291,19 +1326,20 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
public:
KeyedLoadDictionaryElementStub() {}
- Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return DICTIONARY_ELEMENTS; }
-
void Generate(MacroAssembler* masm);
private:
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return DICTIONARY_ELEMENTS; }
+
DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
};
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
- KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind)
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array);
}
@@ -1323,12 +1359,12 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
private:
- class IsJSArrayBits: public BitField<bool, 8, 1> {};
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class IsJSArrayBits: public BitField<bool, 8, 1> {};
uint32_t bit_field_;
Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return bit_field_; }
+ int NotMissMinorKey() { return bit_field_; }
DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
};
@@ -1338,15 +1374,13 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
public:
KeyedStoreFastElementStub(bool is_js_array,
ElementsKind elements_kind,
- KeyedAccessStoreMode mode) {
+ KeyedAccessStoreMode mode)
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array) |
StoreModeBits::encode(mode);
}
- Major MajorKey() { return KeyedStoreElement; }
- int MinorKey() { return bit_field_; }
-
bool is_js_array() const {
return IsJSArrayBits::decode(bit_field_);
}
@@ -1371,6 +1405,9 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
class IsJSArrayBits: public BitField<bool, 12, 1> {};
uint32_t bit_field_;
+ Major MajorKey() { return KeyedStoreElement; }
+ int NotMissMinorKey() { return bit_field_; }
+
DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub);
};
@@ -1378,7 +1415,8 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
TransitionElementsKindStub(ElementsKind from_kind,
- ElementsKind to_kind) {
+ ElementsKind to_kind)
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
bit_field_ = FromKindBits::encode(from_kind) |
ToKindBits::encode(to_kind);
}
@@ -1403,7 +1441,7 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
uint32_t bit_field_;
Major MajorKey() { return TransitionElementsKind; }
- int MinorKey() { return bit_field_; }
+ int NotMissMinorKey() { return bit_field_; }
DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
};
@@ -1411,12 +1449,10 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
public:
- ArrayNoArgumentConstructorStub() {
+ ArrayNoArgumentConstructorStub()
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
}
- Major MajorKey() { return ArrayNoArgumentConstructor; }
- int MinorKey() { return 0; }
-
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
@@ -1424,17 +1460,17 @@ class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
private:
+ Major MajorKey() { return ArrayNoArgumentConstructor; }
+ int NotMissMinorKey() { return 0; }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
};
class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
public:
- ArraySingleArgumentConstructorStub() {
- }
-
- Major MajorKey() { return ArraySingleArgumentConstructor; }
- int MinorKey() { return 0; }
+ ArraySingleArgumentConstructorStub()
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {}
virtual Handle<Code> GenerateCode();
@@ -1443,17 +1479,17 @@ class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
private:
+ Major MajorKey() { return ArraySingleArgumentConstructor; }
+ int NotMissMinorKey() { return 0; }
+
DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
};
class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
public:
- ArrayNArgumentsConstructorStub() {
- }
-
- Major MajorKey() { return ArrayNArgumentsConstructor; }
- int MinorKey() { return 0; }
+ ArrayNArgumentsConstructorStub()
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {}
virtual Handle<Code> GenerateCode();
@@ -1462,6 +1498,9 @@ class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
private:
+ Major MajorKey() { return ArrayNArgumentsConstructor; }
+ int NotMissMinorKey() { return 0; }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
};
View
10 src/compiler.h
@@ -143,6 +143,14 @@ class CompilationInfo {
return SavesCallerDoubles::decode(flags_);
}
+ void MarkAsRequiresFrame() {
+ flags_ |= RequiresFrame::encode(true);
+ }
+
+ bool requires_frame() const {
+ return RequiresFrame::decode(flags_);
+ }
+
void SetParseRestriction(ParseRestriction restriction) {
flags_ = ParseRestricitonField::update(flags_, restriction);
}
@@ -300,6 +308,8 @@ class CompilationInfo {
class SavesCallerDoubles: public BitField<bool, 12, 1> {};
// If the set of valid statements is restricted.
class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+ // If the function requires a frame (for unspecified reasons)
+ class RequiresFrame: public BitField<bool, 14, 1> {};
unsigned flags_;
View
6 src/conversions-inl.h
@@ -29,9 +29,9 @@
#define V8_CONVERSIONS_INL_H_
#include <limits.h> // Required for INT_MAX etc.
-#include <math.h>
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <stdarg.h>
+#include <cmath>
#include "globals.h" // Required for V8_INFINITY
// ----------------------------------------------------------------------------
@@ -86,8 +86,8 @@ inline unsigned int FastD2UI(double x) {
inline double DoubleToInteger(double x) {
- if (isnan(x)) return 0;
- if (!isfinite(x) || x == 0) return x;
+ if (std::isnan(x)) return 0;
+ if (!std::isfinite(x) || x == 0) return x;
return (x >= 0) ? floor(x) : ceil(x);
}
View
7 src/conversions.cc
@@ -26,14 +26,19 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <math.h>
#include <limits.h>
+#include <cmath>
#include "conversions-inl.h"
#include "dtoa.h"
#include "strtod.h"
#include "utils.h"
+#ifndef _STLP_VENDOR_CSTD
+// STLPort doesn't import fpclassify into the std namespace.
+using std::fpclassify;
+#endif
+
namespace v8 {
namespace internal {
View
2  src/dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
View
2  src/fixed-dtoa.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <math.h>
+#include <cmath>
#include "../include/v8stdint.h"
#include "checks.h"
View
24 src/full-codegen.cc
@@ -1548,30 +1548,6 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
}
-void FullCodeGenerator::VisitYield(Yield* expr) {
- if (expr->is_delegating_yield())
- UNIMPLEMENTED();
-
- Comment cmnt(masm_, "[ Yield");
- // TODO(wingo): Actually update the iterator state.
- VisitForEffect(expr->generator_object());
- VisitForAccumulatorValue(expr->expression());
- // TODO(wingo): Assert that the operand stack depth is 0, at least while
- // general yield expressions are unimplemented.
-
- // TODO(wingo): What follows is as in VisitReturnStatement. Replace it with a
- // call to a builtin that will resume the generator.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
-}
-
-
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
View
89 src/heap-inl.h
@@ -577,56 +577,67 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
- do { \
- GC_GREEDY_CHECK(); \
- MaybeObject* __maybe_object__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
- allocation_space(), \
- "allocation failure"); \
- __maybe_object__ = FUNCTION_CALL; \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
- } \
- if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
- { \
- AlwaysAllocateScope __scope__; \
- __maybe_object__ = FUNCTION_CALL; \
- } \
- if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory() || \
- __maybe_object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\
- } \
- RETURN_EMPTY; \
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
+ do { \
+ GC_GREEDY_CHECK(); \
+ MaybeObject* __maybe_object__ = FUNCTION_CALL; \
+ Object* __object__ = NULL; \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
+ ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ allocation_space(), \
+ "allocation failure"); \
+ __maybe_object__ = FUNCTION_CALL; \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
+ ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
+ ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ { \
+ AlwaysAllocateScope __scope__; \
+ __maybe_object__ = FUNCTION_CALL; \
+ } \
+ if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
+ if (__maybe_object__->IsOutOfMemory()) { \
+ OOM; \
+ } \
+ if (__maybe_object__->IsRetryAfterGC()) { \
+ /* TODO(1181417): Fix this. */ \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+ } \
+ RETURN_EMPTY; \
} while (false)
+#define CALL_AND_RETRY_OR_DIE( \
+ ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+ CALL_AND_RETRY( \
+ ISOLATE, \
+ FUNCTION_CALL, \
+ RETURN_VALUE, \
+ RETURN_EMPTY, \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(ISOLATE, \
- FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>())
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
+ CALL_AND_RETRY_OR_DIE(ISOLATE, \
+ FUNCTION_CALL, \
+ return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
+ return Handle<TYPE>()) \
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+ CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
#define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \
CALL_AND_RETRY(ISOLATE, \
FUNCTION_CALL, \
return __object__, \
+ return __maybe_object__, \
return __maybe_object__)
View
36 src/heap-snapshot-generator.cc
@@ -190,7 +190,6 @@ template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapEntrySize = 24;
static const int kExpectedHeapSnapshotsCollectionSize = 100;
static const int kExpectedHeapSnapshotSize = 132;
- static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
template <> struct SnapshotSizeConstants<8> {
@@ -198,8 +197,6 @@ template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapEntrySize = 32;
static const int kExpectedHeapSnapshotsCollectionSize = 152;
static const int kExpectedHeapSnapshotSize = 160;
- static const uint64_t kMaxSerializableSnapshotRawSize =
- static_cast<uint64_t>(6000) * MB;
};
} // namespace
@@ -2384,42 +2381,9 @@ const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
-
- HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->RawSnapshotSize() >=
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
- // The snapshot is too big. Serialize a fake snapshot.
- original_snapshot = snapshot_;
- snapshot_ = CreateFakeSnapshot();
- }
-
SerializeImpl();
-
delete writer_;
writer_ = NULL;
-
- if (original_snapshot != NULL) {
- delete snapshot_;
- snapshot_ = original_snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
- HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
- snapshot_->title(),
- snapshot_->uid());
- result->AddRootEntry();
- const char* text = snapshot_->collection()->names()->GetFormatted(
- "The snapshot is too big. "
- "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
- "Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
- (snapshot_->RawSnapshotSize() + MB - 1) / MB);
- HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
- result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildren();
- return result;
}
View
1  src/heap-snapshot-generator.h
@@ -655,7 +655,6 @@ class HeapSnapshotJSONSerializer {
v8::internal::kZeroHashSeed);
}
- HeapSnapshot* CreateFakeSnapshot();
int GetStringId(const char* s);
int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
View
2  src/heap.cc
@@ -2745,7 +2745,7 @@ bool Heap::CreateInitialObjects() {
if (!maybe_obj->ToObject(&obj)) return false;
}
set_minus_zero_value(HeapNumber::cast(obj));
- ASSERT(signbit(minus_zero_value()->Number()) != 0);
+ ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
{ MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
View
2  src/heap.h
@@ -28,7 +28,7 @@
#ifndef V8_HEAP_H_
#define V8_HEAP_H_
-#include <math.h>
+#include <cmath>
#include "allocation.h"
#include "globals.h"
View
22 src/hydrogen-instructions.cc
@@ -1683,9 +1683,15 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
Range* HValue::InferRange(Zone* zone) {
- // Untagged integer32 cannot be -0, all other representations can.
- Range* result = new(zone) Range();
- result->set_can_be_minus_zero(!representation().IsInteger32());
+ Range* result;
+ if (type().IsSmi()) {
+ result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
+ result->set_can_be_minus_zero(false);
+ } else {
+ // Untagged integer32 cannot be -0, all other representations can.
+ result = new(zone) Range();
+ result->set_can_be_minus_zero(!representation().IsInteger32());
+ }
return result;
}
@@ -2139,7 +2145,7 @@ HConstant::HConstant(double double_value,
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
- boolean_value_(double_value != 0 && !isnan(double_value)),
+ boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
Initialize(r);
@@ -3176,7 +3182,7 @@ HInstruction* HStringCharFromCode::New(
HConstant* c_code = HConstant::cast(char_code);
Isolate* isolate = Isolate::Current();
if (c_code->HasNumberValue()) {
- if (isfinite(c_code->DoubleValue())) {
+ if (std::isfinite(c_code->DoubleValue())) {
uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate,
code),
@@ -3209,10 +3215,10 @@ HInstruction* HUnaryMathOperation::New(
HConstant* constant = HConstant::cast(value);
if (!constant->HasNumberValue()) break;
double d = constant->DoubleValue();
- if (isnan(d)) { // NaN poisons everything.
+ if (std::isnan(d)) { // NaN poisons everything.
return H_CONSTANT_DOUBLE(OS::nan_value());
}
- if (isinf(d)) { // +Infinity and -Infinity.
+ if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
case kMathSin:
case kMathCos:
@@ -3276,7 +3282,7 @@ HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) {
if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
double result = power_helper(c_left->DoubleValue(),
c_right->DoubleValue());
- return H_CONSTANT_DOUBLE(isnan(result) ? OS::nan_value() : result);
+ return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result);
}
}
return new(zone) HPower(left, right);
View
62 src/hydrogen-instructions.h
@@ -2680,39 +2680,27 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMaps: public HTemplateInstruction<2> {
public:
- HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
- HValue* typecheck = NULL)
- : map_unique_ids_(0, zone) {
- SetOperandAt(0, value);
- // If callers don't depend on a typecheck, they can pass in NULL. In that
- // case we use a copy of the |value| argument as a dummy value.
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
- map_set()->Add(map, zone);
+ static HCheckMaps* New(HValue* value, Handle<Map> map, Zone* zone,
+ HValue *typecheck = NULL) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
+ check_map->map_set_.Add(map, zone);
+ return check_map;
}
- HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone)
- : map_unique_ids_(0, zone) {
- SetOperandAt(0, value);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+
+ static HCheckMaps* New(HValue* value, SmallMapList* maps, Zone* zone,
+ HValue *typecheck = NULL) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
- map_set()->Add(maps->at(i), zone);
+ check_map->map_set_.Add(maps->at(i), zone);
}
- map_set()->Sort();
+ check_map->map_set_.Sort();
+ return check_map;
}
- static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
+ static HCheckMaps* NewWithTransitions(HValue* value, Handle<Map> map,
Zone* zone) {
- HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
- SmallMapList* map_set = check_map->map_set();
+ HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
+ check_map->map_set_.Add(map, zone);
// Since transitioned elements maps of the initial map don't fail the map
// check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
@@ -2725,10 +2713,10 @@ class HCheckMaps: public HTemplateInstruction<2> {
Map* transitioned_map =
map->LookupElementsTransitionMap(kind);
if (transitioned_map) {
- map_set->Add(Handle<Map>(transitioned_map), zone);
+ check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
}
};
- map_set->Sort();
+ check_map->map_set_.Sort();
return check_map;
}
@@ -2763,6 +2751,20 @@ class HCheckMaps: public HTemplateInstruction<2> {
}
private:
+ // Clients should use one of the static New* methods above.
+ HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
+ : map_unique_ids_(0, zone) {
+ SetOperandAt(0, value);
+ // Use the object value for the dependency if NULL is passed.
+ // TODO(titzer): do GVN flags already express this dependency?
+ SetOperandAt(1, typecheck != NULL ? typecheck : value);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
+ }
+
SmallMapList map_set_;
ZoneList<UniqueValueId> map_unique_ids_;
};
@@ -3239,7 +3241,7 @@ class HConstant: public HTemplateInstruction<0> {
return has_double_value_ &&
(BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
FixedDoubleArray::is_the_hole_nan(double_value_) ||
- isnan(double_value_));
+ std::isnan(double_value_));
}
bool ImmortalImmovable() const {
View
36 src/hydrogen.cc
@@ -714,8 +714,6 @@ HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
finished_(false),
did_then_(false),
did_else_(false),
- deopt_then_(false),
- deopt_else_(false),
did_and_(false),
did_or_(false),
captured_(false),
@@ -736,8 +734,6 @@ HGraphBuilder::IfBuilder::IfBuilder(
finished_(false),
did_then_(false),
did_else_(false),
- deopt_then_(false),
- deopt_else_(false),
did_and_(false),
did_or_(false),
captured_(false),
@@ -874,7 +870,6 @@ void HGraphBuilder::IfBuilder::Deopt() {
block->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
if (did_else_) {
first_false_block_ = NULL;
- did_else_ = false;
} else {
first_true_block_ = NULL;
}
@@ -888,8 +883,9 @@ void HGraphBuilder::IfBuilder::End() {
last_true_block_ = builder_->current_block();
}
if (first_true_block_ == NULL) {
- // Deopt on true. Nothing to do, just continue the else block.
+ // Deopt on true. Nothing to do, just continue the false block.
} else if (first_false_block_ == NULL) {
+ // Deopt on false. Nothing to do except switching to the true block.
builder_->set_current_block(last_true_block_);
} else {
HEnvironment* merge_env = last_true_block_->last_environment()->Copy();
@@ -1081,7 +1077,7 @@ HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) {
HValue* HGraphBuilder::BuildCheckMap(HValue* obj,
Handle<Map> map) {
- HCheckMaps* check = new(zone()) HCheckMaps(obj, map, zone());
+ HCheckMaps* check = HCheckMaps::New(obj, map, zone());
AddInstruction(check);
return check;
}
@@ -1297,7 +1293,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
AddInstruction(new(zone) HLoadElements(object, mapcheck));
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+ HCheckMaps* check_cow_map = HCheckMaps::New(
elements, isolate()->factory()->fixed_array_map(), zone);
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
@@ -1319,14 +1315,17 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
IfBuilder length_checker(this);
length_checker.IfCompare(key, length, Token::LT);
length_checker.Then();
- CheckBuilder negative_checker(this);
- HValue* bounds_check = negative_checker.CheckIntegerCompare(
+ IfBuilder negative_checker(this);
+ HValue* bounds_check = negative_checker.IfCompare(
key, graph()->GetConstant0(), Token::GTE);
- negative_checker.End();
+ negative_checker.Then();
HInstruction* result = BuildExternalArrayElementAccess(
external_elements, key, val, bounds_check,
elements_kind, is_store);
AddInstruction(result);
+ negative_checker.Else();
+ negative_checker.Deopt();
+ negative_checker.End();
length_checker.End();
return result;
} else {
@@ -1371,7 +1370,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
length);
} else {
- HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+ HCheckMaps* check_cow_map = HCheckMaps::New(
elements, isolate()->factory()->fixed_array_map(), zone);
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
@@ -6652,7 +6651,7 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(new(zone()) HCheckMaps(object, map, zone()));
+ AddInstruction(HCheckMaps::New(object, map, zone()));
}
@@ -6781,7 +6780,7 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* typecheck =
- AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
+ AddInstruction(HCheckMaps::New(object, types, zone()));
HInstruction* instr =
HLoadNamedField::NewArrayLength(zone(), object, typecheck);
instr->set_position(expr->position());
@@ -6833,7 +6832,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
- AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
+ AddInstruction(HCheckMaps::New(object, types, zone()));
instr = BuildLoadNamedField(object, map, &lookup);
} else {
HValue* context = environment()->LookupContext();
@@ -7510,8 +7509,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
Handle<Map> map,
bool is_store,
KeyedAccessStoreMode store_mode) {
- HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
- zone(), dependency);
+ HCheckMaps* mapcheck = HCheckMaps::New(object, map, zone(), dependency);
AddInstruction(mapcheck);
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
@@ -7568,7 +7566,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
+ HCheckMaps* check_maps = HCheckMaps::New(object, maps, zone());
AddInstruction(check_maps);
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
object, key, val, check_maps,
@@ -7720,7 +7718,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* access;
if (IsFastElementsKind(elements_kind)) {
if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- AddInstruction(new(zone()) HCheckMaps(
+ AddInstruction(HCheckMaps::New(
elements, isolate()->factory()->fixed_array_map(),
zone(), elements_kind_branch));
}
View
4 src/hydrogen.h
@@ -1067,7 +1067,6 @@ class HGraphBuilder {
return compare;
}
- template<class Condition>
HInstruction* OrIfCompare(
HValue* p1,
HValue* p2,
@@ -1094,7 +1093,6 @@ class HGraphBuilder {
return If<Condition>(p1, p2);
}
- template<class Condition>
HInstruction* AndIfCompare(
HValue* p1,
HValue* p2,
@@ -1142,8 +1140,6 @@ class HGraphBuilder {
bool finished_ : 1;
bool did_then_ : 1;
bool did_else_ : 1;
- bool deopt_then_ : 1;
- bool deopt_else_ : 1;
bool did_and_ : 1;
bool did_or_ : 1;
bool captured_ : 1;
View
25 src/ia32/code-stubs-ia32.cc
@@ -141,6 +141,31 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
#define __ ACCESS_MASM(masm)
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ eax.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler_;
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ ret(0);
+}
+
+
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
Label check_heap_number, call_builtin;
View
2  src/ia32/codegen-ia32.cc
@@ -635,6 +635,8 @@ OS::MemMoveFunction CreateMemMoveFunction() {
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
+ // TODO(jkummerow): It would be nice to register this code creation event
+ // with the PROFILE / GDBJIT system.
return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
View
54 src/ia32/full-codegen-ia32.cc
@@ -1883,6 +1883,60 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::INITIAL:
+ case Yield::SUSPEND: {
+ VisitForStackValue(expr->generator_object());
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ Label resume;
+ __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &resume);
+ __ pop(result_register());
+ if (expr->yield_kind() == Yield::SUSPEND) {
+ // TODO(wingo): Box into { value: VALUE, done: false }.
+ }
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(FieldOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ pop(result_register());
+ // TODO(wingo): Box into { value: VALUE, done: true }.
+
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING:
+ UNIMPLEMENTED();
+ }
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
View
113 src/ia32/lithium-codegen-ia32.cc
@@ -2056,17 +2056,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
+int LCodeGen::GetNextEmittedBlock() {
+ for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
+ int next_block = GetNextEmittedBlock();
right_block = chunk_->LookupDestination(right_block);
left_block = chunk_->LookupDestination(left_block);
@@ -2204,10 +2203,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ int destination = chunk_->LookupDestination(block);
+ if (destination != GetNextEmittedBlock()) {
+ __ jmp(chunk_->GetAssemblyLabel(destination));
}
}
@@ -3198,13 +3196,21 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ mov(result, Operand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Operand index = ToOperand(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them; add one more.
+ __ sub(length, index);
+ __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+ }
}
@@ -4222,7 +4228,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
int offset = instr->offset();
if (!instr->transition().is_null()) {
@@ -4248,34 +4253,42 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ mov(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());