
Version 3.2.1

Fixed a number of crash bugs.

Improved Crankshaft for x64 and ARM.

Implemented more of ECMAScript 5 strict mode.

git-svn-id: https://v8.googlecode.com/svn/trunk@7114 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Commit 5a349b25fcac04fb28efd555b6c81e3ca89c289a (1 parent: 3cf008e), committed by danno@chromium.org on Mar 10, 2011
Showing with 2,085 additions and 2,664 deletions.
  1. +9 −0 ChangeLog
  2. +2 −1 include/v8.h
  3. +1 −1 src/accessors.cc
  4. +40 −18 src/api.cc
  5. +105 −152 src/arm/code-stubs-arm.cc
  6. +6 −50 src/arm/code-stubs-arm.h
  7. +10 −4 src/arm/codegen-arm.cc
  8. +10 −5 src/arm/full-codegen-arm.cc
  9. +7 −39 src/arm/ic-arm.cc
  10. +16 −9 src/arm/lithium-arm.cc
  11. +30 −10 src/arm/lithium-arm.h
  12. +39 −12 src/arm/lithium-codegen-arm.cc
  13. +1 −0 src/arm/lithium-codegen-arm.h
  14. +212 −290 src/arm/stub-cache-arm.cc
  15. +5 −0 src/ast-inl.h
  16. +9 −1 src/ast.cc
  17. +18 −4 src/ast.h
  18. +3 −1 src/code-stubs.cc
  19. +7 −1 src/code-stubs.h
  20. +12 −13 src/debug.cc
  21. +5 −14 src/factory.cc
  22. +3 −7 src/factory.h
  23. +0 −5 src/frames.cc
  24. +0 −6 src/frames.h
  25. +1 −2 src/handles.cc
  26. +4 −4 src/handles.h
  27. +14 −56 src/heap.cc
  28. +28 −33 src/heap.h
  29. +9 −4 src/hydrogen-instructions.cc
  30. +26 −21 src/hydrogen-instructions.h
  31. +314 −467 src/hydrogen.cc
  32. +12 −59 src/hydrogen.h
  33. +5 −142 src/ia32/code-stubs-ia32.cc
  34. +0 −42 src/ia32/code-stubs-ia32.h
  35. +12 −4 src/ia32/codegen-ia32.cc
  36. +7 −3 src/ia32/full-codegen-ia32.cc
  37. +11 −37 src/ia32/ic-ia32.cc
  38. +7 −11 src/ia32/lithium-codegen-ia32.cc
  39. +6 −5 src/ia32/lithium-ia32.cc
  40. +10 −10 src/ia32/lithium-ia32.h
  41. +120 −190 src/ia32/stub-cache-ia32.cc
  42. +6 −12 src/ic.cc
  43. +3 −1 src/jsregexp.cc
  44. +5 −3 src/lithium-allocator.cc
  45. +44 −32 src/liveedit.cc
  46. +8 −0 src/log.cc
  47. +2 −0 src/log.h
  48. +6 −7 src/objects-debug.cc
  49. +55 −72 src/objects-inl.h
  50. +7 −7 src/objects-printer.cc
  51. +1 −1 src/objects-visiting.cc
  52. +34 −31 src/objects.cc
  53. +72 −74 src/objects.h
  54. +39 −47 src/parser.cc
  55. +2 −2 src/parser.h
  56. +0 −1 src/preparser.cc
  57. +13 −12 src/runtime.cc
  58. +43 −20 src/scopes.cc
  59. +18 −8 src/scopes.h
  60. +0 −1 src/serialize.cc
  61. +2 −0 src/spaces.cc
  62. +65 −71 src/stub-cache.cc
  63. +2 −12 src/stub-cache.h
  64. +83 −28 src/type-info.cc
  65. +8 −3 src/type-info.h
  66. +1 −1 src/v8-counters.h
  67. +3 −3 src/version.cc
  68. +109 −172 src/x64/code-stubs-x64.cc
  69. +0 −43 src/x64/code-stubs-x64.h
  70. +10 −4 src/x64/codegen-x64.cc
  71. +8 −4 src/x64/full-codegen-x64.cc
  72. +7 −34 src/x64/ic-x64.cc
  73. +51 −18 src/x64/lithium-codegen-x64.cc
  74. +2 −2 src/x64/lithium-codegen-x64.h
  75. +10 −6 src/x64/lithium-x64.cc
  76. +20 −19 src/x64/lithium-x64.h
  77. +6 −7 src/x64/macro-assembler-x64.cc
  78. +82 −159 src/x64/stub-cache-x64.cc
  79. +33 −9 test/cctest/test-api.cc
  80. +0 −5 test/es5conform/es5conform.status
  81. +34 −0 test/mjsunit/regress/regress-1236.js
  82. +36 −0 test/mjsunit/regress/regress-1237.js
  83. +19 −0 test/mjsunit/strict-mode.js
9 ChangeLog
@@ -1,3 +1,12 @@
+2011-03-10: Version 3.2.1
+
+ Fixed a number of crash bugs.
+
+ Improved Crankshaft for x64 and ARM.
+
+ Implemented more of EcmaScript 5 strict mode.
+
+
2011-03-07: Version 3.2.0
Fixed a number of crash bugs.
3 include/v8.h
@@ -1435,7 +1435,8 @@ enum ExternalArrayType {
kExternalUnsignedShortArray,
kExternalIntArray,
kExternalUnsignedIntArray,
- kExternalFloatArray
+ kExternalFloatArray,
+ kExternalPixelArray
};
/**
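The new enum value is what embedders see through the existing external-array queries once an object is backed by pixel data. A minimal embedder-side sketch (the object, buffer, and length are illustrative assumptions, not part of the patch):

  #include <stdint.h>
  #include <v8.h>

  // Backing store owned by the embedder; 256 is an arbitrary example length.
  static uint8_t pixel_buffer[256];

  void AttachPixelData(v8::Handle<v8::Object> obj) {
    // Assumes an active HandleScope and a non-JSArray receiver.
    obj->SetIndexedPropertiesToPixelData(pixel_buffer, 256);

    // The pixel-specific query still answers true, and after this commit the
    // generic query is expected to report the new enum value.
    bool has_pixels = obj->HasIndexedPropertiesInPixelData();
    v8::ExternalArrayType type =
        obj->GetIndexedPropertiesExternalArrayDataType();
    (void) has_pixels;  // expected: true
    (void) type;        // expected: v8::kExternalPixelArray
  }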
2 src/accessors.cc
@@ -791,7 +791,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
- const int length = frame->GetProvidedParametersCount();
+ const int length = frame->ComputeParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
58 src/api.cc
@@ -2756,11 +2756,40 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
}
+namespace {
+
+void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
+ void* data,
+ ExternalArrayType array_type,
+ int length) {
+ i::Handle<i::ExternalArray> array =
+ i::Factory::NewExternalArray(length, array_type, data);
+
+ // If the object already has external elements, create a new, unique
+ // map if the element type is now changing, because assumptions about
+ // generated code based on the receiver's map will be invalid.
+ i::Handle<i::HeapObject> elements(object->elements());
+ bool force_unique_map =
+ elements->map()->IsUndefined() ||
+ !elements->map()->has_external_array_elements() ||
+ elements->map() != i::Heap::MapForExternalArrayType(array_type);
+ if (force_unique_map) {
+ i::Handle<i::Map> external_array_map =
+ i::Factory::NewExternalArrayElementsMap(
+ i::Handle<i::Map>(object->map()));
+ object->set_map(*external_array_map);
+ }
+ object->set_elements(*array);
+}
+
+} // namespace
+
+
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
HandleScope scope;
- if (!ApiCheck(length <= i::PixelArray::kMaxLength,
+ if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
@@ -2771,26 +2800,23 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
"JSArray is not supported")) {
return;
}
- i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
- i::Handle<i::Map> pixel_array_map =
- i::Factory::GetPixelArrayElementsMap(i::Handle<i::Map>(self->map()));
- self->set_map(*pixel_array_map);
- self->set_elements(*pixels);
+ PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
}
bool v8::Object::HasIndexedPropertiesInPixelData() {
ON_BAILOUT("v8::HasIndexedPropertiesInPixelData()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasPixelElements();
+ return self->HasExternalPixelElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
ON_BAILOUT("v8::GetIndexedPropertiesPixelData()", return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (self->HasPixelElements()) {
- return i::PixelArray::cast(self->elements())->external_pointer();
+ if (self->HasExternalPixelElements()) {
+ return i::ExternalPixelArray::cast(self->elements())->
+ external_pixel_pointer();
} else {
return NULL;
}
@@ -2800,14 +2826,13 @@ uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
int v8::Object::GetIndexedPropertiesPixelDataLength() {
ON_BAILOUT("v8::GetIndexedPropertiesPixelDataLength()", return -1);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (self->HasPixelElements()) {
- return i::PixelArray::cast(self->elements())->length();
+ if (self->HasExternalPixelElements()) {
+ return i::ExternalPixelArray::cast(self->elements())->length();
} else {
return -1;
}
}
-
void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
@@ -2826,12 +2851,7 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
"JSArray is not supported")) {
return;
}
- i::Handle<i::ExternalArray> array =
- i::Factory::NewExternalArray(length, array_type, data);
- i::Handle<i::Map> slow_map =
- i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
- self->set_map(*slow_map);
- self->set_elements(*array);
+ PrepareExternalArrayElements(self, data, array_type, length);
}
@@ -2872,6 +2892,8 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
return kExternalUnsignedIntArray;
case i::EXTERNAL_FLOAT_ARRAY_TYPE:
return kExternalFloatArray;
+ case i::EXTERNAL_PIXEL_ARRAY_TYPE:
+ return kExternalPixelArray;
default:
return static_cast<ExternalArrayType>(-1);
}
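The "unique map" comment in the new PrepareExternalArrayElements helper is about an embedder rebinding the same object to a different element type. A hedged sketch of that situation (object and buffers are illustrative only):

  #include <stdint.h>
  #include <v8.h>

  static float float_data[16];
  static uint8_t byte_data[16];

  void RebindBackingStore(v8::Handle<v8::Object> obj) {
    obj->SetIndexedPropertiesToExternalArrayData(
        float_data, v8::kExternalFloatArray, 16);
    // Rebinding with a different element type is the case that forces a new,
    // unique map, so code optimized against the float-backed map is not
    // reused for the byte-backed object.
    obj->SetIndexedPropertiesToExternalArrayData(
        byte_data, v8::kExternalUnsignedByteArray, 16);
  }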
257 src/arm/code-stubs-arm.cc
@@ -4056,6 +4056,111 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
+void MathPowStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = r0;
+ const Register exponent = r1;
+ const Register heapnumbermap = r5;
+ const Register heapnumber = r6;
+ const DoubleRegister double_base = d0;
+ const DoubleRegister double_exponent = d1;
+ const DoubleRegister double_result = d2;
+ const SwVfpRegister single_scratch = s0;
+ const Register scratch = r9;
+ const Register scratch2 = r7;
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ __ ldr(base, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ // Convert base to double value and store it in d0.
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ vmov(single_scratch, base);
+ __ vcvt_f64_s32(double_base, single_scratch);
+ __ b(&convert_exponent);
+
+ __ bind(&base_not_smi);
+ __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ b(ne, &call_runtime);
+ // Base is a heapnumber. Load it into double register.
+ __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(lr);
+ __ PrepareCallCFunction(3, scratch);
+ __ mov(r2, exponent);
+ __ vmov(r0, r1, double_base);
+ __ CallCFunction(ExternalReference::power_double_int_function(), 3);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
+
+ __ bind(&exponent_not_smi);
+ __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ b(ne, &call_runtime);
+ // Exponent is a heapnumber. Load it into double register.
+ __ vldr(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch);
+ __ vmov(r0, r1, double_base);
+ __ vmov(r2, r3, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
+ }
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@@ -6727,158 +6832,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - set to be the receiver's elements on exit.
- //
- // elements_map - set to be the map of the receiver's elements
- // on exit.
- //
- // result - holds the result of the pixel array load on exit,
- // tagged as a smi if successful.
- //
- // Scratch registers:
- //
- // scratch1 - used a scratch register in map check, if map
- // check is successful, contains the length of the
- // pixel array, the pointer to external elements and
- // the untagged result.
- //
- // scratch2 - holds the untaged key.
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
- __ SmiUntag(scratch2, key);
-
- // Verify that the receiver has pixel array elements.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
- not_pixel_array, true);
-
- // Key must be in range of the pixel array.
- __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(scratch2, scratch1);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- // Perform the indexed load and tag the result as a smi.
- __ ldr(scratch1,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ ldrb(scratch1, MemOperand(scratch1, scratch2));
- __ SmiTag(r0, scratch1);
- __ Ret();
-}
-
-
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register elements_map,
- Register scratch1,
- Register scratch2,
- bool load_elements_from_receiver,
- bool load_elements_map_from_elements,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged unless the
- // store succeeds.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- // elements_map - holds the map of the element object if
- // load_elements_map_from_elements is false, otherwise
- // loaded with the element map.
- //
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = scratch2;
-
- if (load_elements_from_receiver) {
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- }
-
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- if (load_elements_map_from_elements) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- }
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ b(ne, not_pixel_array);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ Assert(eq, "Elements isn't a pixel array");
- }
- }
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
-
- __ SmiUntag(untagged_key, key);
-
- // Perform bounds check.
- __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(untagged_key, scratch2);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- __ JumpIfNotSmi(value, value_not_smi);
- __ SmiUntag(untagged_value, value);
-
- // Clamp the value to [0..255].
- __ Usat(untagged_value, 8, Operand(untagged_value));
- // Get the pointer to the external array. This clobbers elements.
- __ ldr(external_pointer,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
- __ Ret();
-}
-
-
#undef __
} } // namespace v8::internal
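For untagged-smi exponents the new MathPowStub calls out through ExternalReference::power_double_int_function(). That helper is not part of this diff; the snippet below is only a plain C++ restatement of the double-base, integer-exponent semantics it is assumed to provide:

  // Illustrative stand-in, not the runtime's actual helper.
  double PowerDoubleInt(double base, int exponent) {
    bool negative = exponent < 0;
    unsigned n = negative ? 0u - static_cast<unsigned>(exponent)
                          : static_cast<unsigned>(exponent);
    double result = 1.0;
    // Exponentiation by squaring: O(log n) multiplications.
    for (double factor = base; n != 0; n >>= 1) {
      if (n & 1u) result *= factor;
      factor *= factor;
    }
    return negative ? 1.0 / result : result;
  }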
56 src/arm/code-stubs-arm.h
@@ -588,6 +588,9 @@ class RegExpCEntryStub: public CodeStub {
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
const char* GetName() { return "RegExpCEntryStub"; }
};
@@ -607,60 +610,13 @@ class DirectCEntryStub: public CodeStub {
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
const char* GetName() { return "DirectCEntryStub"; }
};
-// Generate code to load an element from a pixel array. The receiver is assumed
-// to not be a smi and to have elements, the caller must guarantee this
-// precondition. If key is not a smi, then the generated code branches to
-// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
-// check has already been performed on key so that the smi check is not
-// generated. If key is not a valid index within the bounds of the pixel array,
-// the generated code jumps to out_of_range. receiver, key and elements are
-// unchanged throughout the generated code sequence.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-// Generate code to store an element into a pixel array, clamping values between
-// [0..255]. The receiver is assumed to not be a smi and to have elements, the
-// caller must guarantee this precondition. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If value is not a smi, the generated
-// code will branch to value_not_smi. If the receiver doesn't have pixel array
-// elements, the generated code will branch to not_pixel_array, unless
-// not_pixel_array is NULL, in which case the caller must ensure that the
-// receiver has pixel array elements. If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range. If
-// load_elements_from_receiver is true, then the elements of receiver is loaded
-// into elements, otherwise elements is assumed to already be the receiver's
-// elements. If load_elements_map_from_elements is true, elements_map is loaded
-// from elements, otherwise it is assumed to already contain the element map.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register elements_map,
- Register scratch1,
- Register scratch2,
- bool load_elements_from_receiver,
- bool load_elements_map_from_elements,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range);
-
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
14 src/arm/codegen-arm.cc
@@ -577,11 +577,13 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
+
+ // In strict mode there is no need for shadow arguments.
+ ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
// We don't want to do lazy arguments allocation for functions that
// have heap-allocated contexts, because it interfers with the
// uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
+ return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
? EAGER_ARGUMENTS_ALLOCATION
: LAZY_ARGUMENTS_ALLOCATION;
}
@@ -615,7 +617,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
Variable* arguments = scope()->arguments();
Variable* shadow = scope()->arguments_shadow();
ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
+ ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
+ scope()->is_strict_mode());
+
JumpTarget done;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
@@ -629,7 +633,9 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
}
StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
+ if (shadow != NULL) {
+ StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
+ }
}
15 src/arm/full-codegen-arm.cc
@@ -212,11 +212,14 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
+
+ Variable* arguments_shadow = scope()->arguments_shadow();
+ if (arguments_shadow != NULL) {
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments_shadow->AsSlot(), r3, r1, r2);
+ }
Move(arguments->AsSlot(), r0, r1, r2);
- Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
- Move(dot_arguments_slot, r3, r1, r2);
}
if (FLAG_trace) {
@@ -875,6 +878,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target()->entry_label());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -2855,7 +2859,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kMath_pow, 2);
+ MathPowStub stub;
+ __ CallStub(&stub);
context()->Plug(r0);
}
46 src/arm/ic-arm.cc
@@ -1170,7 +1170,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r1 : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+ Label probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
@@ -1188,31 +1188,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// now in r2.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
__ tst(r3, Operand(1 << Map::kHasFastElements));
- __ b(eq, &check_pixel_array);
+ __ b(eq, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
- // Check whether the elements is a pixel array.
- // r0: key
- // r1: receiver
- __ bind(&check_pixel_array);
-
- GenerateFastPixelArrayLoad(masm,
- r1,
- r0,
- r3,
- r4,
- r2,
- r5,
- r0,
- &check_number_dictionary,
- NULL,
- &slow);
-
__ bind(&check_number_dictionary);
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
+
// Check whether the elements is a number dictionary.
// r0: key
// r3: elements map
@@ -1428,7 +1414,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, fast, array, extra, check_pixel_array;
+ Label slow, fast, array, extra;
// Register usage.
Register value = r0;
@@ -1464,7 +1450,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
- __ b(ne, &check_pixel_array);
+ __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
@@ -1478,24 +1464,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// r2: receiver.
GenerateRuntimeSetProperty(masm, strict_mode);
- // Check whether the elements is a pixel array.
- // r4: elements map.
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayStore(masm,
- r2,
- r1,
- r0,
- elements,
- r4,
- r5,
- r6,
- false,
- false,
- NULL,
- &slow,
- &slow,
- &slow);
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
25 src/arm/lithium-arm.cc
@@ -1230,8 +1230,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
- Abort("MathPowHalf LUnaryMathOperation not implemented");
- return NULL;
+ return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1548,9 +1547,10 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
}
-LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+LInstruction* LChunkBuilder::DoExternalArrayLength(
+ HExternalArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LPixelArrayLength(array));
+ return DefineAsRegister(new LExternalArrayLength(array));
}
@@ -1774,10 +1774,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
- HLoadPixelArrayExternalPointer* instr) {
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
@@ -1836,8 +1836,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
LInstruction* LChunkBuilder::DoStorePixelArrayElement(
HStorePixelArrayElement* instr) {
- Abort("DoStorePixelArrayElement not implemented");
- return NULL;
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* value = UseTempRegister(instr->value()); // changed by clamp.
+ LOperand* key = UseRegister(instr->key());
+
+ return new LStorePixelArrayElement(external_pointer, key, value);
}
40 src/arm/lithium-arm.h
@@ -89,6 +89,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
@@ -115,14 +116,14 @@ class LCodeGen;
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -132,7 +133,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -147,6 +147,7 @@ class LCodeGen;
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StorePixelArrayElement) \
V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
@@ -991,14 +992,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LPixelArrayLength(LOperand* value) {
+ explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
- DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@@ -1163,14 +1164,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
};
@@ -1591,6 +1592,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
@@ -1599,6 +1601,24 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* value() { return inputs_[2]; }
};
+class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStorePixelArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
+ "store-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
51 src/arm/lithium-codegen-arm.cc
@@ -745,10 +745,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::MathPow: {
- Abort("MathPowStub unimplemented.");
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1137,10 +1133,10 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
}
-void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
+ __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset));
}
@@ -2207,7 +2203,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+ __ LoadRoot(ip, Heap::kExternalPixelArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
@@ -2218,11 +2214,12 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
}
-void LCodeGen::DoLoadPixelArrayExternalPointer(
- LLoadPixelArrayExternalPointer* instr) {
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
Register from_reg = ToRegister(instr->InputAt(0));
- __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
+ __ ldr(to_reg, FieldMemOperand(from_reg,
+ ExternalArray::kExternalPointerOffset));
}
@@ -2263,12 +2260,12 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_elements = ToRegister(instr->external_pointer());
+ Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
// Load the result.
- __ ldrb(result, MemOperand(external_elements, key));
+ __ ldrb(result, MemOperand(external_pointer, key));
}
@@ -2646,6 +2643,22 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+ SwVfpRegister single_scratch = double_scratch0().low();
+ DoubleRegister double_scratch = double_scratch0();
+ ASSERT(ToDoubleRegister(instr->result()).is(input));
+
+ // Add +0 to convert -0 to +0.
+ __ mov(scratch, Operand(0));
+ __ vmov(single_scratch, scratch);
+ __ vcvt_f64_s32(double_scratch, single_scratch);
+ __ vadd(input, input, double_scratch);
+ __ vsqrt(input, input);
+}
+
+
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -2742,6 +2755,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSqrt:
DoMathSqrt(instr);
break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
case kMathCos:
DoMathCos(instr);
break;
@@ -2901,6 +2917,17 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
+void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register value = ToRegister(instr->value());
+
+ // Clamp the value to [0..255].
+ __ Usat(value, 8, Operand(value));
+ __ strb(value, MemOperand(external_pointer, key, LSL, 0));
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
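The vadd of +0 in DoMathPowHalf above is there because Math.pow(-0, 0.5) must be +0, while a bare square root of -0 yields -0 under IEEE 754. A standalone C++ analogue of the emitted add-then-sqrt sequence (illustrative only, not V8 code):

  #include <cmath>

  // Adding +0.0 turns -0.0 into +0.0 before the root is taken, matching the
  // vadd/vsqrt pair emitted in DoMathPowHalf.
  double PowHalf(double x) {
    return std::sqrt(x + 0.0);
  }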
1 src/arm/lithium-codegen-arm.h
@@ -210,6 +210,7 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
502 src/arm/stub-cache-arm.cc
@@ -3143,38 +3143,6 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check that the map matches.
- __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayLoad(masm(),
- r1,
- r0,
- r2,
- r3,
- r4,
- r5,
- r0,
- &miss,
- &miss,
- &miss);
-
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -3283,47 +3251,6 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
- JSObject* receiver) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- r3 : scratch
- // -- r4 : scratch
- // -- r5 : scratch
- // -- r6 : scratch
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map matches.
- __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayStore(masm(),
- r2,
- r1,
- r0,
- r3,
- r4,
- r5,
- r6,
- true,
- true,
- &miss,
- &miss,
- NULL,
- &miss);
-
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(NORMAL, NULL);
-}
-
-
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -3488,7 +3415,9 @@ static bool IsElementTypeSigned(ExternalArrayType array_type) {
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+ JSObject* receiver_object,
+ ExternalArrayType array_type,
+ Code::Flags flags) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -3505,32 +3434,20 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
- // Check that the object is a JS object. Load map into r2.
- __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ // Make sure that we've got the right map.
+ __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(Handle<Map>(receiver_object->map())));
__ b(ne, &slow);
- // Check that the elements array is the appropriate type of
- // ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r2, ip);
- __ b(ne, &slow);
+ // r3: elements array
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
- // r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
@@ -3543,6 +3460,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
case kExternalByteArray:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
+ case kExternalPixelArray:
case kExternalUnsignedByteArray:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
@@ -3768,7 +3686,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+ JSObject* receiver_object,
+ ExternalArrayType array_type,
+ Code::Flags flags) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -3786,28 +3706,18 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
- // Check that the object is a JS object. Load map into r3.
- __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
- __ b(le, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ // Make sure that we've got the right map.
+ __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(receiver_object->map())));
__ b(ne, &slow);
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
- // Check that the elements array is the appropriate type of ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r4, ip);
- __ b(ne, &slow);
-
- // Check that the index is in range.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
+ // Check that the index is in range
+ __ SmiUntag(r4, key);
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
@@ -3817,14 +3727,24 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
- __ JumpIfNotSmi(value, &check_heap_number);
- __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
+ if (array_type == kExternalPixelArray) {
+ // Double to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(value, &slow);
+ } else {
+ __ JumpIfNotSmi(value, &check_heap_number);
+ }
+ __ SmiUntag(r5, value);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
+ case kExternalPixelArray:
+ // Clamp the value to [0..255].
+ __ Usat(r5, 8, Operand(r5));
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
@@ -3849,198 +3769,200 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
+ if (array_type != kExternalPixelArray) {
+ // r3: external array.
+ // r4: index (integer).
+ __ bind(&check_heap_number);
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
- // r3: external array.
- // r4: index (integer).
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
+ // r3: base pointer of external storage.
+ // r4: key (integer).
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
- if (array_type == kExternalFloatArray) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(r4, LSL, 2));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
-
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(0), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
+ if (array_type == kExternalFloatArray) {
+ // vldr requires offset to be a multiple of 4 so we can not
+ // include -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
+ __ vcvt_f32_f64(s0, d0);
+ __ vstr(s0, r5, 0);
} else {
- __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
+ // Need to perform float-to-int conversion.
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires offset to be a multiple of 4 so we can
+ // not include -kHeapObjectTag into it.
+ __ sub(r5, value, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and Infinities have all-one exponents so they sign extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(0), LeaveCC, eq);
+
+ // Not infinity or NaN simply convert to int.
+ if (IsElementTypeSigned(array_type)) {
+ __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
+ } else {
+ __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
+ }
+ __ vmov(r5, s0, ne);
+
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
- } else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (array_type == kExternalFloatArray) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
} else {
- bool is_signed_type = IsElementTypeSigned(array_type);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
+ // VFP3 is not available do manual conversions.
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (array_type == kExternalFloatArray) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ // Rebias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ add(r9,
+ r9,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ cmp(r9, Operand(kBinary32MaxExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+ __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
+ __ b(gt, &done);
+
+ __ cmp(r9, Operand(kBinary32MinExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+ __ b(lt, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
+
+ __ bind(&done);
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ // Entry registers are intact, r0 holds the value which is the return
+ // value.
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r9, r9, r7);
+ __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ b(&done);
+ } else {
+ bool is_signed_type = IsElementTypeSigned(array_type);
+ int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ // Unbias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
+ // If exponent is negative then result is 0.
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+ __ b(mi, &done);
+
+ // If exponent is too big then result is minimal value.
+ __ cmp(r9, Operand(meaningfull_bits - 1));
+ __ mov(r5, Operand(min_value), LeaveCC, ge);
+ __ b(ge, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
+ __ b(pl, &sign);
+
+ __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+ __ mov(r5, Operand(r5, LSL, r9));
+ __ rsb(r9, r9, Operand(meaningfull_bits));
+ __ orr(r5, r5, Operand(r6, LSR, r9));
+
+ __ bind(&sign);
+ __ teq(r7, Operand(0, RelocInfo::NONE));
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ __ bind(&done);
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
}
}
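The new kExternalPixelArray store case above clamps the untagged value with a single usat instruction. For reference, a plain C++ equivalent of that saturation (illustration only):

  #include <stdint.h>

  // USAT #8 saturates a signed 32-bit value into the unsigned 8-bit range,
  // i.e. the [0..255] clamp that pixel stores require.
  inline uint8_t ClampToUint8(int32_t value) {
    if (value < 0) return 0;
    if (value > 255) return 255;
    return static_cast<uint8_t>(value);
  }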
5 src/ast-inl.h
@@ -102,6 +102,11 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
}
+bool FunctionLiteral::strict_mode() const {
+ return scope()->is_strict_mode();
+}
+
+
} } // namespace v8::internal
#endif // V8_AST_INL_H_
10 src/ast.cc
@@ -542,6 +542,9 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
} else if (is_monomorphic_) {
monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
+ if (monomorphic_receiver_type_->has_external_array_elements()) {
+ SetExternalArrayType(oracle->GetKeyedLoadExternalArrayType(this));
+ }
}
}
@@ -559,6 +562,9 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed loads.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+ if (monomorphic_receiver_type_->has_external_array_elements()) {
+ SetExternalArrayType(oracle->GetKeyedStoreExternalArrayType(this));
+ }
}
}
@@ -1062,6 +1068,8 @@ CaseClause::CaseClause(Expression* label,
: label_(label),
statements_(statements),
position_(pos),
- compare_type_(NONE) {}
+ compare_type_(NONE),
+ entry_id_(AstNode::GetNextId()) {
+}
} } // namespace v8::internal
22 src/ast.h
@@ -175,6 +175,8 @@ class AstNode: public ZoneObject {
static unsigned current_id_;
static unsigned count_;
unsigned id_;
+
+ friend class CaseClause; // Generates AST IDs.
};
@@ -694,6 +696,8 @@ class CaseClause: public ZoneObject {
int position() { return position_; }
void set_position(int pos) { position_ = pos; }
+ int EntryId() { return entry_id_; }
+
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
@@ -706,6 +710,7 @@ class CaseClause: public ZoneObject {
int position_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
+ int entry_id_;
};
@@ -1232,6 +1237,11 @@ class Property: public Expression {
}
bool is_arguments_access() const { return is_arguments_access_; }
+ ExternalArrayType GetExternalArrayType() const { return array_type_; }
+ void SetExternalArrayType(ExternalArrayType array_type) {
+ array_type_ = array_type;
+ }
+
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
@@ -1258,6 +1268,7 @@ class Property: public Expression {
bool is_function_prototype_ : 1;
bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
+ ExternalArrayType array_type_;
// Dummy property used during preparsing.
static Property this_property_;
@@ -1621,6 +1632,10 @@ class Assignment: public Expression {
virtual Handle<Map> GetMonomorphicReceiverType() {
return monomorphic_receiver_type_;
}
+ ExternalArrayType GetExternalArrayType() const { return array_type_; }
+ void SetExternalArrayType(ExternalArrayType array_type) {
+ array_type_ = array_type;
+ }
// Bailout support.
int CompoundLoadId() const { return compound_load_id_; }
@@ -1641,6 +1656,7 @@ class Assignment: public Expression {
bool is_monomorphic_;
ZoneMapList* receiver_types_;
Handle<Map> monomorphic_receiver_type_;
+ ExternalArrayType array_type_;
};
@@ -1673,8 +1689,7 @@ class FunctionLiteral: public Expression {
int start_position,
int end_position,
bool is_expression,
- bool contains_loops,
- bool strict_mode)
+ bool contains_loops)
: name_(name),
scope_(scope),
body_(body),
@@ -1688,7 +1703,6 @@ class FunctionLiteral: public Expression {
end_position_(end_position),
is_expression_(is_expression),
contains_loops_(contains_loops),
- strict_mode_(strict_mode),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false),
@@ -1705,7 +1719,7 @@ class FunctionLiteral: public Expression {
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
bool contains_loops() const { return contains_loops_; }
- bool strict_mode() const { return strict_mode_; }
+ bool strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
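Two details of the ast.h diff above are connected: CaseClause derives from ZoneObject, not AstNode, so to expose an EntryId() it is made a friend of AstNode and draws a fresh id in its constructor (see the ast.cc hunk earlier). A condensed sketch of that arrangement, simplified from the real classes:

  // Condensed sketch: a non-AstNode class obtains ids from the shared AstNode
  // counter through a friend declaration, mirroring the CaseClause change.
  class AstNode {
   protected:
    static unsigned GetNextId() { return current_id_++; }
   private:
    static unsigned current_id_;
    friend class CaseClause;  // allowed to generate AST ids
  };
  unsigned AstNode::current_id_ = 0;

  class CaseClause {
   public:
    CaseClause() : entry_id_(AstNode::GetNextId()) {}
    int EntryId() const { return entry_id_; }
   private:
    int entry_id_;
  };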
4 src/code-stubs.cc
@@ -101,7 +101,8 @@ Handle<Code> CodeStub::GetCode() {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
+ Handle<Code> new_object = Factory::NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
@@ -116,6 +117,7 @@ Handle<Code> CodeStub::GetCode() {
code = *new_object;
}
+ ASSERT(!NeedsImmovableCode() || Heap::lo_space()->Contains(code));
return Handle<Code>(code);
}
8 src/code-stubs.h
@@ -167,7 +167,11 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }
-#ifdef DEBUG
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+
+ #ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
#endif
@@ -623,6 +627,8 @@ class CEntryStub : public CodeStub {
Major MajorKey() { return CEntry; }
int MinorKey();
+ bool NeedsImmovableCode();
+
const char* GetName() { return "CEntryStub"; }
};
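The new NeedsImmovableCode() hook defaults to false; CEntryStub overrides it, GetCode() forwards the answer to Factory::NewCode (previous file), and the ASSERT there checks that the resulting code object really landed in the large-object space, which the collector never moves. A minimal sketch of the hook pattern with an illustrative derived stub (not a real V8 class):

  // Minimal sketch of the virtual hook added above: stubs keep the default
  // unless their generated code must stay at a fixed address.
  class CodeStubSketch {
   public:
    virtual ~CodeStubSketch() {}
    virtual bool NeedsImmovableCode() { return false; }  // code may be relocated
  };

  class FixedAddressStubSketch : public CodeStubSketch {
   public:
    virtual bool NeedsImmovableCode() { return true; }   // allocate where the GC will not move it
  };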
25 src/debug.cc
@@ -1003,36 +1003,35 @@ Object* Debug::Break(Arguments args) {
// triggered. This function returns a JSArray with the break point objects
// which are triggered.
Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+ // Count the number of break points hit. If there are multiple break points
+ // they are in a FixedArray.
+ Handle<FixedArray> break_points_hit;
int break_points_hit_count = 0;
- Handle<JSArray> break_points_hit = Factory::NewJSArray(1);
-
- // If there are multiple break points they are in a FixedArray.
ASSERT(!break_point_objects->IsUndefined());
if (break_point_objects->IsFixedArray()) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+ break_points_hit = Factory::NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
- SetElement(break_points_hit,
- break_points_hit_count++,
- o,
- kNonStrictMode);
+ break_points_hit->set(break_points_hit_count++, *o);
}
}
} else {
+ break_points_hit = Factory::NewFixedArray(1);
if (CheckBreakPoint(break_point_objects)) {
- SetElement(break_points_hit,
- break_points_hit_count++,
- break_point_objects,
- kNonStrictMode);
+ break_points_hit->set(break_points_hit_count++, *break_point_objects);
}
}
// Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
return Factory::undefined_value();
}
- return break_points_hit;
+ // Return break points hit as a JSArray.
+ Handle<JSArray> result = Factory::NewJSArrayWithElements(break_points_hit);
+ result->set_length(Smi::FromInt(break_points_hit_count));
+ return result;
}
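The rewritten CheckBreakPoints sizes a FixedArray to the maximum possible number of hits, fills in only the triggered break points, and then wraps the backing store in a JSArray whose length is the actual hit count, instead of growing a JSArray element by element through SetElement (whose result becomes MUST_USE_RESULT in the handles.h hunk below). A plain-C++ analogy of the same shape, for illustration only:

  #include <cstddef>
  #include <vector>

  // Analogy only: reserve for the worst case, append just the hits, and hand
  // back the filled prefix -- the counterpart of NewFixedArray +
  // NewJSArrayWithElements + set_length in the hunk above.
  std::vector<int> TriggeredBreakPoints(const std::vector<int>& candidates,
                                        bool (*triggered)(int)) {
    std::vector<int> hits;
    hits.reserve(candidates.size());
    for (std::size_t i = 0; i < candidates.size(); ++i) {
      if (triggered(candidates[i])) hits.push_back(candidates[i]);
    }
    return hits;
  }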
@@ -1043,7 +1042,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
// Ignore check if break point object is not a JSObject.
if (!break_point_object->IsJSObject()) return true;
- // Get the function CheckBreakPoint (defined in debug.js).
+ // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_symbol =
Factory::LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
19 src/factory.cc
@@ -250,16 +250,6 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
}
-Handle<PixelArray> Factory::NewPixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length,
- external_pointer,
- pretenure), PixelArray);
-}
-
-
Handle<ExternalArray> Factory::NewExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
@@ -334,8 +324,8 @@ Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
}
-Handle<Map> Factory::GetPixelArrayElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetPixelArrayElementsMap(), Map);
+Handle<Map> Factory::NewExternalArrayElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->NewExternalArrayElementsMap(), Map);
}
@@ -605,8 +595,9 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
- Handle<Object> self_ref) {
- CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code);
+ Handle<Object> self_ref,
+ bool immovable) {
+ CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref, immovable), Code);
}
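Two cleanups meet in factory.cc above: the dedicated PixelArray allocator disappears because pixel arrays are folded into the external-array family (they become external pixel arrays; see the heap.cc hunk further down), and NewCode grows an immovable argument, which factory.h below defaults to false so existing callers stay unchanged. As background, pixel elements exist for the canvas ImageData case and are byte-valued with clamping semantics, i.e. out-of-range stores clamp to 0..255 rather than truncating; a tiny standalone illustration of such clamping (not code from this commit):

  #include <stdint.h>

  // Background illustration: pixel (canvas) element stores clamp to the byte
  // range instead of wrapping modulo 256.
  inline uint8_t ClampToUint8(int value) {
    if (value < 0) return 0;
    if (value > 255) return 255;
    return static_cast<uint8_t>(value);
  }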
10 src/factory.h
@@ -166,11 +166,6 @@ class Factory : public AllStatic {
static Handle<ByteArray> NewByteArray(int length,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<PixelArray> NewPixelArray(
- int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure = NOT_TENURED);
-
static Handle<ExternalArray> NewExternalArray(
int length,
ExternalArrayType array_type,
@@ -196,7 +191,7 @@ class Factory : public AllStatic {
static Handle<Map> GetSlowElementsMap(Handle<Map> map);
- static Handle<Map> GetPixelArrayElementsMap(Handle<Map> map);
+ static Handle<Map> NewExternalArrayElementsMap(Handle<Map> map);
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@@ -252,7 +247,8 @@ class Factory : public AllStatic {
static Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
- Handle<Object> self_reference);
+ Handle<Object> self_reference,
+ bool immovable = false);
static Handle<Code> CopyCode(Handle<Code> code);
5 src/frames.cc
@@ -630,11 +630,6 @@ Code* JavaScriptFrame::unchecked_code() const {
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
Address JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
if (Heap::gc_state() != Heap::NOT_IN_GC ||
6 src/frames.h
@@ -452,12 +452,6 @@ class JavaScriptFrame: public StandardFrame {
Object* GetParameter(int index) const;
int ComputeParametersCount() const;
- // Temporary way of getting access to the number of parameters
- // passed on the stack by the caller. Once argument adaptor frames
- // has been introduced on ARM, this number will always match the
- // computed parameters count.
- int GetProvidedParametersCount() const;
-
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
3 src/handles.cc
@@ -430,7 +430,7 @@ Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode) {
- if (object->HasPixelElements() || object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements()) {
if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number = Execution::ToNumber(value, &has_exception);
@@ -446,7 +446,6 @@ Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode) {
- ASSERT(!object->HasPixelElements());
ASSERT(!object->HasExternalArrayElements());
CALL_HEAP_FUNCTION(object->SetElement(index, *value, strict_mode, false),
Object);
8 src/handles.h
@@ -264,10 +264,10 @@ Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
-Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
+MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
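The SetElement declaration above gains MUST_USE_RESULT, so callers can no longer silently drop the returned handle (the call can fail, for example by throwing in strict mode); the debug.cc rewrite earlier is one caller that stopped going through it. The macro itself is defined elsewhere in V8 and is not part of this diff; on GCC-style compilers an annotation of this kind is typically built on warn_unused_result, roughly as in this sketch:

  // Sketch of a typical "must use the result" annotation; the exact V8
  // definition lives outside this diff and may differ.
  #if defined(__GNUC__)
  #define MUST_USE_RESULT __attribute__((warn_unused_result))
  #else
  #define MUST_USE_RESULT
  #endif

  MUST_USE_RESULT int MightFail();
  // Ignoring MightFail()'s return value now produces a compiler warning.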
70 src/heap.cc
@@ -1707,10 +1707,10 @@ bool Heap::CreateInitialMaps() {
set_empty_byte_array(ByteArray::cast(obj));
{ MaybeObject* maybe_obj =
- AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
+ AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_pixel_array_map(Map::cast(obj));
+ set_external_pixel_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
ExternalArray::kAlignedSize);
@@ -1906,20 +1906,6 @@ bool Heap::CreateApiObjects() {
}
-void Heap::CreateCEntryStub() {
- CEntryStub stub(1);
- set_c_entry_code(*stub.GetCode());