Version 3.17.5

Made __proto__ a foreign callback on Object.prototype. (issue 621, issue 1949 and issue 2441)

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@13744 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
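
For context, a minimal JavaScript sketch of the observable behaviour this change aims at (inferred from the commit message and the bootstrapper.cc hunk below; an illustration, not part of the commit):

// Assigning __proto__ still switches an object's prototype, now routed
// through the foreign callback installed on Object.prototype:
var o = {};
o.__proto__ = Array.prototype;
// Object.getPrototypeOf(o) === Array.prototype   -> expected true

// The property is defined on Object.prototype itself, marked
// DONT_ENUM | DONT_DELETE in bootstrapper.cc, so it should show up as an
// own, non-enumerable property there:
// Object.getOwnPropertyNames(Object.prototype).indexOf("__proto__") !== -1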
commit 9371452b63e36308adee3c89ba2de7723fe7a3a6 1 parent 5d8992a
mstarzinger@chromium.org authored
Showing with 1,676 additions and 966 deletions.
  1. +8 −0 ChangeLog
  2. +1 −1  Makefile
  3. +11 −5 Makefile.android
  4. +15 −0 build/android.gypi
  5. +8 −9 src/arm/assembler-arm.cc
  6. +1 −2  src/arm/assembler-arm.h
  7. +119 −151 src/arm/code-stubs-arm.cc
  8. +80 −37 src/arm/deoptimizer-arm.cc
  9. +4 −3 src/arm/full-codegen-arm.cc
  10. +6 −0 src/arm/lithium-arm.cc
  11. +14 −0 src/arm/lithium-arm.h
  12. +35 −17 src/arm/lithium-codegen-arm.cc
  13. +4 −5 src/arm/macro-assembler-arm.cc
  14. +1 −2  src/arm/macro-assembler-arm.h
  15. +19 −3 src/bootstrapper.cc
  16. +12 −8 src/circular-queue.cc
  17. +2 −2 src/circular-queue.h
  18. +57 −2 src/code-stubs-hydrogen.cc
  19. +16 −10 src/code-stubs.h
  20. +47 −13 src/cpu-profiler.cc
  21. +10 −5 src/cpu-profiler.h
  22. +1 −1  src/deoptimizer.cc
  23. +2 −2 src/deoptimizer.h
  24. +3 −2 src/factory.cc
  25. +2 −1  src/factory.h
  26. +4 −0 src/flag-definitions.h
  27. +7 −6 src/frames.cc
  28. +27 −15 src/heap-snapshot-generator.cc
  29. +21 −0 src/hydrogen-instructions.h
  30. +54 −5 src/hydrogen.cc
  31. +27 −7 src/hydrogen.h
  32. +153 −166 src/ia32/code-stubs-ia32.cc
  33. +79 −36 src/ia32/deoptimizer-ia32.cc
  34. +16 −6 src/ia32/full-codegen-ia32.cc
  35. +33 −18 src/ia32/lithium-codegen-ia32.cc
  36. +6 −0 src/ia32/lithium-ia32.cc
  37. +14 −0 src/ia32/lithium-ia32.h
  38. +5 −11 src/json-parser.h
  39. +1 −0  src/jsregexp.cc
  40. +15 −17 src/mips/code-stubs-mips.cc
  41. +7 −21 src/objects.cc
  42. +10 −0 src/objects.h
  43. +17 −0 src/platform-cygwin.cc
  44. +17 −0 src/platform-freebsd.cc
  45. +98 −39 src/platform-linux.cc
  46. +17 −0 src/platform-macos.cc
  47. +21 −0 src/platform-nullos.cc
  48. +17 −0 src/platform-openbsd.cc
  49. +18 −0 src/platform-solaris.cc
  50. +17 −0 src/platform-win32.cc
  51. +25 −4 src/platform.h
  52. +2 −16 src/property.h
  53. +8 −40 src/runtime.cc
  54. +25 −0 src/scopeinfo.cc
  55. +30 −24 src/v8natives.js
  56. +2 −2 src/version.cc
  57. +141 −159 src/x64/code-stubs-x64.cc
  58. +86 −36 src/x64/deoptimizer-x64.cc
  59. +16 −6 src/x64/full-codegen-x64.cc
  60. +32 −17 src/x64/lithium-codegen-x64.cc
  61. +6 −0 src/x64/lithium-x64.cc
  62. +14 −0 src/x64/lithium-x64.h
  63. +48 −7 test/cctest/test-api.cc
  64. +4 −4 test/cctest/test-cpu-profiler.cc
  65. +1 −2  test/cctest/test-heap-profiler.cc
  66. +2 −2 test/cctest/test-regexp.cc
  67. +1 −1  test/mjsunit/builtins.js
  68. +2 −2 test/mjsunit/harmony/object-observe.js
  69. +0 −1  test/mjsunit/harmony/proxies.js
  70. +17 −10 test/mjsunit/json.js
  71. +31 −0 test/mjsunit/regress/regress-2441.js
  72. +0 −3  test/test262/test262.status
  73. +4 −2 tools/gyp/v8.gyp
8 ChangeLog
@@ -1,3 +1,11 @@
+2013-02-27: Version 3.17.5
+
+ Made __proto__ a foreign callback on Object.prototype.
+ (issue 621, issue 1949 and issue 2441)
+
+ Performance and stability improvements on all platforms.
+
+
2013-02-25: Version 3.17.4
Performance and stability improvements on all platforms.
2  Makefile
@@ -148,7 +148,7 @@ endif
ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
16 Makefile.android
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -50,11 +50,17 @@ ifeq ($(ARCH), android_arm)
DEFINES += arm_neon=0 armv7=1
TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
else
- ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
- TOOLCHAIN_ARCH = x86-4.6
+ ifeq ($(ARCH), android_mipsel)
+ DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
+ DEFINES += mips_arch_variant=mips32r2
+ TOOLCHAIN_ARCH = mipsel-linux-android-4.6
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86-4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
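
For reference, the new architecture would presumably be built like the existing Android targets, e.g. make android_mipsel.release; this invocation is an assumption inferred from how ANDROID_ARCHES and MODES are combined into targets above, not something stated in the commit.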
15 build/android.gypi
@@ -181,6 +181,11 @@
'-L<(android_stlport_libs)/armeabi',
],
}],
+ ['target_arch=="mipsel"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/mips',
+ ],
+ }],
['target_arch=="ia32"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
@@ -197,6 +202,16 @@
'-fno-stack-protector',
],
}],
+ ['target_arch=="mipsel"', {
+ # The mips toolchain currently has problems with stack-protector.
+ 'cflags!': [
+ '-fstack-protector',
+ '-U__linux__'
+ ],
+ 'cflags': [
+ '-fno-stack-protector',
+ ],
+ }],
],
'target_conditions': [
['_type=="executable"', {
17 src/arm/assembler-arm.cc
@@ -2067,8 +2067,7 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
- const Register scratch,
- const Condition cond) {
+ const Register scratch) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
@@ -2081,7 +2080,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
int vd, d;
dst.split_code(&vd, &d);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
+ emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
@@ -2099,7 +2098,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0), cond);
+ vldr(dst, MemOperand(pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -2110,27 +2109,27 @@ void Assembler::vmov(const DwVfpRegister dst,
// Move the low part of the double into the lower of the corresponsing S
// registers of D register dst.
mov(ip, Operand(lo));
- vmov(dst.low(), ip, cond);
+ vmov(dst.low(), ip);
// Move the high part of the double into the higher of the
// corresponsing S registers of D register dst.
mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
+ vmov(dst.high(), ip);
} else {
// D16-D31 does not have S registers, so move the low and high parts
// directly to the D register using vmov.32.
// Note: This may be slower, so we only do this when we have to.
mov(ip, Operand(lo));
- vmov(dst, VmovIndexLo, ip, cond);
+ vmov(dst, VmovIndexLo, ip);
mov(ip, Operand(hi));
- vmov(dst, VmovIndexHi, ip, cond);
+ vmov(dst, VmovIndexHi, ip);
}
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
mov(ip, Operand(lo));
mov(scratch, Operand(hi));
- vmov(dst, ip, scratch, cond);
+ vmov(dst, ip, scratch);
}
}
}
3  src/arm/assembler-arm.h
@@ -1066,8 +1066,7 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
270 src/arm/code-stubs-arm.cc
@@ -38,6 +38,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r3, r2, r1, r0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -500,49 +512,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ cmp(r0, Operand(size >> kPointerSizeLog2));
- __ b(ne, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -5082,8 +5051,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -5130,68 +5098,48 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r0, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
// Reset offset for possibly sliced string.
__ mov(r9, Operand::Zero());
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ ldr(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mov(r3, subject); // Make a copy of the original subject string.
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // r3: subject string
+ // r0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
+ // (1) Sequential string? If yes, go to (5).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
@@ -5199,77 +5147,62 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
+ __ b(eq, &seq_string); // Go to (5).
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
- __ b(lt, &cons_string);
- __ b(eq, &external_string);
+ __ b(ge, &not_seq_nor_cons); // Go to (6).
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
-
- // String is sliced.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // r9: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ CompareRoot(r0, Heap::kEmptyStringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ b(ne, &external_string); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
+ // subject: sequential subject string (or look-alike, external string)
+ // r3: original subject string
+ // Load previous index and check range before r3 is overwritten. We have to
+ // use r3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r1, &runtime);
+ __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
+ __ cmp(r3, Operand(r1));
+ __ b(ls, &runtime);
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+
STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ // (E) Carry on. String handling is done.
+ // r7: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(r7, &runtime);
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
// r7: code
@@ -5349,10 +5282,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
Label success;
-
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
@@ -5398,10 +5329,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ // Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+ __ b(gt, &runtime);
+
// r1: number of capture registers
// r4: subject string
// Store the capture count.
@@ -5415,10 +5365,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- r2,
+ subject,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, r2);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
@@ -5458,8 +5409,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
- // External string. Short external strings have already been ruled out.
- // r0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ b(gt, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
@@ -5476,11 +5436,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into r9 and replace subject string with parent.
+ __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+ __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -7532,7 +7501,6 @@ struct AheadOfTimeWriteBarrierStubList {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
117 src/arm/deoptimizer-arm.cc
@@ -452,8 +452,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
//
// FROM TO
// | .... | | .... |
@@ -466,9 +466,9 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
// v +-------------------------+ +-------------------------|
// | COMPILED_STUB marker | | STUB_FAILURE marker |
// +-------------------------+ +-------------------------+
- // | | | caller args.length_ |
- // | ... | +-------------------------+
// | | | caller args.arguments_ |
+ // | ... | +-------------------------+
+ // | | | caller args.length_ |
// |-------------------------|<-sp +-------------------------+
// | caller args pointer |
// +-------------------------+
@@ -490,58 +490,77 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
isolate_->code_stub_interface_descriptor(major_key);
// The output frame must have room for all pushed register parameters
- // and the standard stack frame slots.
- int output_frame_size = StandardFrameConstants::kFixedFrameSize +
- kPointerSize * descriptor->register_param_count_;
-
- // Include space for an argument object to the callee and optionally
- // the space to pass the argument object to the stub failure handler.
- output_frame_size += sizeof(Arguments) + kPointerSize;
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
+ }
+ // The stub failure trampoline is a single frame.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, 0);
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- unsigned input_frame_size = input_->GetFrameSize();
-
- intptr_t frame_ptr = input_->GetRegister(fp.code());
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
+ height_in_bytes;
+ output_frame->SetTop(top_address);
- // JSFunction continuation
+ // Read caller's PC (JSFunction continuation) from the input frame.
intptr_t input_frame_offset = input_frame_size - kPointerSize;
intptr_t output_frame_offset = output_frame_size - kPointerSize;
intptr_t value = input_->GetFrameSlot(input_frame_offset);
output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- // saved frame ptr
+ // Read caller's FP from the input frame, and set this frame's FP.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(fp.code());
+ output_frame->SetRegister(fp.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- // Restore context
+ // The context can be gotten from the input frame.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
output_frame->SetRegister(cp.code(), value);
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- // Internal frame markers
+ // A marker value is used in place of the function.
output_frame_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
int caller_arg_count = 0;
if (descriptor->stack_parameter_count_ != NULL) {
@@ -554,15 +573,27 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- output_frame->SetFrameSlot(output_frame_offset, value);
output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, caller_arg_count);
+ value = caller_arg_count;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset;
output_frame_offset -= kPointerSize;
+ value = frame_ptr - (output_frame_size - output_frame_offset) -
+ StandardFrameConstants::kMarkerOffset + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
// Copy the register parameters to the failure frame.
for (int i = 0; i < descriptor->register_param_count_; ++i) {
@@ -570,14 +601,13 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
DoTranslateCommand(iterator, 0, output_frame_offset);
}
+ ASSERT(0 == output_frame_offset);
+
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
- output_frame->SetRegister(fp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
-
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
@@ -587,6 +617,19 @@ void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
}
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ int extra = descriptor->extra_expression_stack_count_;
+ StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
}
7 src/arm/full-codegen-arm.cc
@@ -1580,7 +1580,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
@@ -1591,12 +1591,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
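
Roughly, the dispatch above means a flat object literal with at most FastCloneShallowObjectStub::kMaximumClonedProperties (6, per the code-stubs.h hunk) properties is cloned by the new Hydrogen stub, while nested literals, larger ones, or non-fast-element and serializer cases still go through the runtime. A hedged JavaScript illustration of the three cases:

// Eligible for the fast-clone stub: flat literal, few properties, fast elements.
function makeSmall() { return { a: 1, b: 2, c: 3 }; }

// Nested literal (depth > 1): falls back to Runtime::kCreateObjectLiteral.
function makeNested() { return { a: { b: 1 } }; }

// More than 6 properties: falls back to Runtime::kCreateObjectLiteralShallow.
function makeLarge() { return { p0: 0, p1: 1, p2: 2, p3: 3, p4: 4, p5: 5, p6: 6 }; }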
6 src/arm/lithium-arm.cc
@@ -972,6 +972,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
14 src/arm/lithium-arm.h
@@ -108,6 +108,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -980,6 +981,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
52 src/arm/lithium-codegen-arm.cc
@@ -2842,6 +2842,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -4472,10 +4480,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
+ Label after_canonicalization;
+
// Only load canonical NaN if the comparison above set the overflow.
+ __ b(vc, &after_canonicalization);
__ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg, vs);
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ __ bind(&after_canonicalization);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
@@ -5598,26 +5610,31 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
- HAllocate* original_instr = instr->hydrogen();
- if (original_instr->size()->IsConstant()) {
- UNREACHABLE();
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
} else {
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (original_instr->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
+ Register size = ToRegister(instr->size());
__ AllocateInNewSpace(size,
result,
scratch,
scratch2,
deferred->entry(),
- TAG_OBJECT);
+ flags);
}
__ bind(deferred->exit());
@@ -5865,21 +5882,22 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r4, literals);
- __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(constant_properties));
+ __ LoadHeapObject(r3, literals);
+ __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r1, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
- __ mov(r1, Operand(Smi::FromInt(flags)));
- __ Push(r4, r3, r2, r1);
+ __ mov(r0, Operand(Smi::FromInt(flags)));
// Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
FastCloneShallowObjectStub stub(properties_count);
9 src/arm/macro-assembler-arm.cc
@@ -812,19 +812,18 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch,
- const Condition cond) {
+ const Register scratch) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
// Handle special values first.
if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero, cond);
+ vmov(dst, kDoubleRegZero);
} else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero, cond);
+ vneg(dst, kDoubleRegZero);
} else {
- vmov(dst, imm, scratch, cond);
+ vmov(dst, imm, scratch);
}
}
3  src/arm/macro-assembler-arm.h
@@ -480,8 +480,7 @@ class MacroAssembler: public Assembler {
void Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
22 src/bootstrapper.cc
@@ -485,10 +485,26 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<JSObject> prototype = factory->NewJSObject(
- isolate->object_function(),
- TENURED);
+ Handle<Map> object_prototype_map =
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<DescriptorArray> prototype_descriptors(
+ factory->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*prototype_descriptors);
+ Handle<Foreign> object_prototype(
+ factory->NewForeign(&Accessors::ObjectPrototype));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ object_prototype_map->set_instance_descriptors(*prototype_descriptors);
+
+ { // Add __proto__.
+ CallbacksDescriptor d(heap->Proto_symbol(), *object_prototype, attribs);
+ object_prototype_map->AppendDescriptor(&d, witness);
+ }
+
+ Handle<JSObject> prototype = factory->NewJSObjectFromMap(
+ object_prototype_map,
+ TENURED);
native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
}
20 src/circular-queue.cc
@@ -33,18 +33,16 @@ namespace v8 {
namespace internal {
-SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks)
+SamplingCircularQueue::SamplingCircularQueue(
+ int record_size_in_bytes,
+ int desired_chunk_size_in_bytes,
+ int buffer_size_in_chunks,
+ bool keep_producer_consumer_distance)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
record_size_in_bytes),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
// Clean up the whole buffer to avoid encountering a random kEnd
@@ -74,7 +72,13 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
positions_ + positions_size);
consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+ consumer_pos_->dequeue_chunk_poll_pos = buffer_;
+ // The distance ensures that producer and consumer never step on
+ // each other's chunks and helps eviction of produced data from
+ // the CPU cache (having that chunk size is bigger than the cache.)
+ if (keep_producer_consumer_distance) {
+ consumer_pos_->dequeue_chunk_poll_pos += 2 * chunk_size_;
+ }
consumer_pos_->dequeue_pos = NULL;
}
4 src/circular-queue.h
@@ -47,7 +47,8 @@ class SamplingCircularQueue {
// Executed on the application thread.
SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks);
+ int buffer_size_in_chunks,
+ bool keep_producer_consumer_distance = true);
~SamplingCircularQueue();
// Enqueue returns a pointer to a memory location for storing the next
@@ -88,7 +89,6 @@ class SamplingCircularQueue {
const int chunk_size_in_bytes_;
const int chunk_size_;
const int buffer_size_;
- const int producer_consumer_distance_;
Cell* buffer_;
byte* positions_;
ProducerPosition* producer_pos_;
59 src/code-stubs-hydrogen.cc
@@ -138,6 +138,60 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
template <>
+void CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+
+ HInstruction* boilerplate =
+ AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
+ GetParameter(1),
+ NULL,
+ FAST_ELEMENTS));
+
+ CheckBuilder builder(this, BailoutId::StubEntry());
+ builder.CheckNotUndefined(boilerplate);
+
+ int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
+ HValue* boilerplate_size =
+ AddInstruction(new(zone) HInstanceSize(boilerplate));
+ HValue* size_in_words =
+ AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
+ Representation::Integer32()));
+ builder.CheckIntegerEq(boilerplate_size, size_in_words);
+
+ HValue* size_in_bytes =
+ AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
+ HInstruction* object =
+ AddInstruction(new(zone) HAllocate(context(),
+ size_in_bytes,
+ HType::JSObject(),
+ HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+
+ for (int i = 0; i < size; i += kPointerSize) {
+ HInstruction* value =
+ AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ AddInstruction(new(zone) HStoreNamedField(object,
+ factory->empty_symbol(),
+ value,
+ true, i));
+ AddSimulate(BailoutId::StubEntry());
+ }
+
+ builder.End();
+
+ HReturn* ret = new(zone) HReturn(object, context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
+ CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
Zone* zone = this->zone();
@@ -189,7 +243,7 @@ void CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
new(zone) HBoundsCheck(array_length, max_alloc_size,
DONT_ALLOW_SMI_KEY, Representation::Integer32()));
- IfBuilder if_builder(this);
+ IfBuilder if_builder(this, BailoutId::StubEntry());
if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
@@ -219,7 +273,8 @@ void CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
: AddInstruction(new(zone) HConstant(nan_double,
Representation::Double()));
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
+ BailoutId::StubEntry());
HValue* zero = graph()->GetConstant0();
HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
26 src/code-stubs.h
@@ -264,8 +264,6 @@ struct CodeStubInterfaceDescriptor {
};
-class HGraph;
-struct Register;
class HydrogenCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
@@ -475,7 +473,7 @@ class FastCloneShallowArrayStub : public PlatformCodeStub {
};
-class FastCloneShallowObjectStub : public PlatformCodeStub {
+class FastCloneShallowObjectStub : public HydrogenCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
@@ -485,13 +483,21 @@ class FastCloneShallowObjectStub : public PlatformCodeStub {
ASSERT_LE(length_, kMaximumClonedProperties);
}
- void Generate(MacroAssembler* masm);
+ int length() const { return length_; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
int length_;
Major MajorKey() { return FastCloneShallowObject; }
int MinorKey() { return length_; }
+
+ DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
};
@@ -1222,9 +1228,6 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
IsJSArrayBits::encode(is_js_array);
}
- Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return bit_field_; }
-
bool is_js_array() const {
return IsJSArrayBits::decode(bit_field_);
}
@@ -1244,6 +1247,9 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
uint32_t bit_field_;
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return bit_field_; }
+
DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
};
@@ -1256,9 +1262,6 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
ToKindBits::encode(to_kind);
}
- Major MajorKey() { return TransitionElementsKind; }
- int MinorKey() { return bit_field_; }
-
ElementsKind from_kind() const {
return FromKindBits::decode(bit_field_);
}
@@ -1278,6 +1281,9 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
class ToKindBits: public BitField<ElementsKind, 0, 8> {};
uint32_t bit_field_;
+ Major MajorKey() { return TransitionElementsKind; }
+ int MinorKey() { return bit_field_; }
+
DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
};
60 src/cpu-profiler.cc
@@ -45,13 +45,18 @@ static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
+ period_in_useconds_(period_in_useconds),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
+ kTickSamplesBufferChunksCount,
+ !Sampler::CanSampleOnProfilerEventsProcessorThread()),
enqueue_order_(0) {
}
@@ -239,17 +244,42 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
}
+void ProfilerEventsProcessor::ProcessEventsAndDoSample(
+ unsigned* dequeue_order) {
+ int64_t stop_time = OS::Ticks() + period_in_useconds_;
+ // Keep processing existing events until we need to do next sample.
+ while (OS::Ticks() < stop_time) {
+ if (ProcessTicks(*dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ }
+ // Schedule next sample. sampler_ is NULL in tests.
+ if (sampler_)
+ sampler_->DoSample();
+}
+
+
+void ProfilerEventsProcessor::ProcessEventsAndYield(unsigned* dequeue_order) {
+ if (ProcessTicks(*dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ YieldCPU();
+}
+
+
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
+ if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
+ ProcessEventsAndDoSample(&dequeue_order);
+ } else {
+ ProcessEventsAndYield(&dequeue_order);
}
- YieldCPU();
}
// Process remaining tick events.
@@ -486,13 +516,15 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
- NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
+ processor_ = new ProfilerEventsProcessor(generator_,
+ sampler,
+ FLAG_cpu_profiler_sampling_period);
+ is_profiling_ = true;
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -505,12 +537,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
+ processor_->Start();
}
}
@@ -548,11 +581,12 @@ void CpuProfiler::StopProcessor() {
Logger* logger = Isolate::Current()->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
+ sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
- NoBarrier_Store(&is_profiling_, false);
+ is_profiling_ = false;
processor_->Stop();
processor_->Join();
delete processor_;
15 src/cpu-profiler.h
@@ -124,7 +124,9 @@ class TickSampleEventRecord {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -173,11 +175,16 @@ class ProfilerEventsProcessor : public Thread {
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
+ void ProcessEventsAndDoSample(unsigned* dequeue_order);
+ void ProcessEventsAndYield(unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
@@ -245,11 +252,9 @@ class CpuProfiler {
static void SetterCallbackEvent(String* name, Address entry_point);
static void SharedFunctionInfoMoveEvent(Address from, Address to);
- // TODO(isolates): this doesn't have to use atomics anymore.
-
static INLINE(bool is_profiling(Isolate* isolate)) {
CpuProfiler* profiler = isolate->cpu_profiler();
- return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
+ return profiler != NULL && profiler->is_profiling_;
}
private:
@@ -271,7 +276,7 @@ class CpuProfiler {
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
bool need_to_stop_sampler_;
- Atomic32 is_profiling_;
+ bool is_profiling_;
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
2  src/deoptimizer.cc
@@ -779,7 +779,7 @@ void Deoptimizer::DoComputeOutputFrames() {
DoComputeAccessorStubFrame(&iterator, i, true);
break;
case Translation::COMPILED_STUB_FRAME:
- DoCompiledStubFrame(&iterator, i);
+ DoComputeCompiledStubFrame(&iterator, i);
break;
case Translation::BEGIN:
case Translation::REGISTER:
4 src/deoptimizer.h
@@ -345,8 +345,8 @@ class Deoptimizer : public Malloced {
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
- void DoCompiledStubFrame(TranslationIterator* iterator,
- int frame_index);
+ void DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
5 src/factory.cc
@@ -950,10 +950,11 @@ Handle<GlobalObject> Factory::NewGlobalObject(
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
+Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure),
JSObject);
}
3  src/factory.h
@@ -278,7 +278,8 @@ class Factory {
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+ Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED);
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,
4 src/flag-definitions.h
@@ -365,6 +365,10 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_period, 1000,
+ "CPU profiler sampling period in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(trace_js_array_abuse, false,
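
The new flag would presumably be passed on the command line like any other V8 flag, e.g. d8 --cpu_profiler_sampling_period=500 script.js to sample every 500 microseconds while the CPU profiler is running; the exact invocation is an assumption, only the flag name and its 1000-microsecond default come from the hunk above.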
13 src/frames.cc
@@ -666,6 +666,13 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), code);
+
+ // Visit the context in stub frame and JavaScript frame.
+ // Visit the function in JavaScript frame.
+ Object** fixed_base = &Memory::Object_at(
+ fp() + StandardFrameConstants::kMarkerOffset);
+ Object** fixed_limit = &Memory::Object_at(fp());
+ v->VisitPointers(fixed_base, fixed_limit);
}
@@ -697,12 +704,6 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
#endif
IterateCompiledFrame(v);
-
- // Visit the context and the function.
- Object** fixed_base = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset);
- Object** fixed_limit = &Memory::Object_at(fp());
- v->VisitPointers(fixed_base, fixed_limit);
}
42 src/heap-snapshot-generator.cc
@@ -404,21 +404,33 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
ASSERT(from != NULL);
if (from == to) return;
void* from_value = entries_map_.Remove(from, AddressHash(from));
- if (from_value == NULL) return;
- int from_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(from_value));
- entries_.at(from_entry_info_index).addr = to;
- HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
- if (to_entry->value != NULL) {
- int to_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
- // Without this operation we will have two EntryInfo's with the same
- // value in addr field. It is bad because later at RemoveDeadEntries
- // one of this entry will be removed with the corresponding entries_map_
- // entry.
- entries_.at(to_entry_info_index).addr = NULL;
- }
- to_entry->value = reinterpret_cast<void*>(from_entry_info_index);
+ if (from_value == NULL) {
+ // It may occur that some untracked object moves to an address X and there
+ // is a tracked object at that address. In this case we should remove the
+ // entry as we know that the object has died.
+ void* to_value = entries_map_.Remove(to, AddressHash(to));
+ if (to_value != NULL) {
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ } else {
+ HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
+ if (to_entry->value != NULL) {
+      // We found an existing entry at the to address for an old object.
+      // Without this operation we would have two EntryInfos with the same
+      // value in the addr field. That is bad because later, in
+      // RemoveDeadEntries, one of these entries would be removed together
+      // with the corresponding entries_map_ entry.
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ int from_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(from_value));
+ entries_.at(from_entry_info_index).addr = to;
+ to_entry->value = from_value;
+ }
}
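A short, hedged walkthrough of the two cases the rewritten MoveObject now distinguishes (addresses and names are illustrative):

  // Case 1: `from` has no entry (an untracked object moved onto `to`).
  //   Any stale entry at `to` is removed from entries_map_ and its addr is
  //   cleared, so RemoveDeadEntries can reclaim it instead of colliding.
  // Case 2: `from` has an entry (a tracked object moved).
  //   An existing entry at `to` is invalidated first, then the tracked
  //   EntryInfo is re-pointed at `to` and entries_map_ maps `to` to it.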
View
21 src/hydrogen-instructions.h
@@ -123,6 +123,7 @@ class LChunkBuilder;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -3948,6 +3949,26 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
};
+// TODO(mstarzinger): This instruction should be modeled as a load of the map
+// field followed by a load of the instance size field once HLoadNamedField is
+// flexible enough to accommodate byte-field loads.
+class HInstanceSize: public HTemplateInstruction<1> {
+ public:
+ explicit HInstanceSize(HValue* object) {
+ SetOperandAt(0, object);
+ set_representation(Representation::Integer32());
+ }
+
+ HValue* object() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize)
+};
+
+
class HPower: public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone, HValue* left, HValue* right);
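A hedged sketch of how a stub's graph builder might emit the new instruction; the boilerplate value and the surrounding HGraphBuilder helpers are assumed, not taken from this commit:

  // Yields the instance size of boilerplate's map as an Integer32 value.
  HInstanceSize* instance_size = new(zone()) HInstanceSize(boilerplate);
  AddInstruction(instance_size);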
View
59 src/hydrogen.cc
@@ -634,16 +634,64 @@ HConstant* HGraph::GetConstantHole() {
}
+HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder, BailoutId id)
+ : builder_(builder),
+ finished_(false),
+ id_(id) {
+ HEnvironment* env = builder->environment();
+ failure_block_ = builder->CreateBasicBlock(env->Copy());
+ merge_block_ = builder->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) {
+ HEnvironment* env = builder_->environment();
+ HIsNilAndBranch* compare =
+ new(zone()) HIsNilAndBranch(value, kStrictEquality, kUndefinedValue);
+ HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
+ HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
+ compare->SetSuccessorAt(0, failure_block);
+ compare->SetSuccessorAt(1, success_block);
+ failure_block->Goto(failure_block_);
+ builder_->current_block()->Finish(compare);
+ builder_->set_current_block(success_block);
+}
+
+
+void HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left, HValue* right) {
+ HEnvironment* env = builder_->environment();
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(left, right, Token::EQ);
+ compare->AssumeRepresentation(Representation::Integer32());
+ HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
+ HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
+ compare->SetSuccessorAt(0, success_block);
+ compare->SetSuccessorAt(1, failure_block);
+ failure_block->Goto(failure_block_);
+ builder_->current_block()->Finish(compare);
+ builder_->set_current_block(success_block);
+}
+
+
+void HGraphBuilder::CheckBuilder::End() {
+ ASSERT(!finished_);
+ builder_->current_block()->Goto(merge_block_);
+ failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ failure_block_->SetJoinId(id_);
+ builder_->set_current_block(merge_block_);
+ merge_block_->SetJoinId(id_);
+ finished_ = true;
+}
+
+
HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, BailoutId id)
: builder_(builder),
finished_(false),
id_(id) {
HEnvironment* env = builder->environment();
- HEnvironment* true_env = env->Copy();
- HEnvironment* false_env = env->Copy();
- first_true_block_ = builder->CreateBasicBlock(true_env);
+ first_true_block_ = builder->CreateBasicBlock(env->Copy());
last_true_block_ = NULL;
- first_false_block_ = builder->CreateBasicBlock(false_env);
+ first_false_block_ = builder->CreateBasicBlock(env->Copy());
}
@@ -1071,7 +1119,8 @@ void HGraphBuilder::BuildCopyElements(HContext* context,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length) {
- LoopBuilder builder(this, context, LoopBuilder::kPostIncrement);
+ LoopBuilder builder(this, context, LoopBuilder::kPostIncrement,
+ BailoutId::StubEntry());
HValue* key = builder.BeginBody(graph()->GetConstant0(),
length, Token::LT);
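Putting the new CheckBuilder together, a hedged sketch of the intended call pattern inside a stub builder (value names are illustrative):

  CheckBuilder checks(this, BailoutId::StubEntry());
  checks.CheckNotUndefined(boilerplate);            // deoptimize on undefined
  checks.CheckIntegerEq(instance_size, known_size); // deoptimize on mismatch
  checks.End();  // success paths join; the shared failure block deoptimizes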
View
34 src/hydrogen.h
@@ -920,10 +920,30 @@ class HGraphBuilder {
HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);
+ class CheckBuilder {
+ public:
+ CheckBuilder(HGraphBuilder* builder, BailoutId id);
+ ~CheckBuilder() {
+ if (!finished_) End();
+ }
+
+ void CheckNotUndefined(HValue* value);
+ void CheckIntegerEq(HValue* left, HValue* right);
+ void End();
+
+ private:
+ Zone* zone() { return builder_->zone(); }
+
+ HGraphBuilder* builder_;
+ bool finished_;
+ HBasicBlock* failure_block_;
+ HBasicBlock* merge_block_;
+ BailoutId id_;
+ };
+
class IfBuilder {
public:
- IfBuilder(HGraphBuilder* builder,
- BailoutId id = BailoutId::StubEntry());
+ IfBuilder(HGraphBuilder* builder, BailoutId id);
~IfBuilder() {
if (!finished_) End();
}
@@ -937,6 +957,8 @@ class HGraphBuilder {
void End();
private:
+ Zone* zone() { return builder_->zone(); }
+
HGraphBuilder* builder_;
bool finished_;
HBasicBlock* first_true_block_;
@@ -944,8 +966,6 @@ class HGraphBuilder {
HBasicBlock* first_false_block_;
HBasicBlock* merge_block_;
BailoutId id_;
-
- Zone* zone() { return builder_->zone(); }
};
class LoopBuilder {
@@ -960,7 +980,7 @@ class HGraphBuilder {
LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction,
- BailoutId id = BailoutId::StubEntry());
+ BailoutId id);
~LoopBuilder() {
ASSERT(finished_);
}
@@ -973,6 +993,8 @@ class HGraphBuilder {
void EndBody();
private:
+ Zone* zone() { return builder_->zone(); }
+
HGraphBuilder* builder_;
HValue* context_;
HInstruction* increment_;
@@ -983,8 +1005,6 @@ class HGraphBuilder {
Direction direction_;
BailoutId id_;
bool finished_;
-
- Zone* zone() { return builder_->zone(); }
};
HValue* BuildAllocateElements(HContext* context,
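With the default BailoutId::StubEntry() arguments removed, call sites now pass the id explicitly; a hedged sketch mirroring the BuildCopyElements adjustment above:

  IfBuilder if_builder(this, BailoutId::StubEntry());
  LoopBuilder loop(this, context, LoopBuilder::kPostIncrement,
                   BailoutId::StubEntry());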
View
319 src/ia32/code-stubs-ia32.cc
@@ -42,6 +42,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -493,52 +505,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: object literal flags.
- // [esp + (2 * kPointerSize)]: constant properties.
- // [esp + (3 * kPointerSize)]: literal index.
- // [esp + (4 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- __ j(equal, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ cmp(eax, Immediate(size >> kPointerSizeLog2));
- __ j(not_equal, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Return and remove the on-stack parameters.
- __ ret(4 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -3826,7 +3792,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 3 * kPointerSize;
static const int kJSRegExpOffset = 4 * kPointerSize;
- Label runtime, invoke_regexp;
+ Label runtime;
+ Factory* factory = masm->isolate()->factory();
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
@@ -3844,6 +3811,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
+
// Check that the RegExp has been compiled (data contains a fixed array).
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
@@ -3862,156 +3830,124 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since edx is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
__ j(above, &runtime);
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, ebx);
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
- __ j(greater, &runtime);
-
// Reset offset for possibly sliced string.
__ Set(edi, Immediate(0));
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
__ mov(eax, Operand(esp, kSubjectOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ mov(edx, eax); // Make a copy of the original subject string.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
+
+ // eax: subject string
+ // edx: subject string
+ // ebx: subject string instance type
+ // ecx: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+  //     Otherwise bail out.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
+
+ // (1) Sequential two byte? If yes, go to (9).
__ and_(ebx, kIsNotStringMask |
kStringRepresentationMask |
kStringEncodingMask |
kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ASCII string. None of the following
- // string type tests will succeed if subject is not a string or a short
- // external string.
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
__ and_(ebx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
- __ j(zero, &seq_ascii_string, Label::kNear);
-
- // ebx: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(ebx, Immediate(kExternalStringTag));
- __ j(less, &cons_string);
- __ j(equal, &external_string);
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
- __ j(not_zero, &runtime);
-
- // String is sliced.
- __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
- // edi: offset of sliced string, smi-tagged.
- // eax: parent string.
- __ jmp(&check_encoding, Label::kNear);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ bind(&check_encoding);
+ __ bind(&check_underlying);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // eax: first part of cons string or parent of sliced string.
- // ebx: map of first part of cons string or map of parent of sliced string.
- // Is first part of cons or parent of slice a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ASCII or external.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &external_string);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ASCII)
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ test_b(ebx, kStringRepresentationMask);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
// ecx: RegExp data (FixedArray)
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is ASCII.
- __ jmp(&check_code, Label::kNear);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Set(ecx, Immediate(1)); // Type is one byte.
+ // (E) Carry on. String handling is done.
__ bind(&check_code);
+ // edx: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(edx, &runtime);
// eax: subject string
+ // ebx: previous index (smi)
// edx: code
// ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -4042,6 +3978,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->isolate())));
// Argument 2: Previous index.
+ __ SmiUntag(ebx);
__ mov(Operand(esp, 1 * kPointerSize), ebx);
// Argument 1: Original subject string.
@@ -4151,8 +4088,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
+ // Check that the fourth object is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.