Upgrade V8 to 3.1.2

commit a0702b54d1db35a6006644882c0b5420d8670958 1 parent a48a075
ry authored
Showing with 5,878 additions and 1,766 deletions.
  1. +1 −0  deps/v8/.gitignore
  2. +2 −1  deps/v8/AUTHORS
  3. +12 −0 deps/v8/ChangeLog
  4. +5 −5 deps/v8/{src/third_party/strongtalk/LICENSE → LICENSE.strongtalk}
  5. +26 −0 deps/v8/LICENSE.v8
  6. +45 −0 deps/v8/LICENSE.valgrind
  7. +1 −1  deps/v8/SConstruct
  8. +8 −8 deps/v8/src/arm/assembler-arm.cc
  9. +7 −11 deps/v8/src/arm/assembler-arm.h
  10. +355 −91 deps/v8/src/arm/code-stubs-arm.cc
  11. +39 −0 deps/v8/src/arm/code-stubs-arm.h
  12. +9 −3 deps/v8/src/arm/codegen-arm.cc
  13. +1 −0  deps/v8/src/arm/codegen-arm.h
  14. +17 −7 deps/v8/src/arm/constants-arm.h
  15. +1 −1  deps/v8/src/arm/deoptimizer-arm.cc
  16. +8 −2 deps/v8/src/arm/full-codegen-arm.cc
  17. +12 −13 deps/v8/src/arm/ic-arm.cc
  18. +93 −24 deps/v8/src/arm/lithium-arm.cc
  19. +109 −134 deps/v8/src/arm/lithium-arm.h
  20. +166 −30 deps/v8/src/arm/lithium-codegen-arm.cc
  21. +5 −0 deps/v8/src/arm/lithium-codegen-arm.h
  22. +141 −17 deps/v8/src/arm/macro-assembler-arm.cc
  23. +31 −5 deps/v8/src/arm/macro-assembler-arm.h
  24. +142 −71 deps/v8/src/arm/simulator-arm.cc
  25. +5 −3 deps/v8/src/arm/simulator-arm.h
  26. +147 −83 deps/v8/src/arm/stub-cache-arm.cc
  27. +5 −3 deps/v8/src/array.js
  28. +7 −5 deps/v8/src/assembler.cc
  29. +27 −8 deps/v8/src/assembler.h
  30. +2 −1  deps/v8/src/code-stubs.h
  31. +4 −0 deps/v8/src/codegen-inl.h
  32. +13 −8 deps/v8/src/compilation-cache.cc
  33. +2 −1  deps/v8/src/compilation-cache.h
  34. +12 −2 deps/v8/src/compiler.cc
  35. +12 −1 deps/v8/src/compiler.h
  36. +32 −46 deps/v8/src/conversions.cc
  37. +2 −2 deps/v8/src/deoptimizer.cc
  38. +2 −2 deps/v8/src/disassembler.cc
  39. +5 −1 deps/v8/src/extensions/gc-extension.cc
  40. +1 −1  deps/v8/src/full-codegen.cc
  41. +3 −0  deps/v8/src/full-codegen.h
  42. +1 −1  deps/v8/src/handles.cc
  43. +2 −1  deps/v8/src/heap-profiler.cc
  44. +11 −0 deps/v8/src/heap.cc
  45. +9 −2 deps/v8/src/heap.h
  46. +9 −1 deps/v8/src/hydrogen-instructions.cc
  47. +85 −29 deps/v8/src/hydrogen-instructions.h
  48. +60 −32 deps/v8/src/hydrogen.cc
  49. +2 −0  deps/v8/src/hydrogen.h
  50. +48 −0 deps/v8/src/ia32/code-stubs-ia32.cc
  51. +19 −0 deps/v8/src/ia32/code-stubs-ia32.h
  52. +8 −2 deps/v8/src/ia32/codegen-ia32.cc
  53. +1 −0  deps/v8/src/ia32/codegen-ia32.h
  54. +48 −67 deps/v8/src/ia32/deoptimizer-ia32.cc
  55. +90 −63 deps/v8/src/ia32/full-codegen-ia32.cc
  56. +9 −13 deps/v8/src/ia32/ic-ia32.cc
  57. +37 −8 deps/v8/src/ia32/lithium-codegen-ia32.cc
  58. +80 −18 deps/v8/src/ia32/lithium-ia32.cc
  59. +107 −131 deps/v8/src/ia32/lithium-ia32.h
  60. +15 −5 deps/v8/src/ia32/macro-assembler-ia32.cc
  61. +11 −0 deps/v8/src/ia32/macro-assembler-ia32.h
  62. +31 −0 deps/v8/src/ia32/stub-cache-ia32.cc
  63. +30 −18 deps/v8/src/ic.cc
  64. +140 −0 deps/v8/src/lithium-allocator-inl.h
  65. +55 −116 deps/v8/src/lithium-allocator.cc
  66. +50 −68 deps/v8/src/lithium-allocator.h
  67. +76 −0 deps/v8/src/lithium.h
  68. +3 −6 deps/v8/src/messages.js
  69. +20 −8 deps/v8/src/objects-inl.h
  70. +42 −13 deps/v8/src/objects.cc
  71. +15 −8 deps/v8/src/objects.h
  72. +97 −29 deps/v8/src/parser.cc
  73. +7 −1 deps/v8/src/parser.h
  74. +21 −10 deps/v8/src/preparser.cc
  75. +2 −0  deps/v8/src/preparser.h
  76. +4 −21 deps/v8/src/prettyprinter.cc
  77. +164 −28 deps/v8/src/runtime.cc
  78. +2 −2 deps/v8/src/runtime.h
  79. +21 −1 deps/v8/src/safepoint-table.cc
  80. +8 −3 deps/v8/src/safepoint-table.h
  81. +55 −6 deps/v8/src/scanner-base.cc
  82. +8 −1 deps/v8/src/scanner-base.h
  83. +18 −1 deps/v8/src/scanner.cc
  84. +9 −0 deps/v8/src/scanner.h
  85. +1 −2  deps/v8/src/scopes.cc
  86. +33 −0 deps/v8/src/stub-cache.cc
  87. +4 −0 deps/v8/src/stub-cache.h
  88. +0 −18 deps/v8/src/third_party/strongtalk/README.chromium
  89. +5 −32 deps/v8/src/token.h
  90. +1 −1  deps/v8/src/top.h
  91. +3 −3 deps/v8/src/type-info.h
  92. +1 −2  deps/v8/src/uri.js
  93. +6 −0 deps/v8/src/v8globals.h
  94. +96 −50 deps/v8/src/v8natives.js
  95. +1 −1  deps/v8/src/version.cc
  96. +27 −0 deps/v8/src/x64/assembler-x64.cc
  97. +29 −3 deps/v8/src/x64/assembler-x64.h
  98. +433 −64 deps/v8/src/x64/code-stubs-x64.cc
  99. +24 −0 deps/v8/src/x64/code-stubs-x64.h
  100. +8 −2 deps/v8/src/x64/codegen-x64.cc
  101. +1 −0  deps/v8/src/x64/codegen-x64.h
  102. +117 −30 deps/v8/src/x64/deoptimizer-x64.cc
  103. +2 −0  deps/v8/src/x64/disasm-x64.cc
  104. +2 −3 deps/v8/src/x64/frames-x64.h
  105. +19 −22 deps/v8/src/x64/full-codegen-x64.cc
  106. +11 −15 deps/v8/src/x64/ic-x64.cc
  107. +366 −43 deps/v8/src/x64/lithium-codegen-x64.cc
  108. +176 −61 deps/v8/src/x64/lithium-x64.cc
  109. +50 −15 deps/v8/src/x64/lithium-x64.h
  110. +3 −1 deps/v8/src/x64/macro-assembler-x64.cc
  111. +28 −1 deps/v8/src/x64/macro-assembler-x64.h
  112. +29 −0 deps/v8/src/x64/stub-cache-x64.cc
  113. +1 −1  deps/v8/test/cctest/cctest.status
  114. +317 −12 deps/v8/test/cctest/test-api.cc
  115. +167 −49 deps/v8/test/cctest/test-assembler-arm.cc
  116. +2 −2 deps/v8/test/cctest/test-bignum-dtoa.cc
  117. +1 −1  deps/v8/test/cctest/test-dtoa.cc
  118. +1 −1  deps/v8/test/cctest/test-fast-dtoa.cc
  119. +213 −7 deps/v8/test/es5conform/es5conform.status
  120. +16 −0 deps/v8/test/mjsunit/get-own-property-descriptor.js
  121. +4 −0 deps/v8/test/mjsunit/mjsunit.status
  122. +135 −2 deps/v8/test/mjsunit/object-define-property.js
  123. +38 −0 deps/v8/test/mjsunit/regress/regress-1083.js
  124. +35 −0 deps/v8/test/mjsunit/regress/regress-1092.js
  125. +46 −0 deps/v8/test/mjsunit/regress/regress-1099.js
  126. +2 −0  deps/v8/test/mjsunit/regress/regress-900966.js
  127. +43 −0 deps/v8/test/mjsunit/regress/regress-992.js
  128. +49 −0 deps/v8/test/mjsunit/regress/regress-deopt-gc.js
  129. +82 −0 deps/v8/test/mjsunit/strict-mode-eval.js
  130. +65 −0 deps/v8/test/mjsunit/strict-mode.js
  131. +5 −10 deps/v8/test/mozilla/mozilla.status
  132. +1 −0  deps/v8/tools/gyp/v8.gyp
  133. +20 −0 deps/v8/tools/visual_studio/v8_base.vcproj
  134. +4 −0 deps/v8/tools/visual_studio/v8_base_arm.vcproj
  135. +108 −8 deps/v8/tools/visual_studio/v8_base_x64.vcproj
1  deps/v8/.gitignore
@@ -20,6 +20,7 @@ d8_g
shell
shell_g
/obj/
+/test/sputnik/sputniktests/
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug
3  deps/v8/AUTHORS
@@ -26,6 +26,7 @@ Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Michael Smith <mike@w3.org>
+Mike Gilbert <floppymaster@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Rafal Krypa <rafal@krypa.net>
@@ -35,4 +36,4 @@ Ryan Dahl <coldredlemur@gmail.com>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
-Mike Gilbert <floppymaster@gmail.com>
+Zaheer Ahmad <zahmad@codeaurora.org>
12 deps/v8/ChangeLog
@@ -1,3 +1,15 @@
+2011-02-07: Version 3.1.2
+
+ Added better security checks when accessing properties via
+ Object.getOwnPropertyDescriptor.
+
+ Fixed bug in Object.defineProperty and related access bugs (issues
+ 992, 1083 and 1092).
+
+ Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
+ copyright notice generation for embedders.
+
+
2011-02-02: Version 3.1.1
Perform security checks before fetching the value in
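The property fixes above are observable from JavaScript. The exact regression cases for issues 992, 1083 and 1092 live in the new mjsunit tests (object-define-property.js, get-own-property-descriptor.js); the following is only an illustrative sketch of the behavior they pin down:

  var o = {};
  Object.defineProperty(o, 'x', { value: 1, writable: false });
  // Redefining a non-configurable, non-writable property must fail:
  // Object.defineProperty(o, 'x', { value: 2 });  // throws TypeError
  var desc = Object.getOwnPropertyDescriptor(o, 'x');
  // desc => { value: 1, writable: false, enumerable: false, configurable: false }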
10 deps/v8/src/third_party/strongtalk/LICENSE → deps/v8/LICENSE.strongtalk
@@ -6,15 +6,15 @@ modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
+this list of conditions and the following disclaimer.
- Redistribution in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
- Neither the name of Sun Microsystems or the names of contributors may
- be used to endorse or promote products derived from this software without
- specific prior written permission.
+be used to endorse or promote products derived from this software without
+specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 deps/v8/LICENSE.v8
@@ -0,0 +1,26 @@
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 deps/v8/LICENSE.valgrind
@@ -0,0 +1,45 @@
+----------------------------------------------------------------
+
+Notice that the following BSD-style license applies to this one
+file (valgrind.h) only. The rest of Valgrind is licensed under the
+terms of the GNU General Public License, version 2, unless
+otherwise indicated. See the COPYING file in the source
+distribution for details.
+
+----------------------------------------------------------------
+
+This file is part of Valgrind, a dynamic binary instrumentation
+framework.
+
+Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2  deps/v8/SConstruct
@@ -136,7 +136,7 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
- 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
+ 'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
16 deps/v8/src/arm/assembler-arm.cc
@@ -2124,7 +2124,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
const int dst_code,
const VFPType src_type,
const int src_code,
- Assembler::ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(src_type != dst_type);
int D, Vd, M, Vm;
@@ -2167,7 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
@@ -2176,7 +2176,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
@@ -2185,7 +2185,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
@@ -2194,7 +2194,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
@@ -2203,7 +2203,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
@@ -2212,7 +2212,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
@@ -2221,7 +2221,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
18 deps/v8/src/arm/assembler-arm.h
@@ -942,37 +942,33 @@ class Assembler : public Malloced {
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
- enum ConversionMode {
- FPSCRRounding = 0,
- RoundToZero = 1
- };
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
446 deps/v8/src/arm/code-stubs-arm.cc
@@ -396,6 +396,19 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Label* not_number);
+
+ // Loads the number from object into dst as a 32-bit integer if possible. If
+ // the object is not a 32-bit integer, control continues at the label
+ // not_int32. If VFP is supported, double_scratch is used but not scratch2.
+ static void LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
@@ -461,15 +474,21 @@ void FloatingPointHelper::LoadOperands(
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
+ Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
Label is_smi, done;
__ JumpIfSmi(object, &is_smi);
@@ -514,6 +533,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi, done;
+ __ JumpIfSmi(object, &is_smi);
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ cmp(scratch1, heap_number_map);
+ __ b(ne, not_int32);
+ __ ConvertToInt32(
+ object, dst, scratch1, scratch2, double_scratch, not_int32);
+ __ jmp(&done);
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
+}
+
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1676,7 +1723,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+ __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
__ jmp(&done_checking_lhs);
__ bind(&lhs_is_smi);
__ mov(r3, Operand(lhs, ASR, 1));
@@ -1687,7 +1734,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+ __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
__ jmp(&done_checking_rhs);
__ bind(&rhs_is_smi);
__ mov(r2, Operand(rhs, ASR, 1));
@@ -2529,6 +2576,18 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
__ and_(right, left, Operand(scratch1));
__ Ret();
break;
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ __ Ret();
+ break;
default:
UNREACHABLE();
}
@@ -2545,90 +2604,179 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch1 = r7;
Register scratch2 = r9;
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
- // on whether VFP3 is available.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Allocate new heap number for result.
- Register result = r5;
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+ // depending on whether VFP3 is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = r5;
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
- // Load the operands.
- if (smi_operands) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
- }
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Using VFP registers:
+ // d6: Left value
+ // d7: Right value
+ CpuFeatures::Scope scope(VFP3);
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP3);
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
+ __ sub(r0, result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to subtract the tag from r5.
- __ sub(scratch1, result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from r5.
+ __ sub(scratch1, result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
#endif
- __ mov(r0, Operand(result));
- // And we are done.
- __ pop(pc);
+ // Place the result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(result));
+ __ pop(pc);
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ if (smi_operands) {
+ __ SmiUntag(r3, left);
+ __ SmiUntag(r2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in r2 and left in r3.
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ }
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Label result_not_a_smi;
+ // Check that the *signed* result fits in a smi.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ SmiTag(r0, r2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ __ AllocateHeapNumber(
+ r5, scratch1, scratch2, heap_number_map, gc_required);
+
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
}
}
@@ -2646,7 +2794,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Register left = r1;
Register right = r0;
@@ -2678,7 +2829,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
@@ -2714,7 +2868,10 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -2727,7 +2884,10 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Label not_numbers, call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@@ -2747,7 +2907,10 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
- op_ == Token::MOD);
+ op_ == Token::MOD ||
+ op_ == Token::BIT_OR ||
+ op_ == Token::BIT_AND ||
+ op_ == Token::BIT_XOR);
Label call_runtime;
@@ -2812,6 +2975,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_JS);
break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
default:
UNREACHABLE();
}
@@ -3037,7 +3209,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, &slow);
+ __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.
@@ -3329,9 +3501,17 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// this by performing a garbage collection and retrying the
// builtin once.
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r6, r6, Operand(kPointerSize));
+
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(save_doubles_);
+ // Setup argc and the builtin function in callee-saved registers.
+ __ mov(r4, Operand(r0));
+ __ mov(r5, Operand(r1));
+
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
// r6: pointer to first argument (C callee-saved)
@@ -5734,6 +5914,90 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ __ ldr(pc, MemOperand(sp, 0));
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ApiFunction *function) {
+ __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET));
+ // Push return address (accessible to GC through exit frame pc).
+ __ mov(r2,
+ Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ str(pc, MemOperand(sp, 0));
+ __ Jump(r2); // Call the api function.
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - set to be the receiver's elements on exit.
+ //
+ // elements_map - set to be the map of the receiver's elements
+ // on exit.
+ //
+ // result - holds the result of the pixel array load on exit,
+ // tagged as a smi if successful.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used as a scratch register in the map check; if the map
+ // check is successful, contains the length of the
+ // pixel array, the pointer to external elements and
+ // the untagged result.
+ //
+ // scratch2 - holds the untagged key.
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check
+ // that the key is a smi is generated.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiUntag(scratch2, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
+ not_pixel_array, true);
+
+ // Key must be in range of the pixel array.
+ __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
+ __ cmp(scratch2, scratch1);
+ __ b(hs, out_of_range); // unsigned check handles negative keys.
+
+ // Perform the indexed load and tag the result as a smi.
+ __ ldr(scratch1,
+ FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+ __ ldrb(scratch1, MemOperand(scratch1, scratch2));
+ __ SmiTag(r0, scratch1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
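The bitwise cases added above keep smi operands on an inline fast path and use add(r3, r2, Operand(0x40000000), SetCC) as the smi-range test: adding 2^30 drives the 32-bit result negative exactly when the int32 in r2 falls outside the 31-bit smi range [-2^30, 2^30). An illustrative JS view of the two outcomes (not code from this patch):

  var fast = 12 | 5;          // 13 fits in a smi: inline orr + SmiTag, no allocation
  var slow = 0x40000000 | 0;  // 2^30 is just outside the smi range, so the stub
                              // takes result_not_a_smi and allocates a heap number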
39 deps/v8/src/arm/code-stubs-arm.h
@@ -571,6 +571,45 @@ class RegExpCEntryStub: public CodeStub {
};
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code that called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed to not be a smi and to have elements, the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
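GenerateFastPixelArrayLoad gives the keyed-load paths a shared fast case for pixel array elements (the backing store of canvas image data in a browser embedder). A sketch of the three exits in JS terms, where pixels is an assumed pixel-array-backed object rather than an API from this patch:

  var v = pixels[3];        // smi key in bounds: returns the byte value, smi-tagged
  var u = pixels[1 << 20];  // key >= length: jumps to out_of_range (slow path)
  var w = pixels[1.5];      // non-smi key: jumps to key_not_smi (generic path)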
12 deps/v8/src/arm/codegen-arm.cc
@@ -1110,7 +1110,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() {
Register int32 = r2;
// Not a 32bits signed int, fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+ __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
// tos_register_ (r0 or r1): Original heap number.
// int32: signed 32bits int.
@@ -4177,7 +4177,10 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump();
slow.Bind();
@@ -4197,8 +4200,11 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
// Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
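Both eval-resolution call sites now pass the caller's strict mode flag as a fourth argument, so a direct eval compiles its code with the caller's strictness. The new mjsunit/strict-mode-eval.js covers the real cases; a minimal sketch:

  'use strict';
  eval('var leaked = 1;');  // strict direct eval gets its own var scope
  typeof leaked;            // 'undefined' -- the declaration does not escape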
1  deps/v8/src/arm/codegen-arm.h
@@ -287,6 +287,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
24 deps/v8/src/arm/constants-arm.h
@@ -380,10 +380,13 @@ enum VFPRegPrecision {
// VFP FPSCR constants.
+enum VFPConversionMode {
+ kFPSCRRounding = 0,
+ kDefaultRoundToZero = 1
+};
+
static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
@@ -393,13 +396,20 @@ static const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
};
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
// -----------------------------------------------------------------------------
// Hints.
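The rounding-mode enumerators now carry their FPSCR encodings (bits 23:22) directly, so a mode can be masked straight into the status register. Round-to-zero stays the default for the vcvt helpers because ECMAScript ToInt32 truncates toward zero, for example (plain JS, not code from this patch):

  3.7 | 0;   // 3  -- double-to-int32 truncates toward zero
  -3.7 | 0;  // -3 -- not the -4 that round-to-nearest or round-to-minus-infinity gives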
2  deps/v8/src/arm/deoptimizer-arm.cc
@@ -97,7 +97,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
- (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+ (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
10 deps/v8/src/arm/full-codegen-arm.cc
@@ -1554,7 +1554,10 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
op == Token::SUB ||
op == Token::MUL ||
op == Token::DIV ||
- op == Token::MOD) {
+ op == Token::MOD ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_AND ||
+ op == Token::BIT_XOR) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
@@ -1923,7 +1926,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ ldr(r1,
MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
__ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
25 deps/v8/src/arm/ic-arm.cc
@@ -1189,19 +1189,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// r0: key
// r1: receiver
__ bind(&check_pixel_array);
- __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &check_number_dictionary);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ cmp(r2, ip);
- __ b(hs, &slow);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
- __ ldrb(r2, MemOperand(ip, r2));
- __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi.
- __ Ret();
+
+ GenerateFastPixelArrayLoad(masm,
+ r1,
+ r0,
+ r3,
+ r4,
+ r2,
+ r5,
+ r0,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
117 deps/v8/src/arm/lithium-arm.cc
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
@@ -56,6 +57,31 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index,
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs can use either a fixed register or have a short lifetime (be
+ // used at the start of the instruction).
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ LUnallocated::cast(operand)->IsUsedAtStart() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand) {
ASSERT(spill_operand->IsDoubleStackSlot());
@@ -66,9 +92,8 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -158,6 +183,9 @@ const char* LArithmeticT::Mnemonic() const {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
default:
UNREACHABLE();
return NULL;
@@ -258,7 +286,15 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
}
@@ -390,7 +426,7 @@ void LChunk::MarkEmptyBlocks() {
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -406,7 +442,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -672,7 +707,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -697,7 +735,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -742,13 +780,23 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
@@ -887,7 +935,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -910,11 +957,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -1105,13 +1148,26 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1514,7 +1570,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new LDoubleToI(value);
+ LDoubleToI* res = new LDoubleToI(value, TempRegister());
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1621,7 +1677,20 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- return DefineAsRegister(new LLoadContextSlot);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context = UseTempRegister(instr->context());
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ value = UseTempRegister(instr->value());
+ } else {
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
}
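DoStoreContextSlot above pins the context (and, when a write barrier is needed, the value) in temp registers because the barrier clobbers its inputs. In source terms a context slot store is an assignment to a captured variable; an illustrative sketch:

  function makeCounter() {
    var n = 0;                            // captured, so n lives in a context slot
    return function() { return ++n; };   // the ++n store compiles to LStoreContextSlot
  }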
243 deps/v8/src/arm/lithium-arm.h
@@ -39,118 +39,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LStringCharCodeAt
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LStringLength
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
@@ -187,6 +75,8 @@ class LCodeGen;
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -197,6 +87,7 @@ class LCodeGen;
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -207,6 +98,10 @@ class LCodeGen;
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -218,22 +113,16 @@ class LCodeGen;
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -241,6 +130,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -249,14 +139,15 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(SubI) \
V(StringCharCodeAt) \
V(StringLength) \
+ V(SubI) \
V(TaggedToI) \
V(Throw) \
V(Typeof) \
@@ -290,7 +181,10 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -307,16 +201,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -330,11 +222,35 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
@@ -361,6 +277,11 @@ class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
@@ -1266,18 +1187,41 @@ class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1288,15 +1232,45 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
@@ -1431,10 +1405,11 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LDoubleToI(LOperand* value) {
+ explicit LDoubleToI(LOperand* value, LOperand* temp1) {
inputs_[0] = value;
+ temps_[0] = temp1;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1789,7 +1764,7 @@ class LChunk: public ZoneObject {
public:
explicit LChunk(HGraph* graph);
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
196 deps/v8/src/arm/lithium-codegen-arm.cc
@@ -223,7 +223,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -1174,7 +1174,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// scratch:left = left * right.
- __ smull(scratch, left, left, right);
+ __ smull(left, scratch, left, right);
__ mov(ip, Operand(left, ASR, 31));
__ cmp(ip, Operand(scratch));
DeoptimizeIf(ne, instr->environment());
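
The corrected smull order puts the low word of the 64-bit product in `left` and the high word in `scratch`; the multiply overflowed 32 bits exactly when the high word is not the sign extension of the low word, which is what the ASR-by-31 compare tests. The same check in portable C++:

    #include <cstdint>

    // True (with the 32-bit product in *out) iff left * right fits in
    // int32_t; mirrors smull + mov ip, Operand(left, ASR, 31) + cmp.
    bool MulNoOverflow(int32_t left, int32_t right, int32_t* out) {
      int64_t full = static_cast<int64_t>(left) * right;  // smull: hi:lo
      int32_t lo = static_cast<int32_t>(full);
      int32_t hi = static_cast<int32_t>(full >> 32);
      *out = lo;
      return hi == (lo >> 31);  // high word == sign extension of low word
    }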
@@ -1398,7 +1398,18 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vdiv(left, left, right);
break;
case Token::MOD: {
- Abort("DoArithmeticD unimplemented for MOD.");
+ // Save r0-r3 on the stack.
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+
+ __ PrepareCallCFunction(4, scratch0());
+ __ vmov(r0, r1, left);
+ __ vmov(r2, r3, right);
+ __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+ // Move the result into the double result register.
+ __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+
+ // Restore r0-r3.
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
break;
}
default:
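
VFP has no remainder instruction, so the MOD case calls out to C: r0-r3 are spilled because they are argument/scratch registers in the AAPCS, and under the soft-float calling convention each double argument travels in a core-register pair (r0:r1 and r2:r3), with the result coming back in r0:r1. What the runtime helper computes is, in effect, fmod:

    #include <cmath>
    #include <cstdio>

    // What ExternalReference::double_fp_operation(Token::MOD) boils down to.
    double DoubleMod(double left, double right) {
      return std::fmod(left, right);
    }

    int main() {
      std::printf("%g\n", DoubleMod(7.5, 2.0));  // prints 1.5
    }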
@@ -1595,17 +1606,58 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
- Abort("EmitCmpI untested.");
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("DoCmpID unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ Register scratch = scratch0();
+
+ Label unordered, done;
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ vmrs(pc);
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to unordered to return false.
+ __ b(vs, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ b(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("DoCmpIDAndBranch unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ vmrs(pc);
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to the false block label.
+ __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
@@ -2201,13 +2253,27 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- // TODO(antonm): load a context with a separate instruction.
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadContext(result, instr->context_chain_length());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ ldr(result, ContextOperand(result, instr->slot_index()));
}
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ ldr(context,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(value, ContextOperand(context, instr->slot_index()));
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, Operand(offset), value, scratch0());
+ }
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
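
The RecordWrite call in DoStoreContextSlot is a generational write barrier: the collector scans new space fully but relies on a remembered set to find old-to-new pointers, so any store that may plant a new-space value in an old-space object must log the written slot. A toy model of that invariant (MiniHeap is illustrative, not V8's heap API):

    #include <cstdint>
    #include <unordered_set>

    struct MiniHeap {
      uintptr_t new_lo = 0, new_hi = 0;       // new-space bounds
      std::unordered_set<void**> remembered;  // slots the GC must rescan

      bool InNewSpace(void* p) const {
        uintptr_t a = reinterpret_cast<uintptr_t>(p);
        return a >= new_lo && a < new_hi;
      }

      // Store plus barrier, the shape of str + RecordWrite above.
      void Write(void** slot, void* value) {
        *slot = value;
        if (InNewSpace(value)) remembered.insert(slot);
      }
    };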
@@ -2458,16 +2524,32 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(result, ContextOperand(context, Context::GLOBAL_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2625,34 +2707,53 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register prev_fpscr = ToRegister(instr->TempAt(0));
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch = scratch0();
+// Truncates a double using a specific rounding mode.
+// Clears the Z flag (ne condition) if a VFP exception is raised.
+void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2) {
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
// Set custom FPSCR:
- // - Set rounding mode to "Round towards Minus Infinity".
+ // - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
__ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+ __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ __ orr(scratch, scratch, Operand(rounding_mode));
__ vmsr(scratch);
// Convert the argument to an integer.
- __ vcvt_s32_f64(single_scratch,
- input,
- Assembler::FPSCRRounding,
- al);
+ __ vcvt_s32_f64(result,
+ double_input,
+ kFPSCRRounding);
- // Retrieve FPSCR and check for vfp exceptions.
+ // Retrieve FPSCR.
__ vmrs(scratch);
- // Restore FPSCR
+ // Restore FPSCR.
__ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
__ tst(scratch, Operand(kVFPExceptionMask));
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to the general-purpose result register.
@@ -2662,8 +2763,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
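
EmitVFPTruncate's FPSCR dance is: save the old control word, install the rounding mode with exception flags cleared and flush-to-zero off, convert, re-read the sticky exception bits, restore the old word, and leave the Z flag saying whether the conversion was clean. The same save/convert/check/restore sequence in portable C++, assuming the platform honors <cfenv> (strictly this also wants #pragma STDC FENV_ACCESS ON):

    #include <cfenv>
    #include <cmath>

    // Round-toward-minus-infinity conversion that fails the way the
    // kVFPExceptionMask test does: via the invalid-operation flag.
    bool TruncateRoundDown(double input, long* out) {
      const int saved = std::fegetround();             // vmrs prev_fpscr
      std::fesetround(FE_DOWNWARD);                    // kRoundToMinusInf
      std::feclearexcept(FE_ALL_EXCEPT);               // clear sticky flags
      *out = std::lrint(input);                        // vcvt, current mode
      const bool ok = !std::fetestexcept(FE_INVALID);  // tst exception mask
      std::fesetround(saved);                          // vmsr prev_fpscr
      return ok;                                       // !ok -> DeoptimizeIf(ne)
    }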
@@ -3297,7 +3398,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("DoDoubleToI unimplemented.");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ DoubleRegister double_input = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
+ : kRoundToNearest;
+
+ EmitVFPTruncate(rounding_mode,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+ // Deoptimize if we had a vfp invalid exception.
+ DeoptimizeIf(ne, instr->environment());
+ // Retrieve the result.
+ __ vmov(result_reg, single_scratch);
+
+ if (instr->truncating() &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand(0));
+ __ b(ne, &done);
+ // Check for -0.
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+
+ __ bind(&done);
+ }
}
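
The trailing minus-zero check is needed because an integer result of 0 is ambiguous: both 0.0 and -0.0 truncate to it, and only the double's sign bit, sitting in the upper word, distinguishes them; that is exactly what the vmov/tst pair reads. A portable equivalent:

    #include <cstdint>
    #include <cstring>

    // Mirrors vmov scratch1, double_input.high() + tst ..., kSignMask.
    bool HighWordSignBitSet(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // raw bit pattern, no conversion
      return (static_cast<uint32_t>(bits >> 32) & 0x80000000u) != 0;
    }
    // With result_reg == 0, a set sign bit means the input was -0.0
    // and the kBailoutOnMinusZero path deoptimizes.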
@@ -3497,7 +3633,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = !instr->hydrogen()->pretenure();
+ bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ mov(r1, Operand(shared_info));
5 deps/v8/src/arm/lithium-codegen-arm.h
@@ -219,6 +219,11 @@ class LCodeGen BASE_EMBEDDED {
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
+ void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
158 deps/v8/src/arm/macro-assembler-arm.cc
@@ -632,11 +632,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- // Compute the argv pointer in a callee-saved register.
- add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- sub(r6, r6, Operand(kPointerSize));
-
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Setup the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);