Roll V8 back to 3.10.8.13

commit 46b09e419039d2fbfde4029606de3d3aaef7af25 (1 parent: 3116522)
@isaacs authored
Showing with 4,208 additions and 7,189 deletions.
  1. +0 −107 deps/v8/ChangeLog
  2. +0 −27 deps/v8/DEPS
  3. +0 −7 deps/v8/Makefile
  4. +2 −15 deps/v8/SConstruct
  5. +158 −129 deps/v8/build/common.gypi
  6. +25 −11 deps/v8/build/gyp_v8
  7. +3 −9 deps/v8/build/standalone.gypi
  8. +3 −5 deps/v8/include/v8.h
  9. +0 −1  deps/v8/src/SConscript
  10. +33 −4 deps/v8/src/api.cc
  11. +2 −2 deps/v8/src/api.h
  12. +4 −5 deps/v8/src/arm/builtins-arm.cc
  13. +14 −21 deps/v8/src/arm/code-stubs-arm.cc
  14. +2 −2 deps/v8/src/arm/codegen-arm.cc
  15. +1 −3 deps/v8/src/arm/debug-arm.cc
  16. +4 −3 deps/v8/src/arm/full-codegen-arm.cc
  17. +32 −29 deps/v8/src/arm/ic-arm.cc
  18. +2 −3 deps/v8/src/arm/lithium-arm.cc
  19. +2 −7 deps/v8/src/arm/lithium-arm.h
  20. +60 −94 deps/v8/src/arm/lithium-codegen-arm.cc
  21. +39 −49 deps/v8/src/arm/macro-assembler-arm.cc
  22. +4 −5 deps/v8/src/arm/macro-assembler-arm.h
  23. +55 −115 deps/v8/src/arm/regexp-macro-assembler-arm.cc
  24. +6 −7 deps/v8/src/arm/regexp-macro-assembler-arm.h
  25. +6 −6 deps/v8/src/arm/simulator-arm.h
  26. +11 −39 deps/v8/src/arm/stub-cache-arm.cc
  27. +11 −9 deps/v8/src/bootstrapper.cc
  28. +31 −56 deps/v8/src/builtins.cc
  29. +9 −23 deps/v8/src/code-stubs.cc
  30. +0 −1  deps/v8/src/code-stubs.h
  31. +3 −3 deps/v8/src/codegen.h
  32. +18 −2 deps/v8/src/contexts.h
  33. +20 −41 deps/v8/src/d8.cc
  34. +1 −1  deps/v8/src/d8.h
  35. +16 −16 deps/v8/src/debug-agent.cc
  36. +1 −41 deps/v8/src/debug.cc
  37. +0 −50 deps/v8/src/debug.h
  38. +0 −134 deps/v8/src/elements-kind.cc
  39. +0 −210 deps/v8/src/elements-kind.h
  40. +190 −356 deps/v8/src/elements.cc
  41. +28 −36 deps/v8/src/elements.h
  42. +2 −3 deps/v8/src/factory.cc
  43. +7 −10 deps/v8/src/factory.h
  44. +0 −3  deps/v8/src/flag-definitions.h
  45. +0 −3  deps/v8/src/frames.h
  46. +0 −2  deps/v8/src/func-name-inferrer.h
  47. +0 −3  deps/v8/src/globals.h
  48. +4 −16 deps/v8/src/heap-inl.h
  49. +32 −38 deps/v8/src/heap.cc
  50. +17 −3 deps/v8/src/heap.h
  51. +5 −28 deps/v8/src/hydrogen-instructions.cc
  52. +44 −138 deps/v8/src/hydrogen-instructions.h
  53. +91 −274 deps/v8/src/hydrogen.cc
  54. +0 −3  deps/v8/src/hydrogen.h
  55. +0 −3  deps/v8/src/ia32/assembler-ia32.h
  56. +4 −5 deps/v8/src/ia32/builtins-ia32.cc
  57. +16 −22 deps/v8/src/ia32/code-stubs-ia32.cc
  58. +2 −2 deps/v8/src/ia32/codegen-ia32.cc
  59. +2 −29 deps/v8/src/ia32/debug-ia32.cc
  60. +8 −8 deps/v8/src/ia32/full-codegen-ia32.cc
  61. +17 −21 deps/v8/src/ia32/ic-ia32.cc
  62. +79 −110 deps/v8/src/ia32/lithium-codegen-ia32.cc
  63. +1 −2  deps/v8/src/ia32/lithium-codegen-ia32.h
  64. +4 −4 deps/v8/src/ia32/lithium-ia32.cc
  65. +4 −8 deps/v8/src/ia32/lithium-ia32.h
  66. +40 −50 deps/v8/src/ia32/macro-assembler-ia32.cc
  67. +4 −5 deps/v8/src/ia32/macro-assembler-ia32.h
  68. +43 −113 deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
  69. +4 −9 deps/v8/src/ia32/regexp-macro-assembler-ia32.h
  70. +4 −4 deps/v8/src/ia32/simulator-ia32.h
  71. +6 −21 deps/v8/src/ia32/stub-cache-ia32.cc
  72. +20 −77 deps/v8/src/ic.cc
  73. +2 −18 deps/v8/src/ic.h
  74. +5 −21 deps/v8/src/incremental-marking-inl.h
  75. +8 −35 deps/v8/src/incremental-marking.cc
  76. +3 −12 deps/v8/src/incremental-marking.h
  77. +1 −1  deps/v8/src/isolate.h
  78. +47 −41 deps/v8/src/jsregexp.cc
  79. +66 −40 deps/v8/src/jsregexp.h
  80. +0 −8 deps/v8/src/list-inl.h
  81. +0 −3  deps/v8/src/list.h
  82. +2 −5 deps/v8/src/lithium.cc
  83. +5 −57 deps/v8/src/liveedit.cc
  84. +11 −17 deps/v8/src/mark-compact-inl.h
  85. +138 −96 deps/v8/src/mark-compact.cc
  86. +20 −44 deps/v8/src/mark-compact.h
  87. +99 −131 deps/v8/src/messages.js
  88. +4 −5 deps/v8/src/mips/builtins-mips.cc
  89. +15 −23 deps/v8/src/mips/code-stubs-mips.cc
  90. +2 −2 deps/v8/src/mips/codegen-mips.cc
  91. +1 −3 deps/v8/src/mips/debug-mips.cc
  92. +4 −4 deps/v8/src/mips/full-codegen-mips.cc
  93. +35 −33 deps/v8/src/mips/ic-mips.cc
  94. +60 −98 deps/v8/src/mips/lithium-codegen-mips.cc
  95. +2 −3 deps/v8/src/mips/lithium-mips.cc
  96. +2 −8 deps/v8/src/mips/lithium-mips.h
  97. +39 −50 deps/v8/src/mips/macro-assembler-mips.cc
  98. +4 −5 deps/v8/src/mips/macro-assembler-mips.h
  99. +50 −110 deps/v8/src/mips/regexp-macro-assembler-mips.cc
  100. +5 −6 deps/v8/src/mips/regexp-macro-assembler-mips.h
  101. +5 −5 deps/v8/src/mips/simulator-mips.h
  102. +16 −50 deps/v8/src/mips/stub-cache-mips.cc
  103. +6 −103 deps/v8/src/objects-debug.cc
  104. +116 −209 deps/v8/src/objects-inl.h
  105. +1 −4 deps/v8/src/objects-printer.cc
  106. +291 −524 deps/v8/src/objects.cc
  107. +94 −114 deps/v8/src/objects.h
  108. +4 −16 deps/v8/src/parser.cc
  109. +0 −1  deps/v8/src/platform-freebsd.cc
  110. +7 −28 deps/v8/src/platform-posix.cc
  111. +3 −15 deps/v8/src/platform-win32.cc
  112. +1 −2  deps/v8/src/platform.h
  113. +2 −25 deps/v8/src/profile-generator-inl.h
  114. +822 −300 deps/v8/src/profile-generator.cc
  115. +214 −97 deps/v8/src/profile-generator.h
  116. +1 −2  deps/v8/src/regexp-macro-assembler-irregexp.cc
  117. +2 −2 deps/v8/src/regexp-macro-assembler-irregexp.h
  118. +5 −6 deps/v8/src/regexp-macro-assembler-tracer.cc
  119. +1 −1  deps/v8/src/regexp-macro-assembler-tracer.h
  120. +3 −7 deps/v8/src/regexp-macro-assembler.cc
  121. +2 −10 deps/v8/src/regexp-macro-assembler.h
  122. +0 −6 deps/v8/src/regexp.js
  123. +182 −265 deps/v8/src/runtime.cc
  124. +2 −5 deps/v8/src/runtime.h
  125. +2 −0  deps/v8/src/spaces.cc
  126. +2 −2 deps/v8/src/string-stream.cc
  127. +2 −0  deps/v8/src/v8-counters.h
  128. +3 −6 deps/v8/src/v8utils.h
  129. +3 −3 deps/v8/src/version.cc
  130. +1 −2  deps/v8/src/x64/assembler-x64.h
  131. +4 −5 deps/v8/src/x64/builtins-x64.cc
  132. +16 −25 deps/v8/src/x64/code-stubs-x64.cc
  133. +2 −2 deps/v8/src/x64/codegen-x64.cc
  134. +1 −14 deps/v8/src/x64/debug-x64.cc
  135. +1 −1  deps/v8/src/x64/disasm-x64.cc
  136. +10 −10 deps/v8/src/x64/full-codegen-x64.cc
  137. +16 −20 deps/v8/src/x64/ic-x64.cc
  138. +78 −140 deps/v8/src/x64/lithium-codegen-x64.cc
  139. +1 −2  deps/v8/src/x64/lithium-codegen-x64.h
  140. +2 −3 deps/v8/src/x64/lithium-x64.cc
  141. +2 −7 deps/v8/src/x64/lithium-x64.h
  142. +40 −50 deps/v8/src/x64/macro-assembler-x64.cc
  143. +4 −5 deps/v8/src/x64/macro-assembler-x64.h
  144. +51 −122 deps/v8/src/x64/regexp-macro-assembler-x64.cc
  145. +8 −17 deps/v8/src/x64/regexp-macro-assembler-x64.h
  146. +4 −4 deps/v8/src/x64/simulator-x64.h
  147. +10 −31 deps/v8/src/x64/stub-cache-x64.cc
  148. +0 −1  deps/v8/test/cctest/cctest.status
  149. +0 −38 deps/v8/test/cctest/test-func-name-inference.cc
  150. +77 −21 deps/v8/test/cctest/test-heap-profiler.cc
  151. +3 −62 deps/v8/test/cctest/test-heap.cc
  152. +0 −12 deps/v8/test/cctest/test-list.cc
  153. +5 −5 deps/v8/test/cctest/test-mark-compact.cc
  154. +8 −14 deps/v8/test/cctest/test-regexp.cc
  155. +0 −2  deps/v8/test/cctest/testcfg.py
  156. +0 −176 deps/v8/test/mjsunit/accessor-map-sharing.js
  157. +3 −3 deps/v8/test/mjsunit/array-construct-transition.js
  158. +10 −10 deps/v8/test/mjsunit/array-literal-transitions.js
  159. +0 −3  deps/v8/test/mjsunit/big-array-literal.js
  160. +2 −4 deps/v8/test/mjsunit/compiler/inline-construct.js
  161. +0 −88 deps/v8/test/mjsunit/debug-liveedit-stack-padding.js
  162. +4 −4 deps/v8/test/mjsunit/elements-kind.js
  163. +2 −2 deps/v8/test/mjsunit/elements-transition-hoisting.js
  164. +5 −5 deps/v8/test/mjsunit/elements-transition.js
  165. +33 −68 deps/v8/test/mjsunit/error-constructors.js
  166. +0 −1  deps/v8/test/mjsunit/mjsunit.status
  167. +0 −112 deps/v8/test/mjsunit/packed-elements.js
  168. +2 −1  deps/v8/test/mjsunit/regexp-capture-3.js
  169. +2 −0  deps/v8/test/mjsunit/regexp-capture.js
  170. +0 −132 deps/v8/test/mjsunit/regexp-global.js
  171. +0 −11 deps/v8/test/mjsunit/regexp.js
  172. +1 −1  deps/v8/test/mjsunit/regress/regress-117409.js
  173. +0 −33 deps/v8/test/mjsunit/regress/regress-128146.js
  174. +1 −4 deps/v8/test/mjsunit/regress/regress-1639-2.js
  175. +8 −14 deps/v8/test/mjsunit/regress/regress-1639.js
  176. +3 −3 deps/v8/test/mjsunit/regress/regress-1849.js
  177. +2 −2 deps/v8/test/mjsunit/regress/regress-1878.js
  178. +0 −32 deps/v8/test/mjsunit/regress/regress-2153.js
  179. +4 −4 deps/v8/test/mjsunit/regress/regress-crbug-122271.js
  180. +2 −2 deps/v8/test/mjsunit/regress/regress-smi-only-concat.js
  181. +0 −49 deps/v8/test/mjsunit/regress/regress-transcendental.js
  182. +0 −14 deps/v8/test/mjsunit/stack-traces.js
  183. +3 −4 deps/v8/test/mjsunit/unbox-double-arrays.js
  184. +1 −6 deps/v8/test/test262/testcfg.py
  185. +3 −11 deps/v8/tools/check-static-initializers.sh
  186. +0 −92 deps/v8/tools/fuzz-harness.sh
  187. +39 −242 deps/v8/tools/grokdump.py
  188. +39 −21 deps/v8/tools/gyp/v8.gyp
  189. +3 −3 deps/v8/tools/js2c.py
  190. +2 −2 deps/v8/tools/jsmin.py
  191. +1 −2  deps/v8/tools/presubmit.py
  192. +11 −25 deps/v8/tools/test-wrapper-gypbuild.py
107 deps/v8/ChangeLog
@@ -1,110 +1,3 @@
-2012-05-29: Version 3.11.7
-
- Get better function names in stack traces.
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-24: Version 3.11.6
-
- Fixed RegExp.prototype.toString for incompatible receivers
- (issue 1981).
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-23: Version 3.11.5
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-22: Version 3.11.4
-
- Some cleanup to common.gypi. This fixes some host/target combinations
- that weren't working in the Make build on Mac.
-
- Handle EINTR in socket functions and continue incomplete sends.
- (issue 2098)
-
- Fixed python deprecations. (issue 1391)
-
- Made socket send and receive more robust and return 0 on failure.
- (Chromium issue 15719)
-
- Fixed GCC 4.7 (C++11) compilation. (issue 2136)
-
- Set '-m32' option for host and target platforms
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-18: Version 3.11.3
-
- Disable optimization for functions that have scopes that cannot be
- reconstructed from the context chain. (issue 2071)
-
- Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
-
- Correctly check for native error objects. (Chromium issue 2138)
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-16: Version 3.11.2
-
- Revert r11496. (Chromium issue 128146)
-
- Implement map collection for incremental marking. (issue 1465)
-
- Add toString method to CallSite (which describes a frame of the
- stack trace).
-
-
-2012-05-15: Version 3.11.1
-
- Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
-
- Fix freebsd build. (V8 issue 2126)
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-11: Version 3.11.0
-
- Fixed compose-discard crasher from r11524 (issue 2123).
-
- Activated new global semantics by default. Global variables can
- now shadow properties of the global object (ES5.1 erratum).
-
- Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
- transitioning (Chromium issue 117409).
-
- Made Error.prototype.name writable again, as required by the spec and
- the web (Chromium issue 69187).
-
- Implemented map collection with incremental marking (issue 1465).
-
- Regexp: Fixed overflow in min-match-length calculation
- (Chromium issue 126412).
-
- MIPS: Fixed illegal instruction use on Loongson in code for
- Math.random() (issue 2115).
-
- Fixed crash bug in VisitChoice (Chromium issue 126272).
-
- Fixed unsigned-Smi check in MappedArgumentsLookup
- (Chromium issue 126414).
-
- Fixed LiveEdit for function with no locals (issue 825).
-
- Fixed register clobbering in LoadIC for interceptors
- (Chromium issue 125988).
-
- Implemented clearing of CompareICs (issue 2102).
-
- Performance and stability improvements on all platforms.
-
-
2012-05-03: Version 3.10.8
Enabled MIPS cross-compilation.
27 deps/v8/DEPS
@@ -1,27 +0,0 @@
-# Note: The buildbots evaluate this file with CWD set to the parent
-# directory and assume that the root of the checkout is in ./v8/, so
-# all paths in here must match this assumption.
-
-deps = {
- # Remember to keep the revision in sync with the Makefile.
- "v8/build/gyp":
- "http://gyp.googlecode.com/svn/trunk@1282",
-}
-
-deps_os = {
- "win": {
- "v8/third_party/cygwin":
- "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
-
- "v8/third_party/python_26":
- "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
- }
-}
-
-hooks = [
- {
- # A change to a .gyp, .gypi, or to GYP itself should run the generator.
- "pattern": ".",
- "action": ["python", "v8/build/gyp_v8"],
- },
-]
7 deps/v8/Makefile
@@ -137,12 +137,6 @@ ENVFILE = $(OUTDIR)/environment
# Target definitions. "all" is the default.
all: $(MODES)
-# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
-# having been created before.
-buildbot:
- $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
- builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
-
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -228,7 +222,6 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
- CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.android $(GYPFLAGS)
17 deps/v8/SConstruct
@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
- 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
+ 'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
- 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
+ 'CPPDEFINES': ['V8_SHARED']
}
},
'os:freebsd': {
@@ -1601,17 +1601,4 @@ except:
pass
-def WarnAboutDeprecation():
- print """
-#######################################################
-# WARNING: Building V8 with SCons is deprecated and #
-# will not work much longer. Please switch to using #
-# the GYP-based build now. Instructions are at #
-# http://code.google.com/p/v8/wiki/BuildingWithGYP. #
-#######################################################
- """
-
-WarnAboutDeprecation()
-import atexit
-atexit.register(WarnAboutDeprecation)
Build()
287 deps/v8/build/common.gypi
@@ -110,117 +110,151 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
- ['v8_target_arch=="arm"', {
- 'defines': [
- 'V8_TARGET_ARCH_ARM',
- ],
+ ['OS!="mac"', {
+ # TODO(mark): The OS!="mac" conditional is temporary. It can be
+ # removed once the Mac Chromium build stops setting target_arch to
+ # ia32 and instead sets it to mac. Other checks in this file for
+ # OS=="mac" can be removed at that time as well. This can be cleaned
+ # up once http://crbug.com/44205 is fixed.
'conditions': [
- [ 'v8_can_use_unaligned_accesses=="true"', {
+ ['v8_target_arch=="arm"', {
'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=1',
+ 'V8_TARGET_ARCH_ARM',
],
- }],
- [ 'v8_can_use_unaligned_accesses=="false"', {
- 'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=0',
+ 'conditions': [
+ [ 'v8_can_use_unaligned_accesses=="true"', {
+ 'defines': [
+ 'CAN_USE_UNALIGNED_ACCESSES=1',
+ ],
+ }],
+ [ 'v8_can_use_unaligned_accesses=="false"', {
+ 'defines': [
+ 'CAN_USE_UNALIGNED_ACCESSES=0',
+ ],
+ }],
+ [ 'v8_can_use_vfp_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_VFP_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_arm_eabi_hardfloat=="true"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=1',
+ 'CAN_USE_VFP_INSTRUCTIONS',
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': ['-mfloat-abi=hard',],
+ }],
+ ],
+ }, {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=0',
+ ],
+ }],
+ # The ARM assembler assumes the host is 32 bits,
+ # so force building 32-bit host tools.
+ ['host_arch=="x64" or OS=="android"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
+ }],
+ ],
+ }],
],
}],
- [ 'v8_can_use_vfp_instructions=="true"', {
+ ['v8_target_arch=="ia32"', {
'defines': [
- 'CAN_USE_VFP_INSTRUCTIONS',
+ 'V8_TARGET_ARCH_IA32',
],
}],
- [ 'v8_use_arm_eabi_hardfloat=="true"', {
- 'defines': [
- 'USE_EABI_HARDFLOAT=1',
- 'CAN_USE_VFP_INSTRUCTIONS',
- ],
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags': ['-mfloat-abi=hard',],
- }],
- ],
- }, {
+ ['v8_target_arch=="mips"', {
'defines': [
- 'USE_EABI_HARDFLOAT=0',
+ 'V8_TARGET_ARCH_MIPS',
],
- }],
- ],
- }], # v8_target_arch=="arm"
- ['v8_target_arch=="ia32"', {
- 'defines': [
- 'V8_TARGET_ARCH_IA32',
- ],
- }], # v8_target_arch=="ia32"
- ['v8_target_arch=="mips"', {
- 'defines': [
- 'V8_TARGET_ARCH_MIPS',
- ],
- 'variables': {
- 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
- },
- 'conditions': [
- ['mipscompiler=="yes"', {
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags': ['-EL'],
- 'ldflags': ['-EL'],
- 'conditions': [
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'cflags': ['-mhard-float'],
- 'ldflags': ['-mhard-float'],
- }, {
- 'cflags': ['-msoft-float'],
- 'ldflags': ['-msoft-float'],
- }],
- ['mips_arch_variant=="mips32r2"', {
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'variables': {
+ 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+ },
+ 'conditions': [
+ ['mipscompiler=="yes"', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': ['-EL'],
+ 'ldflags': ['-EL'],
+ 'conditions': [
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'cflags': ['-mhard-float'],
+ 'ldflags': ['-mhard-float'],
+ }, {
+ 'cflags': ['-msoft-float'],
+ 'ldflags': ['-msoft-float'],
+ }],
+ ['mips_arch_variant=="mips32r2"', {
+ 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'cflags': ['-mips3', '-Wa,-mips3'],
+ }, {
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ],
}],
- ['mips_arch_variant=="loongson"', {
- 'cflags': ['-mips3', '-Wa,-mips3'],
- }, {
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ ],
+ }],
+ [ 'v8_can_use_fpu_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'defines': [
+ '__mips_hard_float=1',
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }, {
+ 'defines': [
+ '__mips_soft_float=1'
+ ],
+ }],
+ ['mips_arch_variant=="mips32r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS32R2',],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': ['_MIPS_ARCH_LOONGSON',],
+ }],
+ # The MIPS assembler assumes the host is 32 bits,
+ # so force building 32-bit host tools.
+ ['host_arch=="x64"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
}],
],
}],
],
}],
- [ 'v8_can_use_fpu_instructions=="true"', {
- 'defines': [
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }],
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'defines': [
- '__mips_hard_float=1',
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }, {
+ ['v8_target_arch=="x64"', {
'defines': [
- '__mips_soft_float=1'
+ 'V8_TARGET_ARCH_X64',
],
}],
- ['mips_arch_variant=="mips32r2"', {
- 'defines': ['_MIPS_ARCH_MIPS32R2',],
+ ],
+ }, { # Section for OS=="mac".
+ 'conditions': [
+ ['target_arch=="ia32"', {
+ 'xcode_settings': {
+ 'ARCHS': ['i386'],
+ }
}],
- ['mips_arch_variant=="loongson"', {
- 'defines': ['_MIPS_ARCH_LOONGSON',],
+ ['target_arch=="x64"', {
+ 'xcode_settings': {
+ 'ARCHS': ['x86_64'],
+ }
}],
],
- }], # v8_target_arch=="mips"
- ['v8_target_arch=="x64"', {
- 'defines': [
- 'V8_TARGET_ARCH_X64',
- ],
- 'xcode_settings': {
- 'ARCHS': [ 'x86_64' ],
- },
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'StackReserveSize': '2097152',
- },
- },
- }], # v8_target_arch=="x64"
+ }],
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
@@ -238,10 +272,6 @@
'defines': [
'WIN32',
],
- 'msvs_configuration_attributes': {
- 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
- 'CharacterSet': '1',
- },
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
@@ -253,48 +283,21 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'conditions': [
- [ 'v8_no_strict_aliasing==1', {
- 'cflags': [ '-fno-strict-aliasing' ],
- }],
- ], # conditions
- }],
- ['OS=="solaris"', {
- 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
- }],
- ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd" or OS=="mac" or OS=="android") and \
- (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
- v8_target_arch=="mips")', {
- # Check whether the host compiler and target compiler support the
- # '-m32' option and set it if so.
- 'target_conditions': [
- ['_toolset=="host"', {
+ [ 'v8_target_arch!="x64"', {
+ # Pass -m32 to the compiler iff it understands the flag.
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
- 'xcode_settings': {
- 'ARCHS': [ 'i386' ],
- },
}],
- ['_toolset=="target"', {
- 'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
- },
- 'cflags': [ '<(m32flag)' ],
- 'ldflags': [ '<(m32flag)' ],
- 'xcode_settings': {
- 'ARCHS': [ 'i386' ],
- },
+ [ 'v8_no_strict_aliasing==1', {
+ 'cflags': [ '-fno-strict-aliasing' ],
}],
- ],
- }],
- ['OS=="freebsd" or OS=="openbsd"', {
- 'cflags': [ '-I/usr/local/include' ],
+ ], # conditions
}],
- ['OS=="netbsd"', {
- 'cflags': [ '-I/usr/pkg/include' ],
+ ['OS=="solaris"', {
+ 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
], # conditions
'configurations': {
@@ -319,11 +322,21 @@
},
'VCLinkerTool': {
'LinkIncremental': '2',
+ # For future reference, the stack size needs to be increased
+ # when building for Windows 64-bit, otherwise some test cases
+ # can cause stack overflow.
+ # 'StackReserveSize': '297152',
},
},
'conditions': [
+ ['OS=="freebsd" or OS=="openbsd"', {
+ 'cflags': [ '-I/usr/local/include' ],
+ }],
+ ['OS=="netbsd"', {
+ 'cflags': [ '-I/usr/pkg/include' ],
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
- 'cflags': [ '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@@ -351,6 +364,12 @@
}],
],
}],
+ ['OS=="freebsd" or OS=="openbsd"', {
+ 'cflags': [ '-I/usr/local/include' ],
+ }],
+ ['OS=="netbsd"', {
+ 'cflags': [ '-I/usr/pkg/include' ],
+ }],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3', # -O3
@@ -363,6 +382,11 @@
},
}], # OS=="mac"
['OS=="win"', {
+ 'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+ 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+ 'CharacterSet': '1',
+ },
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '2',
@@ -383,7 +407,12 @@
'VCLinkerTool': {
'LinkIncremental': '1',
'OptimizeReferences': '2',
+ 'OptimizeForWindows98': '1',
'EnableCOMDATFolding': '2',
+ # For future reference, the stack size needs to be
+ # increased when building for Windows 64-bit, otherwise
+ # some test cases can cause stack overflow.
+ # 'StackReserveSize': '297152',
},
},
}], # OS=="win"
36 deps/v8/build/gyp_v8
@@ -38,11 +38,6 @@ import sys
script_dir = os.path.dirname(__file__)
v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
-if __name__ == '__main__':
- os.chdir(v8_root)
- script_dir = os.path.dirname(__file__)
- v8_root = '.'
-
sys.path.insert(0, os.path.join(v8_root, 'tools'))
import utils
@@ -98,7 +93,7 @@ def additional_include_files(args=[]):
result.append(path)
# Always include standalone.gypi
- AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
+ AddInclude(os.path.join(script_dir, 'standalone.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -140,10 +135,7 @@ if __name__ == '__main__':
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
- # Note that this must not start with "./" or things break.
- # So we rely on having done os.chdir(v8_root) above and use the
- # relative path.
- args.append(os.path.join('build', 'all.gyp'))
+ args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
@@ -164,6 +156,28 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
+ target_arch = None
+ for p in gyp_args:
+ if p.find('-Dtarget_arch=') == 0:
+ target_arch = p
+ if target_arch is None:
+ gyp_args.append('-Dtarget_arch=ia32')
if utils.GuessOS() == 'linux':
- gyp_args.append('--generator-output=out')
+ gyp_args.append('-S.ia32')
run_gyp(gyp_args)
+
+ if utils.GuessOS() == 'linux':
+ gyp_args = list(args)
+ gyp_args.append('-Dtarget_arch=x64')
+ gyp_args.append('-S.x64')
+ run_gyp(gyp_args)
+
+ gyp_args = list(args)
+ gyp_args.append('-Dv8_target_arch=arm')
+ gyp_args.append('-S.arm')
+ run_gyp(gyp_args)
+
+ gyp_args = list(args)
+ gyp_args.append('-Dv8_target_arch=mips')
+ gyp_args.append('-S.mips')
+ run_gyp(gyp_args)
12 deps/v8/build/standalone.gypi
@@ -37,9 +37,8 @@
'variables': {
'variables': {
'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
- OS=="netbsd" or OS=="mac"', {
- # This handles the Unix platforms we generally deal with.
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+ # This handles the Linux platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
@@ -47,8 +46,7 @@
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
}, {
- # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
- # OS!="netbsd" and OS!="mac"
+ # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
'host_arch%': 'ia32',
}],
],
@@ -171,9 +169,6 @@
},
}], # OS=="win"
['OS=="mac"', {
- 'xcode_settings': {
- 'SYMROOT': '<(DEPTH)/xcodebuild',
- },
'target_defaults': {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
@@ -193,7 +188,6 @@
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
'PREBINDING': 'NO', # No -Wl,-prebind
- 'SYMROOT': '<(DEPTH)/xcodebuild',
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fno-strict-aliasing',
8 deps/v8/include/v8.h
@@ -62,13 +62,11 @@
#else // _WIN32
-// Setup for Linux shared library export.
+// Setup for Linux shared library export. There is no need to distinguish
+// between building or using the V8 shared library, but we should not
+// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
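
Net effect of the restored block above: with GCC 4 or newer and V8_SHARED defined, V8EXPORT marks a declaration default-visible regardless of whether V8 itself is being built or merely consumed; in every other configuration it expands to nothing. A minimal illustration (the class name is hypothetical):

    // Under -fvisibility=hidden builds, only V8EXPORT-annotated symbols
    // stay visible to users of the shared library; elsewhere the macro
    // is a no-op.
    class V8EXPORT Widget {  // hypothetical class, for illustration only
     public:
      void Method();
    };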
1  deps/v8/src/SConscript
@@ -68,7 +68,6 @@ SOURCES = {
diy-fp.cc
dtoa.cc
elements.cc
- elements-kind.cc
execution.cc
factory.cc
flags.cc
37 deps/v8/src/api.cc
@@ -5040,7 +5040,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastObjectElements()) {
+ if (!self->HasFastElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6045,6 +6045,13 @@ int HeapGraphNode::GetSelfSize() const {
}
+int HeapGraphNode::GetRetainedSize() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
+ return ToInternal(this)->retained_size();
+}
+
+
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6056,7 +6063,29 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->children()[index]);
+ &ToInternal(this)->children()[index]);
+}
+
+
+int HeapGraphNode::GetRetainersCount() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
+ return ToInternal(this)->retainers().length();
+}
+
+
+const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
+ return reinterpret_cast<const HeapGraphEdge*>(
+ ToInternal(this)->retainers()[index]);
+}
+
+
+const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
+ return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
}
@@ -6128,7 +6157,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
int HeapSnapshot::GetNodesCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
- return ToInternal(this)->entries().length();
+ return ToInternal(this)->entries()->length();
}
@@ -6136,7 +6165,7 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
- &ToInternal(this)->entries().at(index));
+ ToInternal(this)->entries()->at(index));
}
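
The api.cc hunks above restore the retainer and dominator accessors (GetRetainedSize, GetRetainersCount, GetRetainer, GetDominatorNode) that the 3.11 line had removed from the public heap-profiler API. A minimal embedder sketch against the restored 3.10-era interface; the snapshot title and the loop body are illustrative assumptions, not part of this commit:

    #include <v8.h>
    #include <v8-profiler.h>

    // Walk the retainers of the snapshot root using the restored accessors.
    void DumpRootRetainers() {
      const v8::HeapSnapshot* snapshot =
          v8::HeapProfiler::TakeSnapshot(v8::String::New("example"));
      const v8::HeapGraphNode* root = snapshot->GetRoot();
      for (int i = 0; i < root->GetRetainersCount(); ++i) {
        const v8::HeapGraphEdge* edge = root->GetRetainer(i);
        (void)edge;  // edge->GetFromNode() is the retaining node;
                     // root->GetRetainedSize() and GetDominatorNode()
                     // are likewise available again.
      }
    }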
4 deps/v8/src/api.h
@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
- ASSERT(value()->HasFastObjectElements());
+ ASSERT(value()->HasFastElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
- ASSERT(value_->HasFastObjectElements());
+ ASSERT(value_->HasFastElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
9 deps/v8/src/arm/builtins-arm.cc
@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -208,8 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
+ __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -441,10 +440,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,
35 deps/v8/src/arm/code-stubs-arm.cc
@@ -4824,32 +4824,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
+ const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
- // Argument 9 (sp[20]): Pass current isolate address.
+ // Argument 8 (sp[16]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 5 * kPointerSize));
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
- // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
+ // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
+ __ str(r0, MemOperand(sp, 3 * kPointerSize));
- // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
+ // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -4898,9 +4893,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmp(r0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@@ -7102,8 +7095,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7366,9 +7359,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
+ __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
// Store into the array literal requires a elements transition. Call into
// the runtime.
@@ -7380,7 +7373,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7391,8 +7384,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4 deps/v8/src/arm/codegen-arm.cc
@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
4 deps/v8/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -125,8 +125,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
7 deps/v8/src/arm/full-codegen-arm.cc
@@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1722,7 +1722,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1750,7 +1751,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (IsFastObjectElementsKind(constant_elements_kind)) {
+ if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
61 deps/v8/src/arm/ic-arm.cc
@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(address());
}
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1729,31 +1729,34 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // cmp rx, rx
- // b eq/ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne/eq, <target>
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsTstImmediate(instr_at_patch));
- patcher.masm()->cmp(reg, reg);
- }
+ ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+ ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+ Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b eq, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b ne, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
+ // This is patching a "jump if smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b ne, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b eq, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
5 deps/v8/src/arm/lithium-arm.cc
@@ -2082,9 +2082,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
9 deps/v8/src/arm/lithium-arm.h
@@ -1236,7 +1236,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1253,13 +1252,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1273,7 +1272,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1742,7 +1740,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1765,7 +1762,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1810,7 +1806,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
154 deps/v8/src/arm/lithium-codegen-arm.cc
@@ -2587,38 +2587,42 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
-
int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(scratch, Operand(map));
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- } else {
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
Label next;
+ __ cmp(scratch, Operand(map));
__ b(ne, &next);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ b(&done);
__ bind(&next);
}
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ cmp(scratch, Operand(map));
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ b(ne, &generic);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ b(&done);
+ __ bind(&generic);
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
}
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
}
@@ -2696,10 +2700,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
+ __ cmp(scratch, Operand(FAST_ELEMENTS));
+ __ b(eq, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2746,9 +2748,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ ldr(result, FieldMemOperand(scratch, offset));
+ __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2780,21 +2780,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
+ ? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@@ -2816,33 +2813,26 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key << shift_size)
+ ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
+ __ vldr(result.low(), scratch0(), 0);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+ __ vldr(result, scratch0(), 0);
}
} else {
Register result = ToRegister(instr->result());
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset)
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2870,12 +2860,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3743,16 +3730,10 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- if (instr->additional_index() != 0) {
- __ add(scratch,
- scratch,
- Operand(instr->additional_index() << kPointerSizeLog2));
- }
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@@ -3794,7 +3775,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
- ? Operand((constant_key << shift_size) +
+ ? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3812,7 +3793,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
- __ vstr(value, scratch, instr->additional_index() << shift_size);
+ __ vstr(value, scratch, 0);
}
@@ -3833,33 +3814,25 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
+ Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), scratch0(), 0);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, scratch0(), 0);
}
} else {
Register value(ToRegister(instr->value()));
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size))
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3878,10 +3851,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3918,22 +3888,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
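
The rolled-back DoTransitionElementsKind dispatches on exact (from, to) pairs instead of the predicate helpers. A compact sketch of that dispatch, under the assumption that only the three arms shown in this hunk are supported (handler names are placeholders):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS = 0,
      FAST_ELEMENTS = 1,
      FAST_DOUBLE_ELEMENTS = 2
    };

    enum Transition { kSimpleMapChange, kSmiToDouble, kDoubleToObject, kUnsupported };

    // Mirrors the three if/else arms: smi-only -> fast is a plain map swap
    // (plus write barrier); the other two pairs call out to builtins.
    Transition Classify(ElementsKind from, ElementsKind to) {
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS) return kSimpleMapChange;
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_DOUBLE_ELEMENTS) return kSmiToDouble;
      if (from == FAST_DOUBLE_ELEMENTS && to == FAST_ELEMENTS) return kDoubleToObject;
      return kUnsupported;
    }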
@@ -4707,9 +4675,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4860,11 +4827,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // Deopt if the literal boilerplate ElementsKind is of a type different than
+ // the expected one. The check isn't necessary if the boilerplate has already
+ // been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
88 deps/v8/src/arm/macro-assembler-arm.cc
@@ -1868,12 +1868,10 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
@@ -1881,25 +1879,22 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
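
All three checks rely on the STATIC_ASSERTed ordering of the elements kinds, so each reduces to one or two unsigned comparisons on the kind byte. A hedged sketch of the idea, with the Map bit-field packing simplified away (enum values mirror the asserts):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS = 0,  // per the STATIC_ASSERTs above
      FAST_ELEMENTS = 1
    };

    // CheckFastElements: fail on anything above the fast maximum.
    bool HasFastElements(unsigned kind) { return kind <= FAST_ELEMENTS; }

    // CheckFastObjectElements: fail at or below the smi-only maximum (ls)
    // and above the fast maximum (hi), leaving exactly FAST_ELEMENTS.
    bool HasFastObjectElements(unsigned kind) {
      return kind > FAST_SMI_ONLY_ELEMENTS && kind <= FAST_ELEMENTS;
    }

    // CheckFastSmiOnlyElements: fail on anything above the smi-only maximum.
    bool HasFastSmiOnlyElements(unsigned kind) {
      return kind <= FAST_SMI_ONLY_ELEMENTS;
    }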
@@ -2002,17 +1997,22 @@ void MacroAssembler::CompareMap(Register obj,
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
- if (!current_map) break;
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(current_map)));
- }
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
}
}
}
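
With ALLOW_ELEMENT_TRANSITION_MAPS, the rolled-back CompareMap accepts at most two maps besides the expected one: its FAST_ELEMENTS and FAST_DOUBLE_ELEMENTS transition targets, when they exist. A simplified, self-contained sketch of that acceptance logic (the struct is a stand-in for the real Map and its LookupElementsTransitionMap call):

    struct Map {
      const Map* to_fast;    // FAST_ELEMENTS transition target, or nullptr
      const Map* to_double;  // FAST_DOUBLE_ELEMENTS transition target, or nullptr
    };

    // One equality test per cmp/b(eq) pair emitted above.
    bool MapAcceptable(const Map* actual, const Map* expected) {
      if (actual == expected) return true;
      if (expected->to_fast != nullptr && actual == expected->to_fast) return true;
      if (expected->to_double != nullptr && actual == expected->to_double) return true;
      return false;
    }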
@@ -2865,38 +2865,28 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, scratch);
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
+ Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
map_out,
scratch,
&done);
@@ -3748,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
9 deps/v8/src/arm/macro-assembler-arm.h
@@ -512,8 +512,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out,
- bool can_have_holes);
+ Register map_out);
void LoadGlobalFunction(int index, Register function);
@@ -803,9 +802,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
170 deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,49 +43,45 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
+ * - r8 : points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
+ * - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
- * backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
+ * - fp[52] Isolate* isolate (Address of the current isolate)
+ * - fp[48] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[44] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
+ * - fp[32] return address (lr).
+ * - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
+ * - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
+ * - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
+ * - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
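
The layout comment corresponds to a set of fp-relative constants in regexp-macro-assembler-arm.h; transcribed as a sketch (offsets follow the rolled-back comment above, so treat them as illustrative rather than authoritative):

    // fp-relative byte offsets for the rolled-back frame layout.
    static const int kIsolate            = 52;   // Isolate* isolate
    static const int kDirectCall         = 48;   // 1 = direct call from JS code
    static const int kStackHighEnd       = 44;   // backtrack stack area base
    static const int kRegisterOutput     = 40;   // int* capture_array
    static const int kReturnAddress      = 36;   // secondary link/return address
    // fp[0..24]: backup of callee-saved registers r4..r10
    static const int kInputEnd           = -4;
    static const int kInputStart         = -8;
    static const int kStartIndex         = -12;
    static const int kInputString        = -16;
    static const int kInputStartMinusOne = -20;  // the "position - 1" constant
    static const int kAtStart            = -24;  // 1 iff match starts at index 0
    static const int kRegisterZero       = -28;  // first on-stack regexp register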
@@ -201,9 +197,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(ne, &not_at_start);
+ BranchOrBacktrack(eq, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -216,9 +212,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(ne, on_not_at_start);
+ BranchOrBacktrack(eq, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@@ -659,7 +655,6 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -683,9 +678,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -704,13 +698,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
+ __ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
+ __ b(ne, &exit_label_);
__ bind(&stack_ok);
@@ -731,46 +725,42 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand(0, RelocInfo::NONE));
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ cmp(r1, Operand(0));
+ __ mov(r1, Operand(1), LeaveCC, eq);
+ __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ str(r1, MemOperand(frame_pointer(), kAtStart));
- // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ str(r0, register_location(i));
- }
- }
+
+ // Address of register 0.
+ __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ mov(r2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &init_loop);
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ b(ne, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ mov(current_character(), Operand('\n'));
__ jmp(&start_label_);
+
// Exit code:
if (success_label_.is_linked()) {
// Save captures when successful.
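
Earlier in this hunk, the rolled-back prologue computes the at-start flag, seeds every saved register with the "position - 1" non-position, and loads either the previous character or '\n'. A hedged C++ reading of that logic (names and types are illustrative, not V8 API):

    #include <vector>

    struct RegexpFrame {
      int at_start;                // fp[kAtStart]: 1 iff start_index == 0
      std::vector<int> registers;  // the on-stack regexp registers
      int current_character;
    };

    RegexpFrame Prologue(int start_index, int num_saved_registers,
                         int position_minus_one, int previous_char) {
      RegexpFrame f;
      f.at_start = (start_index == 0) ? 1 : 0;
      // Capture registers start as "position - 1", a value no real match
      // position can take.
      f.registers.assign(num_saved_registers, position_minus_one);
      // There is no previous character at the very start of the input, so
      // '\n' is loaded instead, making ^ and \b see a line boundary.
      f.current_character = f.at_start ? '\n' : previous_char;
      return f;
    }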
@@ -796,10 +786,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
- if (global()) {
- // Keep capture start in r4 for the zero-length check later.
- __ mov(r4, r2);
- }
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@@ -811,54 +797,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ sub(r1, r1, Operand(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(r1, Operand(num_saved_registers_));
- __ b(lt, &return_r0);
-
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Special case for zero-length matches.
- // r4: capture start index
- __ cmp(current_input_offset(), r4);
- // Not a zero-length match, restart.
- __ b(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand(0));
- __ b(eq, &exit_label_);
- // Advance current position after a zero-length match.
- __ add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- __ b(&load_char_start_regexp);
- } else {
- __ mov(r0, Operand(SUCCESS));
- }
+ __ mov(r0, Operand(SUCCESS));
}
-
// Exit and return r0
__ bind(&exit_label_);
- if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_r0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -880,7 +822,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
- __ b(ne, &return_r0);
+ __ b(ne, &exit_label_);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -917,7 +859,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
+ __ jmp(&exit_label_);
}
CodeDesc code_desc;
@@ -1072,9 +1014,8 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
-bool RegExpMacroAssemblerARM::Succeed() {
+void RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
- return global();
}
@@ -1366,9 +1307,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- // r4 is not being used to store the capture start index at this point.
- __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r4;
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r0;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.
13 deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();