Upgrade V8 to 3.1.6

1 parent 2680522 · commit e33e7d1a3712add02cd08c0069bb56ba76e49aa5 · committed by @ry
Showing with 4,912 additions and 2,851 deletions.
  1. +13 −0 deps/v8/ChangeLog
  2. +28 −94 deps/v8/SConstruct
  3. +2 −2 deps/v8/src/SConscript
  4. +15 −1 deps/v8/src/accessors.cc
  5. +94 −16 deps/v8/src/arm/assembler-arm.cc
  6. +22 −6 deps/v8/src/arm/assembler-arm.h
  7. +111 −34 deps/v8/src/arm/code-stubs-arm.cc
  8. +19 −7 deps/v8/src/arm/code-stubs-arm.h
  9. +2 −2 deps/v8/src/arm/codegen-arm.cc
  10. +5 −0 deps/v8/src/arm/cpu-arm.cc
  11. +8 −6 deps/v8/src/arm/deoptimizer-arm.cc
  12. +111 −81 deps/v8/src/arm/full-codegen-arm.cc
  13. +48 −9 deps/v8/src/arm/lithium-arm.cc
  14. +45 −36 deps/v8/src/arm/lithium-arm.h
  15. +180 −293 deps/v8/src/arm/lithium-codegen-arm.cc
  16. +31 −46 deps/v8/src/arm/lithium-codegen-arm.h
  17. +303 −0 deps/v8/src/arm/lithium-gap-resolver-arm.cc
  18. +84 −0 deps/v8/src/arm/lithium-gap-resolver-arm.h
  19. +31 −11 deps/v8/src/arm/macro-assembler-arm.cc
  20. +18 −6 deps/v8/src/arm/macro-assembler-arm.h
  21. +9 −6 deps/v8/src/arm/stub-cache-arm.cc
  22. +6 −2 deps/v8/src/assembler.cc
  23. +8 −0 deps/v8/src/assembler.h
  24. +34 −38 deps/v8/src/compiler.cc
  25. +2 −3 deps/v8/src/compiler.h
  26. +5 −2 deps/v8/src/cpu-profiler-inl.h
  27. +38 −102 deps/v8/src/cpu-profiler.cc
  28. +16 −32 deps/v8/src/cpu-profiler.h
  29. +5 −0 deps/v8/src/execution.cc
  30. +5 −0 deps/v8/src/flag-definitions.h
  31. +2 −16 deps/v8/src/gdb-jit.cc
  32. +12 −22 deps/v8/src/handles.cc
  33. +108 −90 deps/v8/src/heap.cc
  34. +64 −3 deps/v8/src/heap.h
  35. +106 −104 deps/v8/src/hydrogen-instructions.cc
  36. +539 −481 deps/v8/src/hydrogen-instructions.h
  37. +548 −404 deps/v8/src/hydrogen.cc
  38. +106 −81 deps/v8/src/hydrogen.h
  39. +4 −7 deps/v8/src/ia32/assembler-ia32.h
  40. +2 −2 deps/v8/src/ia32/code-stubs-ia32.cc
  41. +6 −9 deps/v8/src/ia32/codegen-ia32.cc
  42. +8 −6 deps/v8/src/ia32/deoptimizer-ia32.cc
  43. +18 −13 deps/v8/src/ia32/full-codegen-ia32.cc
  44. +112 −26 deps/v8/src/ia32/lithium-codegen-ia32.cc
  45. +14 −0 deps/v8/src/ia32/lithium-codegen-ia32.h
  46. +44 −7 deps/v8/src/ia32/lithium-ia32.cc
  47. +33 −28 deps/v8/src/ia32/lithium-ia32.h
  48. +22 −0 deps/v8/src/ia32/macro-assembler-ia32.cc
  49. +14 −1 deps/v8/src/ia32/macro-assembler-ia32.h
  50. +11 −8 deps/v8/src/ia32/stub-cache-ia32.cc
  51. +52 −41 deps/v8/src/ic.cc
  52. +1 −1 deps/v8/src/ic.h
  53. +15 −5 deps/v8/src/lithium-allocator.cc
  54. +0 −2 deps/v8/src/lithium-allocator.h
  55. +5 −3 deps/v8/src/lithium.h
  56. +2 −0 deps/v8/src/log-utils.cc
  57. +61 −90 deps/v8/src/log.cc
  58. +12 −14 deps/v8/src/log.h
  59. +4 −8 deps/v8/src/mark-compact.cc
  60. +53 −28 deps/v8/src/objects.cc
  61. +5 −0 deps/v8/src/objects.h
  62. +30 −12 deps/v8/src/platform-cygwin.cc
  63. +1 −1 deps/v8/src/platform-freebsd.cc
  64. +1 −1 deps/v8/src/platform-linux.cc
  65. +1 −1 deps/v8/src/platform-macos.cc
  66. +1 −1 deps/v8/src/platform-openbsd.cc
  67. +1 −1 deps/v8/src/platform-solaris.cc
  68. +1 −1 deps/v8/src/platform-win32.cc
  69. +5 −9 deps/v8/src/platform.h
  70. +1 −10 deps/v8/src/profile-generator-inl.h
  71. +37 −41 deps/v8/src/profile-generator.cc
  72. +9 −3 deps/v8/src/profile-generator.h
  73. +52 −32 deps/v8/src/runtime.cc
  74. +6 −6 deps/v8/src/top.cc
  75. +5 −0 deps/v8/src/v8.cc
  76. +2 −2 deps/v8/src/v8natives.js
  77. +1 −1 deps/v8/src/version.cc
  78. +7 −7 deps/v8/src/x64/assembler-x64.cc
  79. +5 −5 deps/v8/src/x64/assembler-x64.h
  80. +100 −52 deps/v8/src/x64/code-stubs-x64.cc
  81. +18 −7 deps/v8/src/x64/code-stubs-x64.h
  82. +6 −9 deps/v8/src/x64/codegen-x64.cc
  83. +8 −6 deps/v8/src/x64/deoptimizer-x64.cc
  84. +18 −12 deps/v8/src/x64/full-codegen-x64.cc
  85. +755 −74 deps/v8/src/x64/lithium-codegen-x64.cc
  86. +10 −2 deps/v8/src/x64/lithium-codegen-x64.h
  87. +179 −55 deps/v8/src/x64/lithium-x64.cc
  88. +111 −45 deps/v8/src/x64/lithium-x64.h
  89. +52 −15 deps/v8/src/x64/macro-assembler-x64.cc
  90. +42 −11 deps/v8/src/x64/macro-assembler-x64.h
  91. +9 −6 deps/v8/src/x64/stub-cache-x64.cc
  92. +0 −21 deps/v8/test/cctest/cctest.status
  93. +88 −2 deps/v8/test/cctest/test-api.cc
  94. +3 −7 deps/v8/test/cctest/test-cpu-profiler.cc
  95. +4 −4 deps/v8/test/cctest/test-decls.cc
  96. +15 −17 deps/v8/test/cctest/test-log-stack-tracer.cc
  97. +4 −19 deps/v8/test/cctest/test-log.cc
  98. +7 −4 deps/v8/test/cctest/test-parsing.cc
  99. +3 −3 deps/v8/test/cctest/test-profile-generator.cc
  100. +3 −2 deps/v8/test/cctest/test-serialize.cc
  101. +6 −9 deps/v8/test/mjsunit/{regress/regress-1105.js → compiler/regress-valueof.js}
  102. +7 −0 deps/v8/test/mjsunit/mjsunit.js
  103. +17 −0 deps/v8/test/mjsunit/regress/regress-1151.js
Note: the diff was too large to display in full; the remainder below is truncated.
13 deps/v8/ChangeLog
@@ -1,3 +1,16 @@
+2011-02-24: Version 3.1.6
+
+ Fixed a number of crash bugs.
+
+ Added support for Cygwin (issue 64).
+
+ Improved Crankshaft for x64 and ARM.
+
+ Added Crankshaft support for stores to pixel arrays.
+
+ Fixed issue in CPU profiler with Crankshaft.
+
+
2011-02-16: Version 3.1.5
Change RegExp parsing to disallow /(*)/.
122 deps/v8/SConstruct
@@ -27,7 +27,6 @@
import platform
import re
-import subprocess
import sys
import os
from os.path import join, dirname, abspath
@@ -143,9 +142,6 @@ LIBRARY_FLAGS = {
# Use visibility=default to disable this.
'CXXFLAGS': ['-fvisibility=hidden']
},
- 'strictaliasing:off': {
- 'CCFLAGS': ['-fno-strict-aliasing']
- },
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
@@ -306,6 +302,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -655,16 +652,8 @@ def Abort(message):
sys.exit(1)
-def GuessOS(env):
- return utils.GuessOS()
-
-
-def GuessArch(env):
- return utils.GuessArchitecture()
-
-
-def GuessToolchain(env):
- tools = env['TOOLS']
+def GuessToolchain(os):
+ tools = Environment()['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
@@ -673,9 +662,7 @@ def GuessToolchain(env):
return None
-def GuessVisibility(env):
- os = env['os']
- toolchain = env['toolchain'];
+def GuessVisibility(os, toolchain):
if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
# MinGW / Cygwin can't do it.
return 'default'
@@ -685,35 +672,27 @@ def GuessVisibility(env):
return 'hidden'
-def GuessStrictAliasing(env):
- # There seems to be a problem with gcc 4.5.x
- # see http://code.google.com/p/v8/issues/detail?id=884
- # it can be worked around by disabling strict aliasing
- toolchain = env['toolchain'];
- if toolchain == 'gcc':
- env = Environment(tools=['gcc'])
- version = subprocess.Popen([env['CC'], '-dumpversion'],
- stdout=subprocess.PIPE).communicate()[0]
- if version.find('4.5.') == 0:
- return 'off'
- return 'default'
+OS_GUESS = utils.GuessOS()
+TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
+ARCH_GUESS = utils.GuessArchitecture()
+VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
- 'guess': GuessToolchain,
- 'help': 'the toolchain to use'
+ 'default': TOOLCHAIN_GUESS,
+ 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
- 'guess': GuessOS,
- 'help': 'the os to build for'
+ 'default': OS_GUESS,
+ 'help': 'the os to build for (%s)' % OS_GUESS
},
'arch': {
'values':['arm', 'ia32', 'x64', 'mips'],
- 'guess': GuessArch,
- 'help': 'the architecture to build for'
+ 'default': ARCH_GUESS,
+ 'help': 'the architecture to build for (%s)' % ARCH_GUESS
},
'regexp': {
'values': ['native', 'interpreted'],
@@ -822,15 +801,8 @@ SIMPLE_OPTIONS = {
},
'visibility': {
'values': ['default', 'hidden'],
- 'guess': GuessVisibility,
- 'depends': ['os', 'toolchain'],
- 'help': 'shared library symbol visibility'
- },
- 'strictaliasing': {
- 'values': ['default', 'off'],
- 'guess': GuessStrictAliasing,
- 'depends': ['toolchain'],
- 'help': 'assume strict aliasing while optimizing'
+ 'default': VISIBILITY_GUESS,
+ 'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
@@ -840,26 +812,6 @@ SIMPLE_OPTIONS = {
}
-def AddOption(result, name, option):
- if 'guess' in option:
- # Option has a guess function
- guess = option.get('guess')
- guess_env = Environment(options=result)
- # Check if all options that the guess function depends on are set
- if 'depends' in option:
- for dependency in option.get('depends'):
- if not dependency in guess_env:
- return False
- default = guess(guess_env)
- else:
- # Option has a fixed default
- default = option.get('default')
-
- help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
- result.Add(name, help, default)
- return True
-
-
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
@@ -867,28 +819,12 @@ def GetOptions():
result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
- options = SIMPLE_OPTIONS
- while len(options):
- postpone = {}
- for (name, option) in options.iteritems():
- if not AddOption(result, name, option):
- postpone[name] = option
- options = postpone
+ for (name, option) in SIMPLE_OPTIONS.iteritems():
+ help = '%s (%s)' % (name, ", ".join(option['values']))
+ result.Add(name, help, option.get('default'))
return result
-def GetTools(opts):
- env = Environment(options=opts)
- os = env['os']
- toolchain = env['toolchain']
- if os == 'win32' and toolchain == 'gcc':
- return ['mingw']
- elif os == 'win32' and toolchain == 'msvc':
- return ['msvc', 'mslink', 'mslib', 'msvs']
- else:
- return ['default']
-
-
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
@@ -969,7 +905,7 @@ def VerifyOptions(env):
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
- if (not name in env):
+ if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
@@ -1097,7 +1033,7 @@ def ParseEnvOverrides(arg, imports):
return overrides
-def BuildSpecific(env, mode, env_overrides, tools):
+def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
@@ -1150,7 +1086,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
- exports='context tools',
+ exports='context',
duplicate=False
)
@@ -1170,21 +1106,21 @@ def BuildSpecific(env, mode, env_overrides, tools):
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
- d8_env = Environment(tools=tools)
+ d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
- sample_env = Environment(tools=tools)
+ sample_env = Environment()
sample_env.Replace(**context.flags['sample'])
sample_env.Prepend(LIBS=[library_name])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
- exports='sample context tools',
+ exports='sample context',
duplicate=False
)
sample_name = sample + suffix
@@ -1197,7 +1133,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
cctest_program = cctest_env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
- exports='context object_files tools',
+ exports='context object_files',
duplicate=False
)
context.cctest_targets.append(cctest_program)
@@ -1207,9 +1143,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
def Build():
opts = GetOptions()
- tools = GetTools(opts)
- env = Environment(options=opts, tools=tools)
-
+ env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
@@ -1223,7 +1157,7 @@ def Build():
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
- context = BuildSpecific(env.Copy(), mode, env_overrides, tools)
+ context = BuildSpecific(env.Copy(), mode, env_overrides)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
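
The SConstruct changes above replace the per-option guess functions with defaults computed once at load time (OS_GUESS, TOOLCHAIN_GUESS, ARCH_GUESS, VISIBILITY_GUESS), so options left unspecified simply fall back to those guesses. A typical invocation is unchanged by the patch, for example:

    scons mode=release arch=ia32 toolchain=gcc

with any omitted option taking the guessed default now shown in its help text.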
4 deps/v8/src/SConscript
@@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
-Import('tools')
SOURCES = {
@@ -154,6 +153,7 @@ SOURCES = {
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
+ arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
@@ -305,7 +305,7 @@ def Abort(message):
def ConfigureObjectFiles():
- env = Environment(tools=tools)
+ env = Environment()
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
16 deps/v8/src/accessors.cc
@@ -446,8 +446,15 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
+ while (!function->should_have_prototype()) {
+ found_it = false;
+ function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
+ &found_it);
+ // There has to be one because we hit the getter.
+ ASSERT(found_it);
+ }
+
if (!function->has_prototype()) {
- if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
@@ -467,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
+ if (!function->should_have_prototype()) {
+ // Since we hit this accessor, object will have no prototype property.
+ return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
+ value,
+ NONE);
+ }
+
if (function->has_initial_map()) {
// If the function has allocated the initial map
// replace it with a copy containing the new prototype.
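
The two hunks above make the function 'prototype' accessors tolerate functions that should not have a prototype (builtins such as parseInt). A minimal JavaScript sketch of the case the setter hunk handles; this is an illustration, not the new regression test:

    parseInt.prototype = 0;          // parseInt has no 'prototype'; the setter now
                                     // just defines an own 'prototype' property
    var o = Object.create(parseInt);
    o.prototype = 1;                 // same accessor reached through the prototype chain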
110 deps/v8/src/arm/assembler-arm.cc
@@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
+
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vldr(const DwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
dst.split_code(&sd, &d);
+ ASSERT(offset >= 0);
+
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vldr(const SwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
src.split_code(&sd, &d);
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
+ ASSERT(offset >= 0);
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vstr(const SwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(src, operand.rn(), operand.offset(), cond);
}
28 deps/v8/src/arm/assembler-arm.h
@@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // Return true of this operand fits in one instruction so that no
+ // Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
bool must_use_constant_pool() const;
@@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED {
offset_ = offset;
}
- uint32_t offset() {
+ uint32_t offset() const {
ASSERT(rm_.is(no_reg));
return offset_;
}
@@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ bool OffsetIsUint12Encodable() const {
+ return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
+ }
+
private:
Register rn_; // base
Register rm_; // register offset
@@ -902,22 +906,34 @@ class Assembler : public Malloced {
void vldr(const DwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const DwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const SwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const SwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
145 deps/v8/src/arm/code-stubs-arm.cc
@@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
Register result = r5;
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
@@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
- __ AllocateHeapNumber(
- r5, scratch1, scratch2, heap_number_map, gc_required);
+ Register result = r5;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
// r2: Answer as signed int32.
// r5: Heap number to write answer into.
@@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
+ Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
- // Try to add strings before calling runtime.
+ __ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateAddStrings(masm);
}
- GenericBinaryOpStub stub(op_, mode_, r1, r0);
- __ TailCallStub(&stub);
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
Register left = r1;
Register right = r0;
- Label call_runtime;
- // Check if first argument is a string.
- __ JumpIfSmi(left, &call_runtime);
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
+ __ b(ge, &left_not_string);
- // First argument is a a string, test second.
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime);
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
+ __ TailCallStub(&string_add_right_stub);
// At least one argument is not a string.
__ bind(&call_runtime);
@@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// The offset was stored in r4 safepoint slot.
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+ __ LoadFromSafepointRegisterSlot(scratch, r4);
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
@@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
// Stack on entry:
- // sp[0]: second argument.
- // sp[4]: first argument.
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
// Load the two arguments.
__ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- STATIC_ASSERT(kSmiTag == 0);
+ if (flags_ == NO_STRING_ADD_FLAGS) {
__ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
}
// Both arguments are strings.
// r0: first string
// r1: second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
{
Label strings_not_empty;
// Check if either of the strings are empty. In that case return the other.
@@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
@@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ascii strings.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ascii the result is an ascii cons string.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
__ bind(&string_add_flat_result);
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_JS);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
+ __ b(lt, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ str(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ CompareObjectType(
+ arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
+ __ b(ne, slow);
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ and_(scratch2,
+ scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ cmp(scratch2,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ b(ne, slow);
+ __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ str(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
}
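
GenerateConvertArgument lets the stub handle '+' when only one operand is statically known to be a string: the other operand is converted in place if it is a number (via the number-to-string cache) or a String wrapper that is safe to unwrap, and anything else falls through to the builtin. A hedged JavaScript sketch of the shapes covered:

    var a = 'value: ' + 42;         // right operand is a number, converted in the stub
    var b = new String('x') + '!';  // left operand is a safe String wrapper, unwrapped
    var c = 'n' + {};               // neither case applies; falls back to the builtin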
26 deps/v8/src/arm/code-stubs-arm.h
@@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
4 deps/v8/src/arm/codegen-arm.cc
@@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
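
The relaxed assertion reflects ES5 semantics: deleting an unqualified identifier is a syntax error in strict mode, but 'delete this' is still legal (and evaluates to true), so strict code can reach this path. For example:

    'use strict';
    function f() { return delete this; }   // legal, returns true
    // function g(x) { return delete x; }  // SyntaxError in strict mode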
5 deps/v8/src/arm/cpu-arm.cc
@@ -50,6 +50,11 @@ void CPU::Setup() {
void CPU::FlushICache(void* start, size_t size) {
+ // Nothing to do flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
#if defined (USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator
14 deps/v8/src/arm/deoptimizer-arm.cc
@@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
192 deps/v8/src/arm/full-codegen-arm.cc
@@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, r3, r1, r2);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- // Check the stack for overflow or break request.
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
@@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
@@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- Register current = cp;
+ Register context = cp;
Register next = r3;
Register temp = r4;
@@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
+ __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
// Walk the rest of the chain without clobbering cp.
- current = next;
+ context = next;
}
}
// Check that last extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
- __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
@@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &skip);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT: {
+ __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ ldr(r2, ContextOperand(r1, slot->index()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(ne, &skip);
+ __ str(r0, ContextOperand(r1, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r3, r0); // Preserve the stored value in r0.
+ __ RecordWrite(r1, Operand(offset), r3, r2);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(r0);
+ __ mov(r0, Operand(slot->var()->name()));
+ __ Push(cp, r0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &done);
- }
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r2, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &done);
- }
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
@@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(r0); // Value.
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
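
The INIT_CONST rewrite above implements V8's legacy, function-scoped 'const': the declaration is hoisted like 'var', reinitialization is detected via the hole value, and the initializer writes the function-context slot directly, bypassing any intervening 'with' scope. A rough sketch of the observable behaviour, assuming sloppy-mode const of that era:

    function f(obj) {
      with (obj) {
        const c = 1;   // initializes the function-scope 'c', not obj.c
      }
      return c;        // 1
    }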
57 deps/v8/src/arm/lithium-arm.cc
@@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
- Abort("MathRound LUnaryMathOperation not implemented");
- return NULL;
+ return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
@@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- Abort("LPower instruction not implemented on ARM");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), d2) :
+ UseFixed(instr->right(), r0);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context = UseTempRegister(instr->context());
+ LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
+ context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
@@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ Abort("DoStorePixelArrayElement not implemented");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
@@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object (we bail out in all other
- // cases).
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
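
With kMathRound no longer aborting and LPower implemented (Math.pow lowered to a C call using fixed double registers d1/d2, result in d3), code like the following can now be compiled by Crankshaft on ARM instead of forcing a bailout; a small, hedged example of the kind of code affected:

    function dist(x, y) { return Math.pow(x * x + y * y, 0.5); }
    function snap(x)    { return Math.round(x); }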
81 deps/v8/src/arm/lithium-arm.h
@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -135,6 +133,7 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
+ V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1510,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
+ LStoreNamedField(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreNamedField: public LStoreNamed {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+ Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1543,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed {
};
-class LStoreNamedGeneric: public LStoreNamed {
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-};
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-class LStoreKeyedGeneric: public LStoreKeyed {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
473 deps/v8/src/arm/lithium-codegen-arm.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator {
};
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
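The parameter-copy loop above addresses each incoming argument relative to the caller's stack pointer. A minimal sketch of that offset arithmetic, assuming a 32-bit ARM layout where kPointerSize is 4 and kCallerSPOffset is two pointers above fp (saved fp and return address) — these constants are illustrative stand-ins, not the V8 definitions:

#include <cstdio>

int main() {
  const int kPointerSize = 4;                    // assumed: 32-bit target
  const int kCallerSPOffset = 2 * kPointerSize;  // assumed: above saved fp and lr
  const int num_parameters = 3;
  for (int i = 0; i < num_parameters; ++i) {
    // Parameter i lives (num_parameters - 1 - i) slots above the caller's
    // stack pointer, exactly as in the loop above.
    int parameter_offset =
        kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
    std::printf("parameter %d is at [fp + %d]\n", i, parameter_offset);
  }
  return 0;
}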
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) {
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- // TODO(regis): Revisit.
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
@@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
}
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
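The new helper addresses the second word of a double stack slot. A minimal sketch of the same arithmetic, assuming kPointerSize is 4 (32-bit ARM) with offsets taken relative to fp:

#include <cstdio>

// Same arithmetic as ToHighMemOperand above; kPointerSize = 4 is an assumption.
int HighWordOffset(int index) {
  const int kPointerSize = 4;
  if (index >= 0) {
    // Local or spill slot: skip fp, function, context, and the first
    // word of the double.
    return -(index + 3) * kPointerSize + kPointerSize;
  }
  // Incoming parameter: skip the return address and the first word.
  return -(index - 1) * kPointerSize + kPointerSize;
}

int main() {
  for (int index : {0, 2, -1, -3}) {
    std::printf("double slot %2d -> high word at [fp %+d]\n",
                index, HighWordOffset(index));
  }
  return 0;
}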
+
+
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
@@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // d0 must always be a scratch register.
- DoubleRegister dbl_scratch = d0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register core_scratch = scratch0();
- bool destroys_core_scratch = false;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(dbl_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
- if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), core_scratch);
- ASSERT(destroys_core_scratch);
- } else if (to->IsStackSlot()) {
- __ str(core_scratch, ToMemOperand(to));
- ASSERT(destroys_core_scratch);
- } else if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), dbl_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsConstantOperand()) {
- __ mov(core_scratch, ToOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsStackSlot()) {
- __ ldr(core_scratch, ToMemOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsDoubleRegister()) {
- __ vmov(dbl_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- }
- } else if (from->IsConstantOperand()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ mov(ip, ToOperand(from));
- __ str(ip, ToMemOperand(to));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ str(ToRegister(from), ToMemOperand(to));
- }
- } else if (to->IsRegister()) {
- ASSERT(from->IsStackSlot());
- __ ldr(ToRegister(to), ToMemOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ ldr(ip, ToMemOperand(from));
- __ str(ip, ToMemOperand(to));
- } else if (from->IsDoubleRegister()) {
- if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
- }
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- }
-
- if (destroys_core_scratch) {
- __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
+ resolver_.Resolve(move);
}
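For reference, the removed body above dispatched by hand on the operand kinds of every move, using ip and d0 as scratch because ARM has no memory-to-memory move; the new code delegates all of that to the gap resolver. A schematic sketch of that dispatch (plain C++ printing illustrative pseudo-assembly, doubles omitted; not V8 code):

#include <cstdio>

enum Kind { kConstant, kRegister, kStackSlot };

// Prints the rough instruction pattern the removed code emitted for each
// combination of source and destination kinds.
void EmitMove(Kind from, Kind to) {
  if (from == kConstant && to == kRegister) {
    std::puts("mov rd, #imm");
  } else if (from == kConstant && to == kStackSlot) {
    std::puts("mov ip, #imm ; str ip, [dst]");
  } else if (from == kRegister && to == kRegister) {
    std::puts("mov rd, rs");
  } else if (from == kRegister && to == kStackSlot) {
    std::puts("str rs, [dst]");
  } else if (from == kStackSlot && to == kRegister) {
    std::puts("ldr rd, [src]");
  } else {
    // Memory-to-memory: no single ARM instruction, so go through ip.
    std::puts("ldr ip, [src] ; str ip, [dst]");
  }
}

int main() {
  EmitMove(kConstant, kRegister);
  EmitMove(kStackSlot, kStackSlot);
  return 0;
}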
@@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) {
DeferredModI(LCodeGen* codegen, LModI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD);
}
private:
LModI* instr_;
@@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&ok);
}
- // Try a few common cases before using the generic stub.
+ // Try a few common cases before using the stub.
Label call_stub;
const int kUnfolds = 3;
// Skip if either side is negative.
@@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(result, scratch, Operand(left));
__ bind(&call_stub);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged as Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
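TrySmiTag only succeeds when the value fits the Smi payload; otherwise the code deoptimizes. A minimal sketch of that check, assuming the usual 32-bit scheme where a Smi is a 31-bit integer shifted left by one with a zero tag bit (illustrative only, not the V8 implementation):

#include <cstdint>
#include <cstdio>

// Returns true and writes the tagged value when 'value' fits in the
// 31-bit Smi payload; returning false corresponds to the deopt case.
bool TrySmiTagSketch(int32_t value, int32_t* tagged) {
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;
  *tagged = value * 2;  // same as shifting in a zero tag bit, without UB
  return true;
}

int main() {
  int32_t tagged;
  std::printf("%d\n", TrySmiTagSketch(12345, &tagged));    // 1: fits
  std::printf("%d\n", TrySmiTagSketch(1 << 30, &tagged));  // 0: too large
  return 0;
}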
@@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
DeferredDivI(LCodeGen* codegen, LDivI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
private:
LDivI* instr_;
@@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
__ b(eq, &done);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged as Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new DeferredDivI(this, instr);
@@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) {
template<int T>
-void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op) {
+void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
- GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
+ // Move left to r1 and right to r0 for the stub call.
+ if (left.is(r1)) {
+ __ Move(r0, right);
+ } else if (left.is(r0) && right.is(r1)) {
+ __ Swap(r0, r1, r2);
+ } else if (left.is(r0)) {
+ ASSERT(!right.is(r1));
+ __ mov(r1, r0);
+ __ mov(r0, right);
+ } else {
+ ASSERT(!left.is(r0) && !right.is(r0));
+ __ mov(r0, right);
+ __ mov(r1, left);
+ }
+ TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
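The case analysis above routes left into r1 and right into r0 without clobbering either value, covering the aliasing cases (left already in r1, the two values exactly swapped, left in r0, or neither in r0). A standalone sketch of the same decision logic, with registers modelled as indices into a small array (not V8 code):

#include <cassert>
#include <cstdio>
#include <utility>

enum Reg { r0 = 0, r1 = 1, r2 = 2, r3 = 3 };

// Mirrors the branch structure above: afterwards regs[r1] holds the
// original left value and regs[r0] holds the original right value.
void MoveIntoR1R0(int regs[4], Reg left, Reg right) {
  if (left == r1) {
    regs[r0] = regs[right];           // left is already in place
  } else if (left == r0 && right == r1) {
    std::swap(regs[r0], regs[r1]);    // the two values are exactly swapped
  } else if (left == r0) {
    assert(right != r1);
    regs[r1] = regs[r0];              // move left out of r0 first
    regs[r0] = regs[right];
  } else {
    assert(left != r0 && right != r0);
    regs[r0] = regs[right];           // fill r0 first; left cannot live there
    regs[r1] = regs[left];
  }
}

int main() {
  int regs[4] = {7, 8, 9, 10};        // r0=7 r1=8 r2=9 r3=10
  MoveIntoR1R0(regs, r2, r3);         // left in r2, right in r3
  std::printf("r0=%d r1=%d\n", regs[r0], regs[r1]);  // r0=10 r1=9
  return 0;
}

In the final branch the order matters: r0 is loaded from right before r1 is overwritten, since right may itself be r1 while left is known not to live in either target register.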