Upgrade V8 to 3.1.8

commit 14475c77a61795ab706ddca41be955ea8b8c9eeb (parent cf7b680)
ry authored
Showing with 7,645 additions and 2,254 deletions.
  1. +14 −1 deps/v8/ChangeLog
  2. +27 −94 deps/v8/SConstruct
  3. +1 −2  deps/v8/src/SConscript
  4. +4 −2 deps/v8/src/api.cc
  5. +1 −0  deps/v8/src/arm/assembler-arm.h
  6. +675 −31 deps/v8/src/arm/code-stubs-arm.cc
  7. +33 −14 deps/v8/src/arm/codegen-arm.cc
  8. +8 −0 deps/v8/src/arm/constants-arm.h
  9. +312 −270 deps/v8/src/arm/full-codegen-arm.cc
  10. +19 −8 deps/v8/src/arm/ic-arm.cc
  11. +1 −2  deps/v8/src/arm/lithium-arm.cc
  12. +25 −60 deps/v8/src/arm/lithium-codegen-arm.cc
  13. +0 −5 deps/v8/src/arm/lithium-codegen-arm.h
  14. +126 −3 deps/v8/src/arm/macro-assembler-arm.cc
  15. +45 −4 deps/v8/src/arm/macro-assembler-arm.h
  16. +22 −8 deps/v8/src/arm/simulator-arm.cc
  17. +10 −2 deps/v8/src/arm/stub-cache-arm.cc
  18. +7 −5 deps/v8/src/arm/virtual-frame-arm.cc
  19. +1 −1  deps/v8/src/arm/virtual-frame-arm.h
  20. +15 −5 deps/v8/src/builtins.cc
  21. +110 −105 deps/v8/src/builtins.h
  22. +4 −3 deps/v8/src/compiler.cc
  23. +7 −1 deps/v8/src/d8.cc
  24. +618 −0 deps/v8/src/d8.js
  25. +121 −0 deps/v8/src/debug-debugger.js
  26. +2 −1  deps/v8/src/debug.cc
  27. +7 −2 deps/v8/src/flag-definitions.h
  28. +4 −16 deps/v8/src/full-codegen.cc
  29. +9 −39 deps/v8/src/full-codegen.h
  30. +0 −10 deps/v8/src/handles-inl.h
  31. +12 −6 deps/v8/src/handles.cc
  32. +43 −63 deps/v8/src/handles.h
  33. +27 −18 deps/v8/src/heap-profiler.cc
  34. +3 −1 deps/v8/src/heap-profiler.h
  35. +5 −10 deps/v8/src/heap.cc
  36. +105 −164 deps/v8/src/hydrogen.cc
  37. +4 −11 deps/v8/src/hydrogen.h
  38. +2 −3 deps/v8/src/ia32/code-stubs-ia32.cc
  39. +20 −9 deps/v8/src/ia32/codegen-ia32.cc
  40. +88 −272 deps/v8/src/ia32/full-codegen-ia32.cc
  41. +17 −10 deps/v8/src/ia32/ic-ia32.cc
  42. +9 −9 deps/v8/src/ia32/lithium-codegen-ia32.cc
  43. +13 −6 deps/v8/src/ia32/lithium-ia32.cc
  44. +7 −3 deps/v8/src/ia32/stub-cache-ia32.cc
  45. +21 −5 deps/v8/src/ia32/virtual-frame-ia32.cc
  46. +2 −2 deps/v8/src/ia32/virtual-frame-ia32.h
  47. +9 −0 deps/v8/src/ic-inl.h
  48. +64 −38 deps/v8/src/ic.cc
  49. +33 −12 deps/v8/src/ic.h
  50. +90 −0 deps/v8/src/liveobjectlist-inl.h
  51. +2,475 −1 deps/v8/src/liveobjectlist.cc
  52. +245 −35 deps/v8/src/liveobjectlist.h
  53. +7 −0 deps/v8/src/mark-compact.cc
  54. +6 −2 deps/v8/src/messages.js
  55. +4 −2 deps/v8/src/objects-inl.h
  56. +30 −11 deps/v8/src/objects.cc
  57. +8 −4 deps/v8/src/objects.h
  58. +65 −23 deps/v8/src/parser.cc
  59. +0 −28 deps/v8/src/profile-generator-inl.h
  60. +433 −372 deps/v8/src/profile-generator.cc
  61. +122 −62 deps/v8/src/profile-generator.h
  62. +33 −38 deps/v8/src/runtime-profiler.cc
  63. +3 −2 deps/v8/src/runtime-profiler.h
  64. +344 −57 deps/v8/src/runtime.cc
  65. +21 −6 deps/v8/src/runtime.h
  66. +46 −32 deps/v8/src/stub-cache.cc
  67. +21 −12 deps/v8/src/stub-cache.h
  68. +1 −1  deps/v8/src/version.cc
  69. +0 −8 deps/v8/src/virtual-frame-heavy-inl.h
  70. +210 −4 deps/v8/src/x64/code-stubs-x64.cc
  71. +20 −20 deps/v8/src/x64/codegen-x64.cc
  72. +142 −45 deps/v8/src/x64/full-codegen-x64.cc
  73. +16 −9 deps/v8/src/x64/ic-x64.cc
  74. +94 −16 deps/v8/src/x64/lithium-codegen-x64.cc
  75. +11 −0 deps/v8/src/x64/lithium-codegen-x64.h
  76. +21 −7 deps/v8/src/x64/lithium-x64.cc
  77. +6 −2 deps/v8/src/x64/stub-cache-x64.cc
  78. +25 −5 deps/v8/src/x64/virtual-frame-x64.cc
  79. +2 −2 deps/v8/src/x64/virtual-frame-x64.h
  80. +5 −4 deps/v8/test/cctest/test-api.cc
  81. +1 −1  deps/v8/test/cctest/test-compiler.cc
  82. +2 −1  deps/v8/test/cctest/test-debug.cc
  83. +38 −25 deps/v8/test/cctest/test-heap.cc
  84. +8 −3 deps/v8/test/cctest/test-mark-compact.cc
  85. +0 −66 deps/v8/test/es5conform/es5conform.status
  86. +25 −0 deps/v8/test/mjsunit/array-join.js
  87. +0 −12 deps/v8/test/mjsunit/mjsunit.status
  88. +36 −0 deps/v8/test/mjsunit/override-eval-with-non-function.js
  89. +35 −0 deps/v8/test/mjsunit/regress/regress-1207.js
  90. +34 −0 deps/v8/test/mjsunit/regress/regress-1209.js
  91. +48 −0 deps/v8/test/mjsunit/regress/regress-1210.js
  92. +43 −0 deps/v8/test/mjsunit/regress/regress-1213.js
  93. +29 −0 deps/v8/test/mjsunit/regress/regress-1218.js
  94. +128 −0 deps/v8/test/mjsunit/strict-mode.js
15 deps/v8/ChangeLog
@@ -1,10 +1,23 @@
+2011-03-02: Version 3.1.8
+
+ Fixed a number of crash bugs.
+
+ Improved Crankshaft for x64 and ARM.
+
+ Implemented more of EcmaScript 5 strict mode.
+
+ Fixed issue with unaligned reads and writes on ARM.
+
+ Improved heap profiler support.
+
+
2011-02-28: Version 3.1.7
Fixed a number of crash bugs.
Improved Crankshaft for x64 and ARM.
- Fixed implementation of indexOf/lastIndexOf for sparse
+ Fixed implementation of indexOf/lastIndexOf for sparse
arrays (http://crbug.com/73940).
Fixed bug in map space compaction (http://crbug.com/59688).
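A minimal JavaScript sketch of the strict mode semantics referenced above (illustrative only, not part of the commit; the *_Strict IC stubs and the extra strict-mode flag arguments in the diffs below are the machinery behind it):

    "use strict";
    // Assigning to an undeclared variable throws instead of creating a global.
    try { undeclared = 1; } catch (e) { /* ReferenceError */ }
    // In a plain function call, 'this' is undefined rather than the global object.
    var f = function () { return this; };
    f();  // undefined in strict mode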
121 deps/v8/SConstruct
@@ -27,7 +27,6 @@
import platform
import re
-import subprocess
import sys
import os
from os.path import join, dirname, abspath
@@ -143,9 +142,6 @@ LIBRARY_FLAGS = {
# Use visibility=default to disable this.
'CXXFLAGS': ['-fvisibility=hidden']
},
- 'strictaliasing:off': {
- 'CCFLAGS': ['-fno-strict-aliasing']
- },
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
@@ -655,16 +651,8 @@ def Abort(message):
sys.exit(1)
-def GuessOS(env):
- return utils.GuessOS()
-
-
-def GuessArch(env):
- return utils.GuessArchitecture()
-
-
-def GuessToolchain(env):
- tools = env['TOOLS']
+def GuessToolchain(os):
+ tools = Environment()['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
@@ -673,9 +661,7 @@ def GuessToolchain(env):
return None
-def GuessVisibility(env):
- os = env['os']
- toolchain = env['toolchain'];
+def GuessVisibility(os, toolchain):
if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
# MinGW / Cygwin can't do it.
return 'default'
@@ -685,35 +671,27 @@ def GuessVisibility(env):
return 'hidden'
-def GuessStrictAliasing(env):
- # There seems to be a problem with gcc 4.5.x
- # see http://code.google.com/p/v8/issues/detail?id=884
- # it can be worked around by disabling strict aliasing
- toolchain = env['toolchain'];
- if toolchain == 'gcc':
- env = Environment(tools=['gcc'])
- version = subprocess.Popen([env['CC'], '-dumpversion'],
- stdout=subprocess.PIPE).communicate()[0]
- if version.find('4.5.') == 0:
- return 'off'
- return 'default'
+OS_GUESS = utils.GuessOS()
+TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
+ARCH_GUESS = utils.GuessArchitecture()
+VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
- 'guess': GuessToolchain,
- 'help': 'the toolchain to use'
+ 'default': TOOLCHAIN_GUESS,
+ 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
- 'guess': GuessOS,
- 'help': 'the os to build for'
+ 'default': OS_GUESS,
+ 'help': 'the os to build for (%s)' % OS_GUESS
},
'arch': {
'values':['arm', 'ia32', 'x64', 'mips'],
- 'guess': GuessArch,
- 'help': 'the architecture to build for'
+ 'default': ARCH_GUESS,
+ 'help': 'the architecture to build for (%s)' % ARCH_GUESS
},
'regexp': {
'values': ['native', 'interpreted'],
@@ -822,15 +800,8 @@ SIMPLE_OPTIONS = {
},
'visibility': {
'values': ['default', 'hidden'],
- 'guess': GuessVisibility,
- 'depends': ['os', 'toolchain'],
- 'help': 'shared library symbol visibility'
- },
- 'strictaliasing': {
- 'values': ['default', 'off'],
- 'guess': GuessStrictAliasing,
- 'depends': ['toolchain'],
- 'help': 'assume strict aliasing while optimizing'
+ 'default': VISIBILITY_GUESS,
+ 'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
@@ -840,26 +811,6 @@ SIMPLE_OPTIONS = {
}
-def AddOption(result, name, option):
- if 'guess' in option:
- # Option has a guess function
- guess = option.get('guess')
- guess_env = Environment(options=result)
- # Check if all options that the guess function depends on are set
- if 'depends' in option:
- for dependency in option.get('depends'):
- if not dependency in guess_env:
- return False
- default = guess(guess_env)
- else:
- # Option has a fixed default
- default = option.get('default')
-
- help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
- result.Add(name, help, default)
- return True
-
-
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
@@ -867,28 +818,12 @@ def GetOptions():
result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
- options = SIMPLE_OPTIONS
- while len(options):
- postpone = {}
- for (name, option) in options.iteritems():
- if not AddOption(result, name, option):
- postpone[name] = option
- options = postpone
+ for (name, option) in SIMPLE_OPTIONS.iteritems():
+ help = '%s (%s)' % (name, ", ".join(option['values']))
+ result.Add(name, help, option.get('default'))
return result
-def GetTools(opts):
- env = Environment(options=opts)
- os = env['os']
- toolchain = env['toolchain']
- if os == 'win32' and toolchain == 'gcc':
- return ['mingw']
- elif os == 'win32' and toolchain == 'msvc':
- return ['msvc', 'mslink', 'mslib', 'msvs']
- else:
- return ['default']
-
-
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
@@ -969,7 +904,7 @@ def VerifyOptions(env):
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
- if (not name in env):
+ if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
@@ -1097,7 +1032,7 @@ def ParseEnvOverrides(arg, imports):
return overrides
-def BuildSpecific(env, mode, env_overrides, tools):
+def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
@@ -1150,7 +1085,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
- exports='context tools',
+ exports='context',
duplicate=False
)
@@ -1170,21 +1105,21 @@ def BuildSpecific(env, mode, env_overrides, tools):
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
- d8_env = Environment(tools=tools)
+ d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
- sample_env = Environment(tools=tools)
+ sample_env = Environment()
sample_env.Replace(**context.flags['sample'])
sample_env.Prepend(LIBS=[library_name])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
- exports='sample context tools',
+ exports='sample context',
duplicate=False
)
sample_name = sample + suffix
@@ -1197,7 +1132,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
cctest_program = cctest_env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
- exports='context object_files tools',
+ exports='context object_files',
duplicate=False
)
context.cctest_targets.append(cctest_program)
@@ -1207,9 +1142,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
def Build():
opts = GetOptions()
- tools = GetTools(opts)
- env = Environment(options=opts, tools=tools)
-
+ env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
@@ -1223,7 +1156,7 @@ def Build():
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
- context = BuildSpecific(env.Copy(), mode, env_overrides, tools)
+ context = BuildSpecific(env.Copy(), mode, env_overrides)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
3  deps/v8/src/SConscript
@@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
-Import('tools')
SOURCES = {
@@ -306,7 +305,7 @@ def Abort(message):
def ConfigureObjectFiles():
- env = Environment(tools=tools)
+ env = Environment()
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
6 deps/v8/src/api.cc
@@ -2286,7 +2286,8 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
self,
key_obj,
value_obj,
- static_cast<PropertyAttributes>(attribs));
+ static_cast<PropertyAttributes>(attribs),
+ i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(false);
return true;
@@ -2711,7 +2712,8 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
hidden_props,
key_obj,
value_obj,
- static_cast<PropertyAttributes>(None));
+ static_cast<PropertyAttributes>(None),
+ i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(false);
return true;
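Both call sites above pass i::kNonStrictMode explicitly: stores made through the embedder API keep sloppy-mode semantics, in which a failed store is silently ignored rather than thrown. A JavaScript analogue of the distinction the new parameter encodes (illustrative, not part of the commit):

    var o = Object.freeze({ x: 1 });
    o.x = 2;                  // sloppy mode: silently ignored, o.x stays 1
    (function () {
      "use strict";
      try { o.x = 2; } catch (e) { /* strict mode: TypeError */ }
    })();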
1  deps/v8/src/arm/assembler-arm.h
@@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
+const DwVfpRegister no_dreg = { -1 };
const DwVfpRegister d0 = { 0 };
const DwVfpRegister d1 = { 1 };
const DwVfpRegister d2 = { 2 };
706 deps/v8/src/arm/code-stubs-arm.cc
@@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic {
Label* not_number);
// Loads the number from object into dst as a 32-bit integer if possible. If
- // the object is not a 32-bit integer control continues at the label
- // not_int32. If VFP is supported double_scratch is used but not scratch2.
+ // the object cannot be converted to a 32-bit integer, control continues at
+ // the label not_int32. If VFP is supported, double_scratch is used
+ // but not scratch2.
+ // Floating point values in the 32-bit integer range will be rounded
+ // to an integer.
static void LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
@@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic {
DwVfpRegister double_scratch,
Label* not_int32);
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ // scratch3 is not used when VFP3 is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
+ // Generate non-VFP3 code to check if a double can be exactly represented by a
+ // 32-bit integer. This does not check for 0 or -0, which need
+ // to be checked for separately.
+ // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+ // through otherwise.
+ // src1 and src2 will be clobbered.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+ // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when VFP3 is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in r0.
+ // Register heapnumber_result must be a heap number in which the
+ // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
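DoubleIs32BitInteger, declared above, performs this exactness test on the raw exponent and mantissa bits. A rough JavaScript model of the same predicate, using a DataView to take the double apart (the function name and structure are ours, not V8's; V8 handles -0 separately, as its comment notes):

    function doubleIs32BitInteger(x) {
      var view = new DataView(new ArrayBuffer(8));
      view.setFloat64(0, x);                        // big-endian: high word first
      var hi = view.getUint32(0), lo = view.getUint32(4);
      var exponent = ((hi >>> 20) & 0x7ff) - 1023;  // unbiased exponent
      if (exponent < 0) return x === 0;             // fractions and subnormals fail
      if (exponent > 30) return x === -2147483648;  // only -2^31 has exponent 31
      // All mantissa bits below (52 - exponent) must be zero for an exact integer.
      var fractionalBits = 52 - exponent;           // between 22 and 52 here
      if (fractionalBits >= 32) {
        if (lo !== 0) return false;
        return ((hi & 0xfffff) & ((1 << (fractionalBits - 32)) - 1)) === 0;
      }
      return (lo & ((1 << fractionalBits) - 1)) === 0;
    }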
@@ -560,6 +633,319 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(single_scratch, scratch1);
+ __ vcvt_f64_s32(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, double_dst);
+ }
+ } else {
+ Label fewer_than_20_useful_bits;
+ // Expected output:
+ // | dst1 | dst2 |
+ // | s | exp | mantissa |
+
+ // Check for zero.
+ __ cmp(scratch1, Operand(0));
+ __ mov(dst1, scratch1);
+ __ mov(dst2, scratch1);
+ __ b(eq, &done);
+
+ // Preload the sign of the value.
+ __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+ // Get the absolute value of the object (as an unsigned integer).
+ __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
+
+ // Get mantissa[51:20].
+
+ // Get the position of the first set bit.
+ __ CountLeadingZeros(dst2, scratch1, scratch2);
+ __ rsb(dst2, dst2, Operand(31));
+
+ // Set the exponent.
+ __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst1, scratch2, scratch2,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+ // Clear the leading set bit.
+ __ mov(scratch2, Operand(1));
+ __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+
+ __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ // Get the number of bits to set in the lower part of the mantissa.
+ __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ b(mi, &fewer_than_20_useful_bits);
+ // Set the higher 20 bits of the mantissa.
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+ __ rsb(scratch2, scratch2, Operand(32));
+ __ mov(dst2, Operand(scratch1, LSL, scratch2));
+ __ b(&done);
+
+ __ bind(&fewer_than_20_useful_bits);
+ __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ mov(scratch2, Operand(scratch1, LSL, scratch2));
+ __ orr(dst1, dst1, scratch2);
+ // Set dst2 to 0.
+ __ mov(dst2, Operand(0));
+ }
+
+ __ b(&done);
+
+ __ bind(&obj_is_not_smi);
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_dst,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
+
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, double_dst);
+ }
+
+ } else {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ // Load the double value in the destination registers.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // Check for 0 and -0.
+ __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst2));
+ __ cmp(scratch1, Operand(0));
+ __ b(eq, &done);
+
+ // Check that the value can be exactly represented by a 32-bit integer.
+ // Jump to not_int32 if that's not the case.
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+ ASSERT(!scratch1.is(scratch2) &&
+ !scratch1.is(scratch3) &&
+ !scratch2.is(scratch3));
+
+ Label done;
+
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ SwVfpRegister single_scratch = double_scratch.low();
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
+ // Get the result in the destination register.
+ __ vmov(dst, single_scratch);
+
+ } else {
+ // Load the double value in the destination registers.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
+ __ orr(dst, scratch2, Operand(dst));
+ __ cmp(dst, Operand(0));
+ __ b(eq, &done);
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+ // Registers state after DoubleIs32BitInteger.
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ mov(dst, Operand(dst, LSR, scratch3));
+ // Set the implicit first bit.
+ __ rsb(scratch3, scratch3, Operand(32));
+ __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
+ // Set the sign.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ __ rsb(dst, dst, Operand(0), LeaveCC, mi);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ // Get exponent alone in scratch.
+ __ Ubfx(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Subtract the bias from the exponent.
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+ // Fast cases. Check for obvious non 32-bit integer values.
+ // Negative exponent cannot yield 32-bit integers.
+ __ b(mi, not_int32);
+ // Exponent greater than 31 cannot yield 32-bit integers.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ __ tst(src1, Operand(HeapNumber::kSignMask));
+ __ cmp(scratch, Operand(30), eq); // Executed for positive. If exponent is 30
+ // the gt condition will be "correct" and
+ // the next instruction will be skipped.
+ __ cmp(scratch, Operand(31), ne); // Executed for negative and positive where
+ // exponent is not 30.
+ __ b(gt, not_int32);
+ // If any of the lower mantissa bits [21:0] is set, the value cannot be
+ // an exact 32-bit integer.
+ __ tst(src2, Operand(0x3fffff));
+ __ b(ne, not_int32);
+
+ // Otherwise the exponent needs to be big enough to shift all the
+ // non-zero bits left of the fractional point. So the (30 - exponent) last
+ // bits of the 31 higher bits of the mantissa must be zero.
+ // Because bits [21:0] are zero, we can instead check that the
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are zero.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ubfx(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ orr(dst,
+ dst,
+ Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ rsb(scratch, scratch, Operand(32));
+ __ mov(src2, Operand(1));
+ __ mov(src1, Operand(src2, LSL, scratch));
+ __ sub(src1, src1, Operand(1));
+ __ tst(dst, src1);
+ __ b(ne, not_int32);
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is callee-saved.
+ // We currently always use r5 to pass it.
+ ASSERT(heap_number_result.is(r5));
+
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
+ // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from heap_number_result.
+ __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
+#else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+ HeapNumber::kValueOffset));
+#endif
+ // Place heap_number_result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(heap_number_result));
+ __ pop(pc);
+}
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
@@ -2707,33 +3093,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ add(r0, r0, Operand(kHeapObjectTag));
__ Ret();
} else {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from r5.
- __ sub(scratch1, result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
- // Plase result in r0 and return to the pushed return address.
- __ mov(r0, Operand(result));
- __ pop(pc);
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+ op_,
+ result,
+ scratch1);
}
break;
}
@@ -2779,7 +3143,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
break;
case Token::SAR:
// Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, ASR, r2));
break;
@@ -2924,7 +3287,288 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
- GenerateTypeTransition(masm);
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+ DwVfpRegister double_scratch = d0;
+ SwVfpRegister single_scratch = s3;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+ // Labels for type transition, used for wrong input or output types.
+ // Both labels are currently bound to the same position. We use two
+ // different labels to distinguish the cause of the type transition.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ orr(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ GenerateSmiSmiOperation(masm);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load both operands and check that they are 32-bit integers.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ d7,
+ r2,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ d6,
+ r4,
+ r5,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ d5,
+ scratch1,
+ scratch2);
+
+ if (result_type_ <= TRBinaryOpIC::INT32) {
+ // If the ne condition is set, result does
+ // not fit in a 32-bit integer.
+ __ b(ne, &transition);
+ }
+
+ // Check if the result fits in a smi.
+ __ vmov(scratch1, single_scratch);
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+ // If not, try to return a heap number.
+ __ b(mi, &return_heap_number);
+ // Tag the result and return.
+ __ SmiTag(r0, scratch1);
+ __ Ret();
+ }
+
+ if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
+ : TRBinaryOpIC::INT32) {
+ __ bind(&return_heap_number);
+ // We are using vfp registers so r5 is available.
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+ }
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ // We preserved r0 and r1 to be able to call runtime.
+ // Save the left value on the stack.
+ __ Push(r5, r4);
+
+ // Allocate a heap number to store the result.
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ // Load the left value from the value saved on the stack.
+ __ Pop(r1, r0);
+
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(
+ masm, op_, heap_number_result, scratch1);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ Register scratch3 = r5;
+ // Convert operands to 32-bit integers. Right in r2 and left in r3. The
+ // registers r0 and r1 (right and left) are preserved for the runtime
+ // call.
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ case Token::SAR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // We only get a negative result if the shift value (r2) is 0.
+ // This result cannot be represented as a signed 32-bit integer, so try
+ // to return a heap number if we can.
+ // The non-VFP3 code does not support this special case, so jump to
+ // runtime if we don't support it.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi,
+ (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+ : &return_heap_number);
+ } else {
+ __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+ : &call_runtime);
+ }
+ break;
+ case Token::SHL:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ add(scratch1, r2, Operand(0x40000000), SetCC);
+ // If not, try to return a heap number. (We know the result is an int32.)
+ __ b(mi, &return_heap_number);
+ // Tag the result and return.
+ __ SmiTag(r0, r2);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_s32(double_scratch, double_scratch.low());
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_u32(double_scratch, double_scratch.low());
+ }
+
+ // Store the result.
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (transition.is_linked()) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
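The new GenerateInt32Stub leans on two ECMA-262 facts its comments cite: shift counts use only their five least significant bits, and the result of >>> is unsigned and may not fit in a signed 32-bit integer. Both are observable from JavaScript (illustrative):

    1 << 32;    // 1, not 0: the count is masked, 32 & 0x1f === 0
    1 << 33;    // 2: 33 & 0x1f === 1
    -1 >>> 0;   // 4294967295: non-negative but too large for a signed int32,
                // so the stub returns a heap number (or, without VFP3,
                // falls back to the runtime) instead of a smi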
47 deps/v8/src/arm/codegen-arm.cc
@@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(cp);
frame_->EmitPush(Operand(pairs));
frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// The result is discarded.
}
@@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// context slot followed by initialization.
frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
- frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+ frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling assignment expressions.
@@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(key);
Load(value);
if (property->emit_store()) {
- frame_->CallRuntime(Runtime::kSetProperty, 3);
+ frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ frame_->CallRuntime(Runtime::kSetProperty, 4);
} else {
frame_->Drop(3);
}
@@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
// Set the bit in the map to indicate that it has been checked safe for
// default valueOf and set true result.
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
__ orr(scratch1_,
scratch1_,
Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
__ mov(map_result_, Operand(1));
__ jmp(exit_label());
__ bind(&false_result);
@@ -6674,8 +6677,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue(Register value,
Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
+ Register receiver,
+ StrictModeFlag strict_mode)
+ : value_(value),
+ key_(key),
+ receiver_(receiver),
+ strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
@@ -6685,6 +6692,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register value_;
Register key_;
Register receiver_;
+ StrictModeFlag strict_mode_;
};
@@ -6706,7 +6714,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed store IC. It has the arguments value, key and receiver in r0,
// r1 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed store has been inlined.
@@ -6724,8 +6734,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
public:
DeferredReferenceSetNamedValue(Register value,
Register receiver,
- Handle<String> name)
- : value_(value), receiver_(receiver), name_(name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode)
+ : value_(value),
+ receiver_(receiver),
+ name_(name),
+ strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetNamedValue");
}
@@ -6735,6 +6749,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
Register value_;
Register receiver_;
Handle<String> name_;
+ StrictModeFlag strict_mode_;
};
@@ -6754,7 +6769,9 @@ void DeferredReferenceSetNamedValue::Generate() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call named store IC. It has the arguments value, receiver and name in r0,
// r1 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// named store has been inlined.
@@ -6943,7 +6960,8 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Register receiver = r1;
DeferredReferenceSetNamedValue* deferred =
- new DeferredReferenceSetNamedValue(value, receiver, name);
+ new DeferredReferenceSetNamedValue(
+ value, receiver, name, strict_mode_flag());
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -7129,7 +7147,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(value, key, receiver);
+ new DeferredReferenceSetKeyedValue(
+ value, key, receiver, strict_mode_flag());
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
@@ -7214,7 +7233,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
deferred->BindExit();
} else {
- frame()->CallKeyedStoreIC();
+ frame()->CallKeyedStoreIC(strict_mode_flag());
}
}
8 deps/v8/src/arm/constants-arm.h
@@ -385,7 +385,10 @@ enum VFPConversionMode {
kDefaultRoundToZero = 1
};
+// This mask does not include the "inexact" or "input denormal" cumulative
+// exceptions flags, because we usually don't want to check for it.
static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInexactExceptionBit = 1 << 4;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPInvalidExceptionBit = 1;
@@ -411,6 +414,11 @@ enum VFPRoundingMode {
static const uint32_t kVFPRoundingModeMask = 3 << 22;
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
// -----------------------------------------------------------------------------
// Hints.
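kCheckForInexactConversion is what lets EmitVFPTruncate, used by the new LoadNumberAsInt32* helpers above, reject doubles that merely truncate to an int32 rather than being one exactly. In JavaScript terms (illustrative):

    // Both expressions evaluate to 3, but only the first operand is an exact
    // int32. For the second, the VFP truncation raises the cumulative
    // "inexact" flag and the INT32 stub jumps to its type-transition label.
    3 | 0;     // operand already an int32: stays on the fast path
    3.5 | 0;   // truncating 3.5 to 3 is inexact: transition to a wider type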
582 deps/v8/src/arm/full-codegen-arm.cc
@@ -339,23 +339,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}
-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
- Token::Value op, Expression* left, Expression* right) {
- ASSERT(ShouldInlineSmiCase(op));
- if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
- // We never generate inlined constant smi operations for these.
- return kNoConstants;
- } else if (right->IsSmiLiteral()) {
- return kRightConstant;
- } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
- // Don't inline shifts with constant left hand side.
- return kLeftConstant;
- } else {
- return kNoConstants;
- }
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
@@ -793,7 +776,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
@@ -809,10 +794,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ mov(r2, Operand(pairs));
+ __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r2, r1, r0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}
@@ -1456,7 +1442,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(r0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1634,14 +1622,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- ConstantOperand constant = ShouldInlineSmiCase(op)
- ? GetConstantOperand(op, expr->target(), expr->value())
- : kNoConstants;
- ASSERT(constant == kRightConstant || constant == kNoConstants);
- if (constant == kNoConstants) {
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
- }
+ __ push(r0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1653,8 +1635,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
op,
mode,
expr->target(),
- expr->value(),
- constant);
+ expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1704,217 +1685,11 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- Label call_stub, done;
- // Optimistically add smi value with unknown object. If result overflows or is
- // not a smi then we had either a smi overflow or added a smi with a tagged
- // pointer.
- __ mov(r1, Operand(value));
- __ add(r2, r0, r1, SetCC);
- __ b(vs, &call_stub);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfNotSmi(r2, &call_stub);
- __ mov(r0, r2);
- __ b(&done);
-
- // Call the shared stub.
- __ bind(&call_stub);
- if (!left_is_constant_smi) {
- __ Swap(r0, r1, r2);
- }
- TypeRecordingBinaryOpStub stub(Token::ADD, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- Label call_stub, done;
- // Optimistically subtract smi value and unknown object. If result overflows
- // or is not a smi then we had either a smi overflow or subtraction between a
- // smi and a tagged pointer.
- __ mov(r1, Operand(value));
- if (left_is_constant_smi) {
- __ sub(r2, r1, r0, SetCC);
- } else {
- __ sub(r2, r0, r1, SetCC);
- }
- __ b(vs, &call_stub);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfNotSmi(r2, &call_stub);
- __ mov(r0, r2);
- __ b(&done);
-
- // Call the shared stub.
- __ bind(&call_stub);
- if (!left_is_constant_smi) {
- __ Swap(r0, r1, r2);
- }
- TypeRecordingBinaryOpStub stub(Token::SUB, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Smi* value) {
- Label call_stub, smi_case, done;
- int shift_value = value->value() & 0x1f;
-
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(r0, &smi_case);
-
- // Call stub.
- __ bind(&call_stub);
- __ mov(r1, r0);
- __ mov(r0, Operand(value));
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ b(&done);
-
- // Smi case.
- __ bind(&smi_case);
- switch (op) {
- case Token::SHL:
- if (shift_value != 0) {
- __ mov(r1, r0);
- if (shift_value > 1) {
- __ mov(r1, Operand(r1, LSL, shift_value - 1));
- }
- // Convert int result to smi, checking that it is in int range.
- __ SmiTag(r1, SetCC);
- __ b(vs, &call_stub);
- __ mov(r0, r1); // Put result back into r0.
- }
- break;
- case Token::SAR:
- if (shift_value != 0) {
- __ mov(r0, Operand(r0, ASR, shift_value));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- break;
- case Token::SHR:
- // SHR must return a positive value. When shifting by 0 or 1 we need to
- // check that smi tagging the result will not create a negative value.
- if (shift_value < 2) {
- __ mov(r2, Operand(shift_value));
- __ SmiUntag(r1, r0);
- if (shift_value != 0) {
- __ mov(r1, Operand(r1, LSR, shift_value));
- }
- __ tst(r1, Operand(0xc0000000));
- __ b(ne, &call_stub);
- __ SmiTag(r0, r1); // result in r0.
- } else {
- __ SmiUntag(r0);
- __ mov(r0, Operand(r0, LSR, shift_value));
- __ SmiTag(r0);
- }
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Smi* value) {
- Label smi_case, done;
-
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(r0, &smi_case);
-
- // The order of the arguments does not matter for bit-ops with a
- // constant operand.
- __ mov(r1, Operand(value));
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(r1, Operand(value));
- switch (op) {
- case Token::BIT_OR:
- __ orr(r0, r0, Operand(r1));
- break;
- case Token::BIT_XOR:
- __ eor(r0, r0, Operand(r1));
- break;
- case Token::BIT_AND:
- __ and_(r0, r0, Operand(r1));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- EmitConstantSmiBitOp(expr, op, mode, value);
- break;
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- ASSERT(!left_is_constant_smi);
- EmitConstantSmiShiftOp(expr, op, mode, value);
- break;
- case Token::ADD:
- EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
- break;
- case Token::SUB:
- EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left_expr,
- Expression* right_expr,
- ConstantOperand constant) {
- if (constant == kRightConstant) {
- Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, mode, false, value);
- return;
- } else if (constant == kLeftConstant) {
- Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, mode, true, value);
- return;
- }
-
+ Expression* right_expr) {
Label done, smi_case, stub_call;
Register scratch1 = r2;
@@ -2050,7 +1825,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2071,7 +1848,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(r2);
}
__ pop(r0); // Restore value.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2095,9 +1874,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -2166,9 +1945,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
case Slot::LOOKUP:
// Call the runtime for the assignment.
__ push(r0); // Value.
- __ mov(r0, Operand(slot->var()->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ __ mov(r1, Operand(slot->var()->name()));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
}
@@ -2203,7 +1983,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r1);
}
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -2247,7 +2029,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(r2);
}
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -2362,6 +2146,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ }
+ __ push(r1);
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(r1);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -2391,26 +2198,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // Push copy of the function - found below the arguments.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
-
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- __ push(r1);
- } else {
- __ push(r2);
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(r0);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
}
- // Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1,
- MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
- __ push(r1);
- // Push the strict mode flag.
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
+ }
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
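This restructures the eval call sequence around the new helper: when the callee is a DYNAMIC_GLOBAL slot, the generated code first checks that no context extensions shadow the global eval and, if so, resolves with SKIP_CONTEXT_LOOKUP; otherwise it falls through to the full lookup. A minimal sketch of the decision, with stand-in names:

enum ResolveEvalFlag { SKIP_CONTEXT_LOOKUP, PERFORM_CONTEXT_LOOKUP };

ResolveEvalFlag ChooseResolveMode(bool callee_is_dynamic_global,
                                  bool no_shadowing_extensions) {
  // Only an unshadowed global eval lets the runtime skip walking the
  // context chain; anything else takes the full lookup path.
  return (callee_is_dynamic_global && no_shadowing_extensions)
             ? SKIP_CONTEXT_LOOKUP
             : PERFORM_CONTEXT_LOOKUP;
}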
@@ -3430,9 +3242,235 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = r0;
+ Register elements = no_reg; // Will be r0.
+ Register result = no_reg; // Will be r0.
+ Register separator = r1;
+ Register array_length = r2;
+ Register result_pos = no_reg; // Will be r2.
+ Register string_length = r3;
+ Register string = r4;
+ Register element = r5;
+ Register elements_end = r6;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ b(ne, &bailout);
+
+ // Check that the array has fast elements.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ tst(scratch2, Operand(1 << Map::kHasFastElements));
+ __ b(eq, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length, SetCC);
+ __ b(ne, &non_trivial_array);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ b(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, Operand(0));
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ cmp(array_length, Operand(0));
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch1));
+ __ b(vs, &bailout);
+ __ cmp(element, elements_end);
+ __ b(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, Operand(1));
+ __ b(ne, &not_size_one_array);
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ b(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to
+ // string_length to get the length of the result string. array_length is
+ // not a smi but the other values are, so the result is a smi.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch1));
+ __ smull(scratch2, ip, array_length, scratch1);
+ // Check for smi overflow. No overflow if the high 33 bits of the 64-bit
+ // result are zero.
+ __ cmp(ip, Operand(0));
+ __ b(ne, &bailout);
+ __ tst(scratch2, Operand(0x80000000));
+ __ b(ne, &bailout);
+ __ add(string_length, string_length, Operand(scratch2));
+ __ b(vs, &bailout);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+ // Prepare for looping. Set up elements_end to point at the end of the
+ // array, and set result_pos to the position in the result where the
+ // first character will be written.
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ add(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ b(eq, &one_char_separator);
+ __ b(gt, &long_separator);
+
+ // Empty separator case
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &empty_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the
+ // first element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ascii char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string,
+ separator,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &long_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ __ bind(&bailout);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
context()->Plug(r0);
- return;
}
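The length computation above evaluates sum + separator_length * (array_length - 1) entirely in smi arithmetic, using smull to get a 64-bit product and bailing out on overflow. A minimal sketch in plain C++, assuming 32-bit smis (payload shifted left one bit):

#include <cstdint>

// Returns the smi-tagged result length, or -1 where the generated code
// branches to the bailout. sum_smi and sep_smi are smi-tagged; n is not.
int32_t JoinedLengthSmi(int32_t sum_smi, int32_t sep_smi, int32_t n) {
  int64_t product = static_cast<int64_t>(n) * sep_smi;  // what smull computes
  if ((product >> 31) != 0) return -1;  // high 33 bits must all be zero
  int64_t total = static_cast<int64_t>(sum_smi) - sep_smi + product;
  if (total != static_cast<int32_t>(total)) return -1;  // the vs bailout
  return static_cast<int32_t>(total);  // sum + sep * (n - 1), still tagged
}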
@@ -3767,7 +3805,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3782,7 +3822,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
deps/v8/src/arm/ic-arm.cc
@@ -1400,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1411,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1470,7 +1476,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// r0: value.
// r1: key.
// r2: receiver.
- GenerateRuntimeSetProperty(masm);
+ GenerateRuntimeSetProperty(masm, strict_mode);
// Check whether the elements is a pixel array.
// r4: elements map.
@@ -1540,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- Code::ExtraICState extra_ic_state) {
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1552,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC,
- extra_ic_state);
+ strict_mode);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
@@ -1646,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1656,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
__ Push(r1, r2, r0);
+ __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r1, r0);
+
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
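Runtime::kSetProperty has grown from three arguments to five: receiver, key or name, value, a smi-tagged PropertyAttributes (NONE here), and the smi-tagged strict-mode flag. A minimal sketch of the widened argument record, with C++ stand-ins for the tagged values:

enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
enum PropertyAttributes { NONE = 0 };

struct SetPropertyArgs {          // in push order
  const void* receiver;
  const void* key_or_name;
  const void* value;
  PropertyAttributes attributes;  // smi-tagged, NONE here
  StrictModeFlag strict_mode;     // smi-tagged flag
};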
deps/v8/src/arm/lithium-arm.cc
@@ -1154,8 +1154,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
- MarkAsSaveDoubles(result);
- return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
deps/v8/src/arm/lithium-codegen-arm.cc
@@ -573,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
Factory::NewDeoptimizationInputData(length, TENURED);
- data->SetTranslationByteArray(*translations_.CreateByteArray());
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
@@ -1985,11 +1986,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp, temp);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- ASSERT_EQ(kAdditionalDelta,
- masm_->InstructionsGeneratedSince(&before_push_delta));
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
@@ -2586,41 +2583,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-// Truncates a double using a specific rounding mode.
-// Clears the z flag (ne condition) if an overflow occurs.
-void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2) {
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
-
- // Set custom FPSCR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- __ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(rounding_mode));
- __ vmsr(scratch);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(result,
- double_input,
- kFPSCRRounding);
-
- // Retrieve FPSCR.
- __ vmrs(scratch);
- // Restore FPSCR.
- __ vmsr(prev_fpscr);
- // Check for vfp exceptions.
- __ tst(scratch, Operand(kVFPExceptionMask));
-}
-
-
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2628,11 +2590,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
- EmitVFPTruncate(kRoundToMinusInf,
- single_scratch,
- input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to general purpose register r0.
@@ -2654,11 +2616,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
Register result = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = result;
- EmitVFPTruncate(kRoundToNearest,
- double_scratch0().low(),
- input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToNearest,
+ double_scratch0().low(),
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
__ vmov(result, double_scratch0().low());
@@ -2863,9 +2825,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(info_->is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2907,7 +2869,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3371,11 +3335,12 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
- EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+
// Deoptimize if we had a vfp invalid exception.
DeoptimizeIf(ne, instr->environment());
deps/v8/src/arm/lithium-codegen-arm.h
@@ -206,11 +206,6 @@ class LCodeGen BASE_EMBEDDED {
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
deps/v8/src/arm/macro-assembler-arm.cc
@@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
+void MacroAssembler::Bfi(Register dst,
+ Register src,
+ Register scratch,
+ int lsb,
+ int width,
+ Condition cond) {
+ ASSERT(0 <= lsb && lsb < 32);
+ ASSERT(0 <= width && width < 32);
+ ASSERT(lsb + width < 32);
+ ASSERT(!scratch.is(dst));
+ if (width == 0) return;
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ and_(scratch, src, Operand((1 << width) - 1));
+ mov(scratch, Operand(scratch, LSL, lsb));
+ orr(dst, dst, scratch);
+ } else {
+ bfi(dst, src, lsb, width, cond);
+ }
+}
+
+
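Bfi falls back to bic/and/orr when the ARMv7 bfi instruction is unavailable. A minimal sketch in plain C++ of what that fallback computes: insert the low width bits of src into dst starting at bit lsb:

#include <cstdint>

uint32_t BitFieldInsert(uint32_t dst, uint32_t src, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  dst &= ~mask;                                         // bic: clear the field
  uint32_t field = (src & ((1u << width) - 1)) << lsb;  // and_ then LSL
  return dst | field;                                   // orr: merge it in
}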
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
@@ -1818,9 +1841,9 @@ void MacroAssembler::ConvertToInt32(Register source,
ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
mov(dest, Operand(0, RelocInfo::NONE));
@@ -1883,6 +1906,52 @@ void MacroAssembler::ConvertToInt32(Register source,
}
+void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(CpuFeatures::IsSupported(VFP3));