Upgrade V8 to 2.1.1

commit bcf163da27676e26108ec430a392baee84f2831c (1 parent: 7647835)
ry authored
Showing with 17,992 additions and 3,407 deletions.
  1. +2 −0  deps/v8/AUTHORS
  2. +14 −0 deps/v8/ChangeLog
  3. +33 −2 deps/v8/SConstruct
  4. +85 −37 deps/v8/include/v8.h
  5. +1 −1  deps/v8/samples/lineprocessor.cc
  6. +20 −0 deps/v8/src/SConscript
  7. +2 −35 deps/v8/src/accessors.cc
  8. +138 −26 deps/v8/src/api.cc
  9. +109 −82 deps/v8/src/arm/assembler-arm.cc
  10. +6 −3 deps/v8/src/arm/assembler-arm.h
  11. +13 −17 deps/v8/src/arm/assembler-thumb2-inl.h
  12. +142 −85 deps/v8/src/arm/assembler-thumb2.cc
  13. +17 −8 deps/v8/src/arm/assembler-thumb2.h
  14. +73 −70 deps/v8/src/arm/builtins-arm.cc
  15. +794 −108 deps/v8/src/arm/codegen-arm.cc
  16. +82 −18 deps/v8/src/arm/codegen-arm.h
  17. +5 −5 deps/v8/src/arm/debug-arm.cc
  18. +23 −2 deps/v8/src/arm/disasm-arm.cc
  19. +138 −50 deps/v8/src/arm/fast-codegen-arm.cc
  20. +133 −49 deps/v8/src/arm/full-codegen-arm.cc
  21. +136 −78 deps/v8/src/arm/ic-arm.cc
  22. +212 −52 deps/v8/src/arm/macro-assembler-arm.cc
  23. +88 −39 deps/v8/src/arm/macro-assembler-arm.h
  24. +23 −6 deps/v8/src/arm/simulator-arm.cc
  25. +197 −270 deps/v8/src/arm/stub-cache-arm.cc
  26. +10 −2 deps/v8/src/arm/virtual-frame-arm.cc
  27. +7 −2 deps/v8/src/arm/virtual-frame-arm.h
  28. +3 −2 deps/v8/src/array.js
  29. +10 −0 deps/v8/src/assembler.cc
  30. +15 −1 deps/v8/src/assembler.h
  31. +38 −1 deps/v8/src/ast.h
  32. +43 −134 deps/v8/src/bootstrapper.cc
  33. +0 −3  deps/v8/src/bootstrapper.h
  34. +449 −50 deps/v8/src/builtins.cc
  35. +7 −3 deps/v8/src/builtins.h
  36. +3 −1 deps/v8/src/checks.h
  37. +12 −4 deps/v8/src/code-stubs.cc
  38. +3 −2 deps/v8/src/code-stubs.h
  39. +5 −36 deps/v8/src/codegen-inl.h
  40. +46 −29 deps/v8/src/codegen.cc
  41. +26 −15 deps/v8/src/codegen.h
  42. +76 −69 deps/v8/src/compiler.cc
  43. +116 −13 deps/v8/src/compiler.h
  44. +0 −1  deps/v8/src/contexts.h
  45. +2 −2 deps/v8/src/d8-readline.cc
  46. +306 −12 deps/v8/src/data-flow.cc
  47. +56 −6 deps/v8/src/data-flow.h
  48. +6 −2 deps/v8/src/debug-delay.js
  49. +4 −10 deps/v8/src/debug.cc
  50. +1 −1  deps/v8/src/disassembler.cc
  51. +1 −1  deps/v8/src/execution.cc
  52. +190 −35 deps/v8/src/fast-codegen.cc
  53. +59 −35 deps/v8/src/fast-codegen.h
  54. +3 −1 deps/v8/src/flag-definitions.h
  55. +0 −4 deps/v8/src/frame-element.cc
  56. +34 −9 deps/v8/src/frame-element.h
  57. +2 −0  deps/v8/src/frames-inl.h
  58. +1 −6 deps/v8/src/frames.cc
  59. +14 −91 deps/v8/src/full-codegen.cc
  60. +11 −12 deps/v8/src/full-codegen.h
  61. +5 −0 deps/v8/src/globals.h
  62. +26 −21 deps/v8/src/handles.cc
  63. +2 −0  deps/v8/src/handles.h
  64. +7 −12 deps/v8/src/heap.cc
  65. +0 −2  deps/v8/src/heap.h
  66. +29 −30 deps/v8/src/ia32/assembler-ia32.cc
  67. +3 −2 deps/v8/src/ia32/assembler-ia32.h
  68. +65 −70 deps/v8/src/ia32/builtins-ia32.cc
  69. +1,139 −505 deps/v8/src/ia32/codegen-ia32.cc
  70. +118 −43 deps/v8/src/ia32/codegen-ia32.h
  71. +2 −1  deps/v8/src/ia32/debug-ia32.cc
  72. +0 −1  deps/v8/src/ia32/disasm-ia32.cc
  73. +146 −49 deps/v8/src/ia32/fast-codegen-ia32.cc
  74. +149 −45 deps/v8/src/ia32/full-codegen-ia32.cc
  75. +257 −205 deps/v8/src/ia32/ic-ia32.cc
  76. +93 −56 deps/v8/src/ia32/macro-assembler-ia32.cc
  77. +34 −23 deps/v8/src/ia32/macro-assembler-ia32.h
  78. +447 −187 deps/v8/src/ia32/stub-cache-ia32.cc
  79. +84 −23 deps/v8/src/ia32/virtual-frame-ia32.cc
  80. +16 −7 deps/v8/src/ia32/virtual-frame-ia32.h
  81. +46 −48 deps/v8/src/ic.cc
  82. +19 −13 deps/v8/src/ic.h
  83. +25 −11 deps/v8/src/json-delay.js
  84. +3 −0  deps/v8/src/jump-target-inl.h
  85. +47 −3 deps/v8/src/jump-target.cc
  86. +87 −0 deps/v8/src/liveedit.cc
  87. +78 −0 deps/v8/src/liveedit.h
  88. +0 −9 deps/v8/src/log-utils.cc
  89. +0 −3  deps/v8/src/log-utils.h
  90. +55 −43 deps/v8/src/log.cc
  91. +8 −9 deps/v8/src/log.h
  92. +9 −0 deps/v8/src/macro-assembler.h
  93. +1 −1  deps/v8/src/math.js
  94. +2 −0  deps/v8/src/messages.js
  95. +215 −0 deps/v8/src/mips/assembler-mips-inl.h
  96. +1,208 −0 deps/v8/src/mips/assembler-mips.cc
  97. +663 −0 deps/v8/src/mips/assembler-mips.h
  98. +109 −0 deps/v8/src/mips/builtins-mips.cc
  99. +56 −0 deps/v8/src/mips/codegen-mips-inl.h
  100. +501 −0 deps/v8/src/mips/codegen-mips.cc
  101. +311 −0 deps/v8/src/mips/codegen-mips.h
  102. +323 −0 deps/v8/src/mips/constants-mips.cc
  103. +525 −0 deps/v8/src/mips/constants-mips.h
  104. +69 −0 deps/v8/src/mips/cpu-mips.cc
  105. +112 −0 deps/v8/src/mips/debug-mips.cc
  106. +784 −0 deps/v8/src/mips/disasm-mips.cc
  107. +56 −0 deps/v8/src/mips/fast-codegen-mips.cc
  108. +100 −0 deps/v8/src/mips/frames-mips.cc
  109. +164 −0 deps/v8/src/mips/frames-mips.h
  110. +268 −0 deps/v8/src/mips/full-codegen-mips.cc
  111. +187 −0 deps/v8/src/mips/ic-mips.cc
  112. +87 −0 deps/v8/src/mips/jump-target-mips.cc
  113. +895 −0 deps/v8/src/mips/macro-assembler-mips.cc
  114. +381 −0 deps/v8/src/mips/macro-assembler-mips.h
  115. +137 −0 deps/v8/src/mips/register-allocator-mips-inl.h
  116. +60 −0 deps/v8/src/mips/register-allocator-mips.cc
  117. +46 −0 deps/v8/src/mips/register-allocator-mips.h
  118. +1,648 −0 deps/v8/src/mips/simulator-mips.cc
  119. +311 −0 deps/v8/src/mips/simulator-mips.h
  120. +384 −0 deps/v8/src/mips/stub-cache-mips.cc
  121. +240 −0 deps/v8/src/mips/virtual-frame-mips.cc
  122. +548 −0 deps/v8/src/mips/virtual-frame-mips.h
  123. +25 −1 deps/v8/src/mirror-delay.js
  124. +72 −0 deps/v8/src/number-info.h
  125. +2 −0  deps/v8/src/objects-debug.cc
  126. +7 −0 deps/v8/src/objects-inl.h
  127. +144 −83 deps/v8/src/objects.cc
  128. +10 −1 deps/v8/src/objects.h
  129. +2 −1  deps/v8/src/parser.cc
  130. +15 −2 deps/v8/src/platform-linux.cc
  131. +1 −1  deps/v8/src/property.cc
  132. +12 −15 deps/v8/src/property.h
  133. +35 −0 deps/v8/src/register-allocator-inl.h
  134. +21 −2 deps/v8/src/register-allocator.cc
  135. +21 −16 deps/v8/src/register-allocator.h
  136. +88 −96 deps/v8/src/runtime.cc
  137. +4 −5 deps/v8/src/runtime.h
  138. +38 −25 deps/v8/src/runtime.js
  139. +2 −0  deps/v8/src/simulator.h
  140. +11 −7 deps/v8/src/string.js
  141. +17 −3 deps/v8/src/stub-cache.cc
  142. +23 −9 deps/v8/src/stub-cache.h
  143. +6 −1 deps/v8/src/top.cc
  144. +1 −0  deps/v8/src/top.h
  145. +0 −37 deps/v8/src/utils.cc
  146. +0 −42 deps/v8/src/utils.h
Sorry, we could not display the entire diff because it was too big.
2  deps/v8/AUTHORS
@@ -4,6 +4,7 @@
# Name/Organization <email address>
Google Inc.
+Sigma Designs Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com>
@@ -22,3 +23,4 @@ Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Subrato K De <subratokde@codeaurora.org>
+Dineel D Sule <dsule@codeaurora.org>
14 deps/v8/ChangeLog
@@ -1,3 +1,17 @@
+2010-02-19: Version 2.1.1
+
+ [ES5] Implemented Object.defineProperty.
+
+ Improved profiler support.
+
+ Added SetPrototype method in the public V8 API.
+
+ Added GetScriptOrigin and GetScriptLineNumber methods to Function
+ objects in the API.
+
+ Performance improvements on all platforms.
+
+
2010-02-03: Version 2.1.0
Values are now always wrapped in objects when used as a receiver.
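The ChangeLog entry above lists GetScriptOrigin and GetScriptLineNumber as new methods on Function objects. A minimal sketch of assumed usage (not part of this commit; fn is a hypothetical v8::Handle<v8::Function> obtained from already-compiled script code):

    int line = fn->GetScriptLineNumber();  // zero-based, or kLineOffsetNotFound
    if (line != v8::Function::kLineOffsetNotFound) {
      v8::ScriptOrigin origin = fn->GetScriptOrigin();
      // origin.ResourceName() holds the script's name object, e.g. its file name.
    }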
35 deps/v8/SConstruct
@@ -191,6 +191,17 @@ LIBRARY_FLAGS = {
'armvariant:arm': {
'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ 'simulator:none': {
+ 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+ 'LDFLAGS': ['-EL']
+ }
+ },
+ 'simulator:mips': {
+ 'CCFLAGS': ['-m32'],
+ 'LINKFLAGS': ['-m32']
+ },
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
@@ -292,6 +303,9 @@ V8_EXTRA_FLAGS = {
# used by the arm simulator.
'WARNINGFLAGS': ['/wd4996']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ },
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
@@ -457,10 +471,22 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
},
+ 'arch:mips': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+ 'simulator:none': {
+ 'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+ 'LINKFLAGS': ['-EL'],
+ 'LDFLAGS': ['-EL']
+ }
+ },
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
+ 'simulator:mips': {
+ 'CCFLAGS': ['-m32'],
+ 'LINKFLAGS': ['-m32']
+ },
'mode:release': {
'CCFLAGS': ['-O2']
},
@@ -601,7 +627,7 @@ SIMPLE_OPTIONS = {
'help': 'the os to build for (' + OS_GUESS + ')'
},
'arch': {
- 'values':['arm', 'ia32', 'x64'],
+ 'values':['arm', 'ia32', 'x64', 'mips'],
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
@@ -651,7 +677,7 @@ SIMPLE_OPTIONS = {
'help': 'use Microsoft Visual C++ link-time code generation'
},
'simulator': {
- 'values': ['arm', 'none'],
+ 'values': ['arm', 'mips', 'none'],
'default': 'none',
'help': 'build with simulator'
},
@@ -871,6 +897,11 @@ def PostprocessOptions(options):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
options['armvariant'] = 'none'
+ if options['arch'] == 'mips':
+ if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+ # Print a warning if native regexp is specified for mips
+ print "Warning: forcing regexp to interpreted for mips"
+ options['regexp'] = 'interpreted'
def ParseEnvOverrides(arg, imports):
122 deps/v8/include/v8.h
@@ -534,51 +534,76 @@ class V8EXPORT ScriptOrigin {
class V8EXPORT Script {
public:
- /**
- * Compiles the specified script. The ScriptOrigin* and ScriptData*
- * parameters are owned by the caller of Script::Compile. No
- * references to these objects are kept after compilation finishes.
- *
- * The script object returned is context independent; when run it
- * will use the currently entered context.
- */
- static Local<Script> New(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL);
+ /**
+ * Compiles the specified script (context-independent).
+ *
+ * \param source Script source code.
+ * \param origin Script origin, owned by caller, no references are kept
+ * when New() returns
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when New() returns.
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but allows data to be
+ * available to compile event handlers.
+ * \return Compiled script object (context independent; when run it
+ * will use the currently entered context).
+ */
+ static Local<Script> New(Handle<String> source,
+ ScriptOrigin* origin = NULL,
+ ScriptData* pre_data = NULL,
+ Handle<String> script_data = Handle<String>());
- /**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * The script object returned is context independent; when run it
- * will use the currently entered context.
- */
- static Local<Script> New(Handle<String> source,
- Handle<Value> file_name);
-
- /**
- * Compiles the specified script. The ScriptOrigin* and ScriptData*
- * parameters are owned by the caller of Script::Compile. No
- * references to these objects are kept after compilation finishes.
+ /**
+ * Compiles the specified script using the specified file name
+ * object (typically a string) as the script's origin.
+ *
+ * \param source Script source code.
+ * \param file_name file name object (typically a string) to be used
+ * as the script's origin.
+ * \return Compiled script object (context independent; when run it
+ * will use the currently entered context).
+ */
+ static Local<Script> New(Handle<String> source,
+ Handle<Value> file_name);
+
+ /**
+ * Compiles the specified script (bound to current context).
*
- * The script object returned is bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
+ * \param source Script source code.
+ * \param origin Script origin, owned by caller, no references are kept
+ * when Compile() returns
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when Compile() returns.
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but makes data available
+ * earlier (i.e. to compile event handlers).
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL);
+ ScriptData* pre_data = NULL,
+ Handle<String> script_data = Handle<String>());
/**
* Compiles the specified script using the specified file name
* object (typically a string) as the script's origin.
*
- * The script object returned is bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
+ * \param source Script source code.
+ * \param file_name File name to use as script's origin
+ * \param script_data Arbitrary data associated with script. Using
+ * this has same effect as calling SetData(), but makes data available
+ * earlier (i.e. to compile event handlers).
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
- Handle<Value> file_name);
+ Handle<Value> file_name,
+ Handle<String> script_data = Handle<String>());
/**
* Runs the script returning the resulting value. If the script is
@@ -1197,6 +1222,13 @@ class V8EXPORT Object : public Value {
Local<Value> GetPrototype();
/**
+ * Set the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ bool SetPrototype(Handle<Value> prototype);
+
+ /**
* Finds an instance of the given function template in the prototype
* chain.
*/
@@ -1354,7 +1386,15 @@ class V8EXPORT Function : public Object {
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
+
+ /**
+ * Returns zero based line number of function body and
+ * kLineOffsetNotFound if no information available.
+ */
+ int GetScriptLineNumber() const;
+ ScriptOrigin GetScriptOrigin() const;
static inline Function* Cast(Value* obj);
+ static const int kLineOffsetNotFound;
private:
Function();
static void CheckCast(Value* obj);
@@ -2309,22 +2349,30 @@ class V8EXPORT V8 {
static bool IsProfilerPaused();
/**
- * Resumes specified profiler modules.
+ * Resumes specified profiler modules. Can be called several times to
+ * mark the opening of a profiler events block with the given tag.
+ *
* "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
+ * \param tag Profile tag.
*/
- static void ResumeProfilerEx(int flags);
+ static void ResumeProfilerEx(int flags, int tag = 0);
/**
- * Pauses specified profiler modules.
+ * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
+ * a block of profiler events opened by a call to "ResumeProfilerEx" with the
+ * same tag value. There is no need for blocks to be properly nested.
+ * The profiler is paused when the last opened block is closed.
+ *
* "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
* See ProfilerModules enum.
*
* \param flags Flags specifying profiler modules.
+ * \param tag Profile tag.
*/
- static void PauseProfilerEx(int flags);
+ static void PauseProfilerEx(int flags, int tag = 0);
/**
* Returns active (resumed) profiler modules.
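A minimal sketch of the API additions documented above (assumed usage, not part of this diff; the file name and data strings are hypothetical, and a context is assumed to be entered):

    v8::HandleScope scope;
    v8::Handle<v8::String> source = v8::String::New("1 + 1");
    // New optional script_data argument to Compile().
    v8::Handle<v8::String> data = v8::String::New("extra-script-data");
    v8::Local<v8::Script> script =
        v8::Script::Compile(source, v8::String::New("test.js"), data);
    script->Run();

    // SetPrototype(), also added in this version, sets the prototype directly,
    // bypassing __proto__ skipping and the security handler.
    v8::Local<v8::Object> obj = v8::Object::New();
    v8::Local<v8::Object> proto = v8::Object::New();
    obj->SetPrototype(proto);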
2  deps/v8/samples/lineprocessor.cc
@@ -152,7 +152,7 @@ int RunMain(int argc, char* argv[]) {
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
- port_number = atoi(argv[i + 1]);
+ port_number = atoi(argv[i + 1]); // NOLINT
i++;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
20 deps/v8/src/SConscript
@@ -72,6 +72,7 @@ SOURCES = {
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
+ liveedit.cc
log-utils.cc
log.cc
mark-compact.cc
@@ -131,6 +132,24 @@ SOURCES = {
'armvariant:thumb2': Split("""
arm/assembler-thumb2.cc
"""),
+ 'arch:mips': Split("""
+ mips/assembler-mips.cc
+ mips/builtins-mips.cc
+ mips/codegen-mips.cc
+ mips/constants-mips.cc
+ mips/cpu-mips.cc
+ mips/debug-mips.cc
+ mips/disasm-mips.cc
+ mips/fast-codegen-mips.cc
+ mips/full-codegen-mips.cc
+ mips/frames-mips.cc
+ mips/ic-mips.cc
+ mips/jump-target-mips.cc
+ mips/macro-assembler-mips.cc
+ mips/register-allocator-mips.cc
+ mips/stub-cache-mips.cc
+ mips/virtual-frame-mips.cc
+ """),
'arch:ia32': Split("""
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
@@ -168,6 +187,7 @@ SOURCES = {
x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
+ 'simulator:mips': ['mips/simulator-mips.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
37 deps/v8/src/accessors.cc
@@ -647,42 +647,9 @@ Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
Object* Accessors::ObjectSetPrototype(JSObject* receiver,
Object* value,
void*) {
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
-
- // Silently ignore the change if value is not a JSObject or null.
- // SpiderMonkey behaves this way.
- if (!value->IsJSObject() && !value->IsNull()) return value;
-
- for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
- if (JSObject::cast(pt) == receiver) {
- // Cycle detected.
- HandleScope scope;
- return Top::Throw(*Factory::NewError("cyclic_proto",
- HandleVector<Object>(NULL, 0)));
- }
- }
-
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- JSObject* current = receiver;
- Object* current_proto = receiver->GetPrototype();
- while (current_proto->IsJSObject() &&
- JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
- current = JSObject::cast(current_proto);
- current_proto = current_proto->GetPrototype();
- }
-
- // Set the new prototype of the object.
- Object* new_map = current->map()->CopyDropTransitions();
- if (new_map->IsFailure()) return new_map;
- Map::cast(new_map)->set_prototype(value);
- current->set_map(Map::cast(new_map));
-
+ const bool skip_hidden_prototypes = true;
// To be consistent with other Set functions, return the value.
- return value;
+ return receiver->SetPrototype(value, skip_hidden_prototypes);
}
164 deps/v8/src/api.cc
@@ -1106,7 +1106,8 @@ ScriptData* ScriptData::New(unsigned* data, int length) {
Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
- v8::ScriptData* script_data) {
+ v8::ScriptData* pre_data,
+ v8::Handle<String> script_data) {
ON_BAILOUT("v8::Script::New()", return Local<Script>());
LOG_API("Script::New");
ENTER_V8;
@@ -1126,20 +1127,17 @@ Local<Script> Script::New(v8::Handle<String> source,
}
}
EXCEPTION_PREAMBLE();
- i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+ i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
// We assert that the pre-data is sane, even though we can actually
// handle it if it turns out not to be in release mode.
- ASSERT(pre_data == NULL || pre_data->SanityCheck());
+ ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
// If the pre-data isn't sane we simply ignore it
- if (pre_data != NULL && !pre_data->SanityCheck()) {
- pre_data = NULL;
- }
- i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- NULL,
- pre_data);
+ if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+ pre_data_impl = NULL;
+ }
+ i::Handle<i::JSFunction> boilerplate =
+ i::Compiler::Compile(str, name_obj, line_offset, column_offset, NULL,
+ pre_data_impl, Utils::OpenHandle(*script_data));
has_pending_exception = boilerplate.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Script>());
return Local<Script>(ToApi<Script>(boilerplate));
@@ -1155,11 +1153,12 @@ Local<Script> Script::New(v8::Handle<String> source,
Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
- v8::ScriptData* script_data) {
+ v8::ScriptData* pre_data,
+ v8::Handle<String> script_data) {
ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
LOG_API("Script::Compile");
ENTER_V8;
- Local<Script> generic = New(source, origin, script_data);
+ Local<Script> generic = New(source, origin, pre_data, script_data);
if (generic.IsEmpty())
return generic;
i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
@@ -1171,9 +1170,10 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
+ v8::Handle<Value> file_name,
+ v8::Handle<String> script_data) {
ScriptOrigin origin(file_name);
- return Compile(source, &origin);
+ return Compile(source, &origin, 0, script_data);
}
@@ -2032,6 +2032,19 @@ Local<Value> v8::Object::GetPrototype() {
}
+bool v8::Object::SetPrototype(Handle<Value> value) {
+ ON_BAILOUT("v8::Object::SetPrototype()", return false);
+ ENTER_V8;
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(false);
+ return true;
+}
+
+
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Handle<FunctionTemplate> tmpl) {
ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
@@ -2194,7 +2207,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
- if (lookup.IsValid()) {
+ if (lookup.IsProperty()) {
PropertyAttributes attributes;
i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
&lookup,
@@ -2213,7 +2226,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedProperty(*key_obj, &lookup);
- if (lookup.IsValid()) {
+ if (lookup.IsProperty()) {
PropertyAttributes attributes;
i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
&lookup,
@@ -2445,6 +2458,99 @@ Handle<Value> Function::GetName() const {
}
+ScriptOrigin Function::GetScriptOrigin() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ v8::ScriptOrigin origin(
+ Utils::ToLocal(i::Handle<i::Object>(script->name())),
+ v8::Integer::New(script->line_offset()->value()),
+ v8::Integer::New(script->column_offset()->value()));
+ return origin;
+ }
+ return v8::ScriptOrigin(Handle<Value>());
+}
+
+
+const int Function::kLineOffsetNotFound = -1;
+
+
+int Function::GetScriptLineNumber() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return i::GetScriptLineNumber(script, func->shared()->start_position());
+ }
+ return kLineOffsetNotFound;
+}
+
+
+namespace {
+
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+ // Records that the given string's characters were copied to some
+ // external buffer. If this happens often we should honor
+ // externalization requests for the string.
+ static void RecordWrite(i::Handle<i::String> string) {
+ i::Address address = reinterpret_cast<i::Address>(*string);
+ i::Address top = i::Heap::NewSpaceTop();
+ if (IsFreshString(address, top)) {
+ IncrementUseCount(top);
+ }
+ }
+
+ // Estimates freshness and use frequency of the given string based
+ // on how close it is to the new space top and the recorded usage
+ // history.
+ static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
+ i::Address address = reinterpret_cast<i::Address>(*string);
+ i::Address top = i::Heap::NewSpaceTop();
+ return IsFreshString(address, top) && IsUseCountLow(top);
+ }
+
+ private:
+ static inline bool IsFreshString(i::Address string, i::Address top) {
+ return top - kFreshnessLimit <= string && string <= top;
+ }
+
+ static inline bool IsUseCountLow(i::Address top) {
+ if (last_top_ != top) return true;
+ return use_count_ < kUseLimit;
+ }
+
+ static inline void IncrementUseCount(i::Address top) {
+ if (last_top_ != top) {
+ use_count_ = 0;
+ last_top_ = top;
+ }
+ ++use_count_;
+ }
+
+ // How close to the new space top a fresh string has to be.
+ static const int kFreshnessLimit = 1024;
+
+ // The number of uses required to consider a string useful.
+ static const int kUseLimit = 32;
+
+ // Single use counter shared by all fresh strings.
+ static int use_count_;
+
+ // Last new space top when the use count above was valid.
+ static i::Address last_top_;
+};
+
+int StringTracker::use_count_ = 0;
+i::Address StringTracker::last_top_ = NULL;
+
+} // namespace
+
+
int String::Length() const {
if (IsDeadCheck("v8::String::Length()")) return 0;
return Utils::OpenHandle(this)->length();
@@ -2462,6 +2568,7 @@ int String::WriteUtf8(char* buffer, int capacity) const {
LOG_API("String::WriteUtf8");
ENTER_V8;
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@@ -2505,6 +2612,7 @@ int String::WriteAscii(char* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlattenIfNotFlat();
@@ -2531,6 +2639,7 @@ int String::Write(uint16_t* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ StringTracker::RecordWrite(str);
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@@ -3098,6 +3207,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
@@ -3123,6 +3233,7 @@ bool v8::String::MakeExternal(
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
@@ -3134,6 +3245,7 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
+ if (StringTracker::IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kSize)
return false;
@@ -3357,14 +3469,14 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
+ PauseProfilerEx(PROFILER_MODULE_CPU);
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
+ ResumeProfilerEx(PROFILER_MODULE_CPU);
#endif
}
@@ -3378,7 +3490,7 @@ bool V8::IsProfilerPaused() {
}
-void V8::ResumeProfilerEx(int flags) {
+void V8::ResumeProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
// Snapshot mode: resume modules, perform GC, then pause only
@@ -3388,19 +3500,19 @@ void V8::ResumeProfilerEx(int flags) {
// Reset snapshot flag and CPU module flags.
flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
const int current_flags = i::Logger::GetActiveProfilerModules();
- i::Logger::ResumeProfiler(flags);
+ i::Logger::ResumeProfiler(flags, tag);
i::Heap::CollectAllGarbage(false);
- i::Logger::PauseProfiler(~current_flags & flags);
+ i::Logger::PauseProfiler(~current_flags & flags, tag);
} else {
- i::Logger::ResumeProfiler(flags);
+ i::Logger::ResumeProfiler(flags, tag);
}
#endif
}
-void V8::PauseProfilerEx(int flags) {
+void V8::PauseProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(flags);
+ i::Logger::PauseProfiler(flags, tag);
#endif
}
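The tagged profiler calls above pair up as open/close markers for profiler event blocks. A small sketch of assumed usage (not part of this commit; the tag values are made up):

    // Open two profiler event blocks with distinct tags.
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, 1);
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, 2);
    // ... code of interest runs here ...
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, 2);  // closes the block tagged 2
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, 1);  // profiler pauses when the last block closes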
191 deps/v8/src/arm/assembler-arm.cc
@@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our
// code generation.
#if !defined(__arm__)
- // For the simulator=arm build, always use VFP since the arm simulator has
- // VFP support.
- supported_ |= 1u << VFP3;
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ if (FLAG_enable_vfp3) {
+ supported_ |= 1u << VFP3;
+ }
+ // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ if (FLAG_enable_armv7) {
+ supported_ |= 1u << ARMv7;
+ }
#else
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
+
+ if (OS::ArmCpuHasFeature(ARMv7)) {
+ supported_ |= 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << ARMv7;
+ }
#endif
}
@@ -83,9 +93,9 @@ Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
-Register r8 = { 8 };
+Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 };
-Register r10 = { 10 };
+Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
@@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
-// Instruction encoding bits
+// Instruction encoding bits.
enum {
H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned)
@@ -299,14 +309,14 @@ enum {
B26 = 1 << 26,
B27 = 1 << 27,
- // Instruction bit masks
+ // Instruction bit masks.
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
- // Reserved condition
+ // Reserved condition.
nv = 15 << 28
};
@@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-// spare_buffer_
+// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // emit constant pool if necessary
+ // Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0);
- // setup desc
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
void Assembler::link_to(Label* L, Label* appendix) {
if (appendix->is_linked()) {
if (L->is_linked()) {
- // append appendix to L's list
+ // Append appendix to L's list.
int fixup_pos;
int link = L->pos();
do {
@@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos());
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
@@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
}
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
- // imm32 must be unsigned
+ // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
@@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'
+ // condition code), then replace it with a 'ldr rd, [pc]'.
RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
}
instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) {
- // immediate shift
+ // Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else {
- // register shift
+ // Register shift.
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc))
- // block constant pool emission for one instruction after reading pc
+ // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
@@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_12 = x.offset_;
if (offset_12 < 0) {
offset_12 = -offset_12;
am ^= U;
}
if (!is_uint12(offset_12)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_12 >= 0); // no masking needed
instr |= offset_12;
} else {
- // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized
+ // and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
@@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_8 = x.offset_;
if (offset_8 < 0) {
offset_8 = -offset_8;
am ^= U;
}
if (!is_uint8(offset_8)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
- // scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
- // register offset
+ // Register offset.
ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code();
}
@@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // unindexed addressing is not encoded by this function
+ // Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- // post-indexed addressing requires W == 1; different than in addrmod2/3
+ // Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0)
am |= W;
@@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
// Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label
+ // be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
-// Branch instructions
+// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
- // dead code is a good location to emit the constant pool
+ // Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
@@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
}
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond) {
+ ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+ ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+ ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+ emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+ dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
}
-// Multiply instructions
+// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
}
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
-// Status register access instructions
+// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
ASSERT(fields >= B16 && fields < B20); // at least one field set
Instr instr;
if (!src.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // immediate operand cannot be encoded, load it first to register ip
+ // Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
}
-// Load/Store instructions
+// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
RegList dst,
Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst);
- // emit the constant pool after a function return implemented by ldm ..{..pc}
+ // Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) {
// There is a slight chance that the ldm instruction was actually a call,
// in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
}
-// Semaphore instructions
+// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
}
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
// The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
}
-// Coprocessor instructions
+// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
int opcode_1,
CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1464,7 +1490,7 @@ void Assembler::vcvt(const DwVfpRegister dst,
const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
- // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1571,14 +1597,14 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
-// Pseudo instructions
+// Pseudo instructions.
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
Condition cond) {
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
if ((am & P) == 0) // post indexing
mov(dst, Operand(x.rn_), s, cond);
else if ((am & U) == 0) // negative indexing
@@ -1612,7 +1638,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
}
-// Debugging
+// Debugging.
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
CheckBuffer();
@@ -1665,7 +1691,7 @@ void Assembler::WriteRecordedPositions() {
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
- // compute new buffer size
+ // Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
@@ -1676,20 +1702,20 @@ void Assembler::GrowBuffer() {
}
CHECK_GT(desc.buffer_size, 0); // no overflow
- // setup new buffer
+ // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- // copy the data
+ // Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.pos(), desc.reloc_size);
- // switch buffers
+ // Switch buffers.
DeleteArray(buffer_);
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
@@ -1697,11 +1723,11 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // none of our relocation types are pc relative pointing outside the code
+ // None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries
+ // to relocate any emitted relocation entries.
- // relocate pending relocation entries
+ // Relocate pending relocation entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1716,16 +1742,16 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
- // Adjust code for new modes
+ // Adjust code for new modes.
ASSERT(RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
- // these modes do not need an entry in the constant pool
+ // These modes do not need an entry in the constant pool.
} else {
ASSERT(num_prinfo_ < kMaxNumPRInfo);
prinfo_[num_prinfo_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info
+ // instruction for which we just recorded relocation info.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1752,7 +1778,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// blocked for a specific range.
next_buffer_check_ = pc_offset() + kCheckConstInterval;
- // There is nothing to do if there are no pending relocation info entries
+ // There is nothing to do if there are no pending relocation info entries.
if (num_prinfo_ == 0) return;
// We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1778,10 +1804,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// no_const_pool_before_, which is checked here. Also, recursive calls to
// CheckConstPool are blocked by no_const_pool_before_.
if (pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as possible
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
next_buffer_check_ = no_const_pool_before_;
- // Something is wrong if emission is forced and blocked at the same time
+ // Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
@@ -1795,23 +1822,23 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
- // Block recursive calls to CheckConstPool
+ // Block recursive calls to CheckConstPool.
BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
num_prinfo_*kInstrSize);
// Don't bother to check for the emit calls below.
next_buffer_check_ = no_const_pool_before_;
- // Emit jump over constant pool if necessary
+ // Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) b(&after_pool);
RecordComment("[ Constant Pool");
- // Put down constant pool marker
- // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A3.1 Instruction set encoding.
emit(0x03000000 | num_prinfo_);
- // Emit constant pool entries
+ // Emit constant pool entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1819,8 +1846,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be a ldr/str [pc, #offset]
- // P and U set, B and W clear, Rn == pc, offset12 still 0
+ // Instruction to patch must be a ldr/str [pc, #offset].
+ // P and U set, B and W clear, Rn == pc, offset12 still 0.
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
9 deps/v8/src/arm/assembler-arm.h
@@ -80,7 +80,7 @@ struct Register {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -205,7 +205,7 @@ struct CRegister {
return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Unfortunately we can't make this private in a struct.
int code_;
};
@@ -250,7 +250,7 @@ enum Coprocessor {
};
-// Condition field in instructions
+// Condition field in instructions.
enum Condition {
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
@@ -628,6 +628,9 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
+ void ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond = al);
+
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
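Per the encoding comment in assembler-arm.cc above (UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>), src2 carries the lsb and src3 the width minus one. A hypothetical sketch of how the new instruction might be emitted inside a code generator (using the usual ACCESS_MASM shorthand; the registers and field values are made up):

    // Extract an 8-bit field starting at bit 4 of r1 into r0.
    __ ubfx(r0, r1, Operand(4), Operand(8 - 1));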
30 deps/v8/src/arm/assembler-thumb2-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
@@ -229,14 +215,24 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_address_at(Address pc) {
- Instr instr = Memory::int32_at(pc);
- // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ Address target_pc = pc;
+ Instr instr = Memory::int32_at(target_pc);
+ // If we have a bx instruction, the instruction before the bx is
+ // what we need to patch.
+ static const int32_t kBxInstMask = 0x0ffffff0;
+ static const int32_t kBxInstPattern = 0x012fff10;
+ if ((instr & kBxInstMask) == kBxInstPattern) {
+ target_pc -= kInstrSize;
+ instr = Memory::int32_at(target_pc);
+ }
+ // Verify that the instruction to patch is a
+ // ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
- return pc + offset + 8;
+ return target_pc + offset + 8;
}
227 deps/v8/src/arm/assembler-thumb2.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our
// code generation.
#if !defined(__arm__)
- // For the simulator=arm build, always use VFP since the arm simulator has
- // VFP support.
- supported_ |= 1u << VFP3;
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ if (FLAG_enable_vfp3) {
+ supported_ |= 1u << VFP3;
+ }
+ // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
+ if (FLAG_enable_armv7) {
+ supported_ |= 1u << ARMv7;
+ }
#else
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
+
+ if (OS::ArmCpuHasFeature(ARMv7)) {
+ supported_ |= 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << ARMv7;
+ }
#endif
}
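With ARMv7 now probed like VFP3 on both hardware and simulator builds, code generators can guard v7-only instructions in the usual way. The sketch below assumes V8's existing CpuFeatures::IsSupported / CpuFeatures::Scope idiom and the __ masm shorthand, and uses the ubfx helper added elsewhere in this commit; the register choices and the pre-v7 fallback are illustrative, not taken from this patch.

if (CpuFeatures::IsSupported(ARMv7)) {
  CpuFeatures::Scope scope(ARMv7);
  __ ubfx(r0, r1, Operand(4), Operand(7));  // r0 = bits 4..11 of r1
} else {
  __ mov(r0, Operand(r1, LSR, 4));          // same extraction without ARMv7:
  __ and_(r0, r0, Operand(0xff));           // shift, then mask to 8 bits
}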
@@ -83,9 +93,9 @@ Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
-Register r8 = { 8 };
+Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 };
-Register r10 = { 10 };
+Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
@@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
-// Instruction encoding bits
+// Instruction encoding bits.
enum {
H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned)
@@ -299,14 +309,14 @@ enum {
B26 = 1 << 26,
B27 = 1 << 27,
- // Instruction bit masks
+ // Instruction bit masks.
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
- // Reserved condition
+ // Reserved condition.
nv = 15 << 28
};
@@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-// spare_buffer_
+// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) {
- // do our own buffer management
+ // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
@@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
own_buffer_ = true;
} else {
- // use externally provided buffer instead
+ // Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
- // setup buffer pointers
+ // Setup buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // emit constant pool if necessary
+ // Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0);
- // setup desc
+ // Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
void Assembler::link_to(Label* L, Label* appendix) {
if (appendix->is_linked()) {
if (L->is_linked()) {
- // append appendix to L's list
+ // Append appendix to L's list.
int fixup_pos;
int link = L->pos();
do {
@@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos());
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
@@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
}
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
- // imm32 must be unsigned
+ // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
- // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+ // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
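The loop above decides whether a 32-bit constant can be encoded as an ARM shifter operand, i.e. an 8-bit value rotated right by an even amount. A standalone re-derivation with sample values of my own makes the two outcomes concrete; the rot == 0 case is guarded here only so the 32-bit shift stays well defined outside the original context.

#include <cstdint>
#include <cstdio>

static bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    uint32_t imm8 = rot == 0 ? imm32
                             : (imm32 << 2 * rot) | (imm32 >> (32 - 2 * rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;  // operand value == immed_8 rotated right by 2 * rot
      *immed_8 = imm8;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  // 0x00AB0000 fits: immed_8 == 0xAB, rotate_imm == 8 (0xAB ROR 16).
  printf("%d\n", FitsShifter(0x00AB0000u, &rot, &imm8));
  // 0x102 would need an odd rotation to land in 8 bits, so it does not fit
  // and addrmod1 falls back to loading it into ip.
  printf("%d\n", FitsShifter(0x00000102u, &rot, &imm8));
  return 0;
}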
@@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'
+ // condition code), then replace it with a 'ldr rd, [pc]'.
RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
}
instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) {
- // immediate shift
+ // Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else {
- // register shift
+ // Register shift.
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc))
- // block constant pool emission for one instruction after reading pc
+ // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
}
@@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_12 = x.offset_;
if (offset_12 < 0) {
offset_12 = -offset_12;
am ^= U;
}
if (!is_uint12(offset_12)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_12 >= 0); // no masking needed
instr |= offset_12;
} else {
- // register offset (shift_imm_ and shift_op_ are 0) or scaled
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
- // and shift_op_ are initialized
+ // and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
@@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
- // immediate offset
+ // Immediate offset.
int offset_8 = x.offset_;
if (offset_8 < 0) {
offset_8 = -offset_8;
am ^= U;
}
if (!is_uint8(offset_8)) {
- // immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
- // scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed
+ // Scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
- // register offset
+ // Register offset.
ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code();
}
@@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // unindexed addressing is not encoded by this function
+ // Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- // post-indexed addressing requires W == 1; different than in addrmod2/3
+ // Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0)
am |= W;
@@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
// Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label
+ // be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
-// Branch instructions
+// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
- // dead code is a good location to emit the constant pool
+ // Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
@@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
}
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+ const Operand& src3, Condition cond) {
+ ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+ ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+ ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+ emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+ dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
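As a semantic cross-check (not part of the patch): with the interface added above, the last operand is width minus one, so __ ubfx(r0, r1, Operand(4), Operand(7)) reads the 8-bit field at bits 4..11 of r1. A hypothetical host-side equivalent, valid for widths of 1 to 31 bits:

#include <cstdint>

uint32_t UnsignedBitfieldExtract(uint32_t value, unsigned lsb, unsigned width) {
  // Shift the field down to bit 0, then keep its low `width` bits.
  return (value >> lsb) & ((1u << width) - 1u);
}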
@@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
}
-// Multiply instructions
+// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
}
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
-// Status register access instructions
+// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
ASSERT(fields >= B16 && fields < B20); // at least one field set
Instr instr;
if (!src.rm_.is_valid()) {
- // immediate
+ // Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // immediate operand cannot be encoded, load it first to register ip
+ // Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
}
-// Load/Store instructions
+// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
- // pattern
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
RegList dst,
Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+ // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst);
- // emit the constant pool after a function return implemented by ldm ..{..pc}
+ // Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) {
// There is a slight chance that the ldm instruction was actually a call,
// in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
}
-// Semaphore instructions
+// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
}
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
// The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
}
-// Coprocessor instructions
+// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
int opcode_1,
CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
int option,
LFlag l,
Condition cond) {
- // unindexed addressing
+ // Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
@@ -1371,6 +1397,36 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // Ddst = MEM(Rbase + offset).
+ // Instruction details available in ARM DDI 0406A, A8-628.
+ // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1011(11-8) | offset
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
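A quick encoding check of the new vldr (my own standalone sketch; the constants mirror the emit line above, and r1 == 1, d0 == 0 are the standard register numbers): vldr(d0, r1, 8, al) should assemble to 0xED910B02, with the 8-bit immediate carrying offset / 4 == 2 because the field counts words.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t al  = 14u << 28;  // condition field for "always"
  const uint32_t B20 = 1u << 20, B16 = 1u << 16, B12 = 1u << 12, B8 = 1u << 8;
  int offset = 8;                  // must be a multiple of 4
  uint32_t instr = al | 0xD9 * B20 | 1 * B16 |   // base register r1
                   0 * B12 | 0xB * B8 |          // destination d0
                   ((offset / 4) & 255);
  printf("0x%08X\n", instr);                     // prints 0xED910B02
  return 0;
}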
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const Register base,
+ int offset,