Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Upgrade v8 to 1.3.14

  • Loading branch information...
commit 1f31a7dbfe792fa6eee8a9cdcdfd662aad5cde06 1 parent 1a2762b
@ry ry authored
Showing with 3,495 additions and 4,380 deletions.
  1. +0 −3  LICENSE
  2. +1 −1  Makefile
  3. +43 −0 deps/v8/ChangeLog
  4. +0 −7 deps/v8/LICENSE
  5. +1 −0  deps/v8/SConstruct
  6. +46 −33 deps/v8/include/v8.h
  7. +30 −31 deps/v8/src/SConscript
  8. +59 −25 deps/v8/src/api.cc
  9. +51 −34 deps/v8/src/api.h
  10. +26 −0 deps/v8/src/arguments.h
  11. +7 −1 deps/v8/src/arm/assembler-arm-inl.h
  12. +2 −2 deps/v8/src/arm/assembler-arm.h
  13. +402 −11 deps/v8/src/arm/builtins-arm.cc
  14. +0 −301 deps/v8/src/arm/cfg-arm.cc
  15. +61 −73 deps/v8/src/arm/codegen-arm.cc
  16. +1 −1  deps/v8/src/arm/codegen-arm.h
  17. +40 −37 deps/v8/src/arm/macro-assembler-arm.cc
  18. +27 −17 deps/v8/src/arm/macro-assembler-arm.h
  19. +2 −1  deps/v8/src/arm/simulator-arm.cc
  20. +25 −12 deps/v8/src/arm/simulator-arm.h
  21. +6 −6 deps/v8/src/arm/stub-cache-arm.cc
  22. +3 −1 deps/v8/src/array.js
  23. +1 −0  deps/v8/src/assembler.h
  24. +0 −1  deps/v8/src/ast.cc
  25. +18 −31 deps/v8/src/ast.h
  26. +9 −10 deps/v8/src/bootstrapper.cc
  27. +3 −3 deps/v8/src/bootstrapper.h
  28. +3 −1 deps/v8/src/builtins.cc
  29. +0 −763 deps/v8/src/cfg.cc
  30. +0 −871 deps/v8/src/cfg.h
  31. +14 −26 deps/v8/src/codegen.cc
  32. +0 −21 deps/v8/src/compiler.cc
  33. +5 −0 deps/v8/src/debug-agent.cc
  34. +4 −1 deps/v8/src/debug-agent.h
  35. +11 −13 deps/v8/src/debug-delay.js
  36. +5 −0 deps/v8/src/debug.cc
  37. +4 −0 deps/v8/src/debug.h
  38. +59 −55 deps/v8/src/execution.cc
  39. +30 −34 deps/v8/src/execution.h
  40. +5 −0 deps/v8/src/factory.cc
  41. +2 −0  deps/v8/src/factory.h
  42. +0 −2  deps/v8/src/flag-definitions.h
  43. +15 −15 deps/v8/src/handles.cc
  44. +144 −66 deps/v8/src/heap-profiler.cc
  45. +9 −12 deps/v8/src/heap-profiler.h
  46. +68 −6 deps/v8/src/heap.cc
  47. +3 −0  deps/v8/src/heap.h
  48. +9 −3 deps/v8/src/ia32/assembler-ia32-inl.h
  49. +43 −47 deps/v8/src/ia32/builtins-ia32.cc
  50. +0 −315 deps/v8/src/ia32/cfg-ia32.cc
  51. +66 −75 deps/v8/src/ia32/codegen-ia32.cc
  52. +1 −1  deps/v8/src/ia32/codegen-ia32.h
  53. +9 −8 deps/v8/src/ia32/ic-ia32.cc
  54. +22 −24 deps/v8/src/ia32/macro-assembler-ia32.cc
  55. +36 −28 deps/v8/src/ia32/macro-assembler-ia32.h
  56. +10 −9 deps/v8/src/ia32/simulator-ia32.h
  57. +6 −6 deps/v8/src/ia32/stub-cache-ia32.cc
  58. +7 −0 deps/v8/src/list.h
  59. +1 −1  deps/v8/src/log-utils.cc
  60. +1 −1  deps/v8/src/macro-assembler.h
  61. +8 −2 deps/v8/src/mark-compact.cc
  62. +4 −0 deps/v8/src/memory.h
  63. +9 −8 deps/v8/src/messages.js
  64. +0 −2  deps/v8/src/mirror-delay.js
  65. +84 −73 deps/v8/src/objects.cc
  66. +49 −10 deps/v8/src/objects.h
  67. +52 −131 deps/v8/src/parser.cc
  68. +9 −5 deps/v8/src/platform-freebsd.cc
  69. +12 −4 deps/v8/src/platform-macos.cc
  70. +0 −10 deps/v8/src/prettyprinter.cc
  71. +8 −0 deps/v8/src/regexp-stack.cc
  72. +2 −0  deps/v8/src/regexp-stack.h
  73. +0 −12 deps/v8/src/rewriter.cc
  74. +16 −24 deps/v8/src/runtime.cc
  75. +23 −9 deps/v8/src/serialize.cc
  76. +133 −3 deps/v8/src/spaces.cc
  77. +72 −2 deps/v8/src/spaces.h
  78. +39 −39 deps/v8/src/string.js
  79. +13 −28 deps/v8/src/stub-cache.cc
  80. +2 −1  deps/v8/src/top.cc
  81. +7 −0 deps/v8/src/top.h
  82. +5 −5 deps/v8/src/uri.js
  83. +8 −45 deps/v8/src/usage-analyzer.cc
  84. +1 −1  deps/v8/src/utils.cc
  85. +12 −4 deps/v8/src/v8.cc
  86. +1 −1  deps/v8/src/v8.h
  87. +32 −2 deps/v8/src/v8threads.cc
  88. +1 −0  deps/v8/src/v8threads.h
  89. +7 −1 deps/v8/src/variables.h
  90. +1 −1  deps/v8/src/version.cc
  91. +52 −13 deps/v8/src/x64/assembler-x64-inl.h
  92. +36 −3 deps/v8/src/x64/assembler-x64.cc
  93. +27 −18 deps/v8/src/x64/assembler-x64.h
  94. +431 −18 deps/v8/src/x64/builtins-x64.cc
  95. +0 −324 deps/v8/src/x64/cfg-x64.cc
  96. +67 −75 deps/v8/src/x64/codegen-x64.cc
  97. +1 −1  deps/v8/src/x64/codegen-x64.h
  98. +64 −20 deps/v8/src/x64/ic-x64.cc
  99. +61 −48 deps/v8/src/x64/macro-assembler-x64.cc
  100. +46 −28 deps/v8/src/x64/macro-assembler-x64.h
  101. +10 −9 deps/v8/src/x64/simulator-x64.h
  102. +7 −6 deps/v8/src/x64/stub-cache-x64.cc
  103. +69 −0 deps/v8/test/cctest/test-alloc.cc
  104. +123 −5 deps/v8/test/cctest/test-api.cc
  105. +3 −2 deps/v8/test/cctest/test-debug.cc
  106. +75 −9 deps/v8/test/cctest/test-heap-profiler.cc
  107. +0 −7 deps/v8/test/cctest/test-log.cc
  108. +1 −0  deps/v8/test/cctest/test-sockets.cc
  109. +1 −1  deps/v8/test/mjsunit/class-of-builtins.js
  110. +2 −2 deps/v8/test/mjsunit/debug-compile-event.js
  111. +4 −7 deps/v8/test/mjsunit/invalid-lhs.js
  112. +8 −8 deps/v8/test/mjsunit/mirror-script.js
  113. +1 −1  deps/v8/test/mjsunit/regress/regress-220.js
  114. +2 −2 deps/v8/test/mjsunit/switch.js
  115. +2 −0  deps/v8/test/mjsunit/third_party/object-keys.js
  116. +1 −6 deps/v8/tools/gyp/v8.gyp
  117. +4 −16 deps/v8/tools/js2c.py
  118. +278 −218 deps/v8/tools/jsmin.py
  119. +0 −12 deps/v8/tools/visual_studio/v8_base.vcproj
  120. +0 −12 deps/v8/tools/visual_studio/v8_base_arm.vcproj
  121. +0 −12 deps/v8/tools/visual_studio/v8_base_x64.vcproj
  122. +3 −1 src/node.js
  123. +50 −27 tools/js2c.py
View
3  LICENSE
@@ -9,9 +9,6 @@ are:
This code is copyrighted by Marc Alexander Lehmann. Both are dually
licensed under MIT and GPL2.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and has an MIT license.
-
- parseUri, a URI parser, is located in lib/http.js. This is just a small
snippit. It is copyrighted 2007 by Steven Levithan and released under an
MIT license.
View
2  Makefile
@@ -49,7 +49,7 @@ clean:
distclean:
@-rm -rf build/
- @-rm -f *.pyc
+ @-find tools | egrep --colour=never ".pyc$" | xargs rm
check:
@tools/waf-light check
View
43 deps/v8/ChangeLog
@@ -1,3 +1,46 @@
+2009-10-07: Version 1.3.14
+
+ Added GetRealNamedProperty to the API to look up real properties
+ located on the object or in the prototype chain skipping any
+ interceptors.
+
+ Fix the stack limits setting API to work correctly with threads. The
+ stack limit now needs to be set to each thread which is used with V8.
+
+ Remove the high-priority flag from IdleNotification()
+
+ Ensure V8 is initialized before locking and unlocking threads.
+
+ Implemented a new JavaScript minifier for compressing the source of
+ the built-in JavaScript. This removes non-Open Source code by Douglas
+ Crockford from the project.
+
+ Added a missing optimization in StringCharAt.
+
+ Fixed some flaky socket tests.
+
+ Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD
+ in 64-bit mode.
+
+ Fixed memory leaks in the thread management code.
+
+ Fixed the result of assignment to a pixel array. The assigned value
+ is now the result.
+
+ Error reporting for invalid left-hand sides in for-in statements, pre-
+ and postfix count expressions, and assignments now matches the JSC
+ behavior in Safari 4.
+
+ Follow the spec in disallowing function declarations without a name.
+
+ Always allocate code objects within a 2 GB range. On x64 architecture
+ this is used to use near calls (32-bit displacement) in Code objects.
+
+ Optimized array construction ported to x64 and ARM architectures.
+
+ [ES5] Changed Object.keys to return strings for element indices.
+
+
2009-09-23: Version 1.3.13
Fixed uninitialized memory problem.
View
7 deps/v8/LICENSE
@@ -21,13 +21,6 @@ are:
This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and released under
- an MIT license.
-
- - Valgrind client API header, located at third_party/valgrind/valgrind.h
- This is release under the BSD license.
-
- Valgrind client API header, located at third_party/valgrind/valgrind.h
This is release under the BSD license.
View
1  deps/v8/SConstruct
@@ -238,6 +238,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
View
79 deps/v8/include/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -130,6 +130,7 @@ class Data;
namespace internal {
class Object;
+class Arguments;
}
@@ -1205,7 +1206,14 @@ class V8EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
@@ -1401,17 +1409,13 @@ class V8EXPORT Arguments {
*/
class V8EXPORT AccessorInfo {
public:
- inline AccessorInfo(Local<Object> self,
- Local<Value> data,
- Local<Object> holder)
- : self_(self), data_(data), holder_(holder) { }
+ inline AccessorInfo(internal::Object** args)
+ : args_(args) { }
inline Local<Value> Data() const;
inline Local<Object> This() const;
inline Local<Object> Holder() const;
private:
- Local<Object> self_;
- Local<Value> data_;
- Local<Object> holder_;
+ internal::Object** args_;
};
@@ -1567,7 +1571,10 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
/**
* A FunctionTemplate is used to create functions at runtime. There
* can only be one function created from a FunctionTemplate in a
- * context.
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So in case the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
*
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
@@ -1974,8 +1981,13 @@ Handle<Boolean> V8EXPORT False();
/**
- * A set of constraints that specifies the limits of the runtime's
- * memory use.
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
*/
class V8EXPORT ResourceConstraints {
public:
@@ -1985,6 +1997,7 @@ class V8EXPORT ResourceConstraints {
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
+ // Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
private:
int max_young_space_size_;
@@ -2192,7 +2205,8 @@ class V8EXPORT V8 {
/**
* Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch.
+ * initialize from scratch. This function is called implicitly if
+ * you use the API without calling it first.
*/
static bool Initialize();
@@ -2335,12 +2349,11 @@ class V8EXPORT V8 {
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
- * \param is_high_priority tells whether the embedder is high priority.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*/
- static bool IdleNotification(bool is_high_priority);
+ static bool IdleNotification();
/**
* Optional notification that the system is running low on memory.
@@ -2742,15 +2755,15 @@ class Internals {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
-
+
static inline bool HasSmiTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
}
-
+
static inline int SmiValue(internal::Object* value) {
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
}
-
+
static inline bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
@@ -2863,21 +2876,6 @@ int Arguments::Length() const {
}
-Local<Value> AccessorInfo::Data() const {
- return data_;
-}
-
-
-Local<Object> AccessorInfo::This() const {
- return self_;
-}
-
-
-Local<Object> AccessorInfo::Holder() const {
- return holder_;
-}
-
-
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
internal::Object** before = reinterpret_cast<internal::Object**>(*value);
@@ -3075,6 +3073,21 @@ External* External::Cast(v8::Value* value) {
}
+Local<Value> AccessorInfo::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[-3]));
+}
+
+
+Local<Object> AccessorInfo::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
+}
+
+
+Local<Object> AccessorInfo::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
View
61 deps/v8/src/SConscript
@@ -36,49 +36,48 @@ Import('context')
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
- 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
- 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
- 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
- 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
- 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
- 'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc',
- 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
- 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
- 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc',
- 'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc',
- 'serialize.cc', 'snapshot-common.cc', 'spaces.cc',
- 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+ 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+ 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+ 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+ 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+ 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+ 'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+ 'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+ 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+ 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
- 'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc',
- 'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc',
- 'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
- 'arm/regexp-macro-assembler-arm.cc',
- 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
- 'arm/virtual-frame-arm.cc'
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+ 'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+ 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
+ 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
+ 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+ 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
- 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+ 'ia32/regexp-macro-assembler-ia32.cc',
+ 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+ 'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+ 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+ 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+ 'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+ 'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+ 'x64/virtual-frame-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
View
84 deps/v8/src/api.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
@@ -71,7 +72,7 @@ namespace v8 {
thread_local.DecrementCallDepth(); \
if (has_pending_exception) { \
if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.IgnoreOutOfMemory()) \
+ if (!thread_local.ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
@@ -341,9 +342,12 @@ ResourceConstraints::ResourceConstraints()
bool SetResourceConstraints(ResourceConstraints* constraints) {
- bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
- constraints->max_old_space_size());
- if (!result) return false;
+ int semispace_size = constraints->max_young_space_size();
+ int old_gen_size = constraints->max_old_space_size();
+ if (semispace_size != 0 || old_gen_size != 0) {
+ bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+ if (!result) return false;
+ }
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
i::StackGuard::SetStackLimit(limit);
@@ -1898,6 +1902,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::Set()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1918,6 +1923,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1936,6 +1942,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::ForceDelete()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE();
@@ -2121,7 +2128,7 @@ bool v8::Object::HasIndexedLookupInterceptor() {
}
-Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
@@ -2142,12 +2149,32 @@ Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
}
+Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+ ENTER_V8;
+ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::LookupResult lookup;
+ self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+ if (lookup.IsValid()) {
+ PropertyAttributes attributes;
+ i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+ &lookup,
+ *key_obj,
+ &attributes));
+ return Utils::ToLocal(result);
+ }
+ return Local<Value>(); // No real property was found in prototype chain.
+}
+
+
// Turns on access checks by copying the map and setting the check flag.
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Map> new_map =
@@ -2177,6 +2204,7 @@ Local<v8::Object> v8::Object::Clone() {
int v8::Object::GetIdentityHash() {
ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
@@ -2206,6 +2234,7 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -2245,6 +2274,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ON_BAILOUT("v8::DeleteHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
@@ -2259,6 +2289,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
+ HandleScope scope;
if (!ApiCheck(i::Smi::IsValid(length),
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
@@ -2419,20 +2450,14 @@ int String::Write(uint16_t* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlattenIfNotFlat();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++)
- buffer[i] = write_input_buffer.GetNext();
- if (length == -1 || i < length)
- buffer[i] = '\0';
- return i;
+ i::String::WriteToFlat(*str, buffer, start, end);
+ if (length == -1 || end < length)
+ buffer[end] = '\0';
+ return end;
}
@@ -2577,9 +2602,11 @@ bool v8::V8::Dispose() {
}
-bool v8::V8::IdleNotification(bool is_high_priority) {
- if (!i::V8::IsRunning()) return false;
- return i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification() {
+ // Returning true tells the caller that it need not
+ // continue to call IdleNotification.
+ if (!i::V8::IsRunning()) return true;
+ return i::V8::IdleNotification();
}
@@ -2740,7 +2767,9 @@ v8::Local<v8::Context> Context::GetCurrent() {
v8::Local<v8::Context> Context::GetCalling() {
if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+ i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ if (calling.is_null()) return Local<Context>();
+ i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
}
@@ -3187,7 +3216,7 @@ Local<Integer> v8::Integer::New(int32_t value) {
void V8::IgnoreOutOfMemoryException() {
- thread_local.SetIgnoreOutOfMemory(true);
+ thread_local.set_ignore_out_of_memory(true);
}
@@ -3669,6 +3698,11 @@ HandleScopeImplementer* HandleScopeImplementer::instance() {
}
+void HandleScopeImplementer::FreeThreadResources() {
+ thread_local.Free();
+}
+
+
char* HandleScopeImplementer::ArchiveThread(char* storage) {
return thread_local.ArchiveThreadHelper(storage);
}
@@ -3680,7 +3714,7 @@ char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
- Initialize();
+ ResetAfterArchive();
current->Initialize();
return storage + ArchiveSpacePerThread();
@@ -3706,14 +3740,14 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
// Iterate over all handles in the blocks except for the last.
- for (int i = Blocks()->length() - 2; i >= 0; --i) {
- Object** block = Blocks()->at(i);
+ for (int i = blocks()->length() - 2; i >= 0; --i) {
+ Object** block = blocks()->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
- if (!Blocks()->is_empty()) {
- v->VisitPointers(Blocks()->last(), handle_scope_data_.next);
+ if (!blocks()->is_empty()) {
+ v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
if (!saved_contexts_.is_empty()) {
View
85 deps/v8/src/api.h
@@ -311,20 +311,12 @@ class HandleScopeImplementer {
public:
HandleScopeImplementer()
- : blocks(0),
+ : blocks_(0),
entered_contexts_(0),
- saved_contexts_(0) {
- Initialize();
- }
-
- void Initialize() {
- blocks.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare = NULL;
- ignore_out_of_memory = false;
- call_depth = 0;
- }
+ saved_contexts_(0),
+ spare_(NULL),
+ ignore_out_of_memory_(false),
+ call_depth_(0) { }
static HandleScopeImplementer* instance();
@@ -332,6 +324,7 @@ class HandleScopeImplementer {
static int ArchiveSpacePerThread();
static char* RestoreThread(char* from);
static char* ArchiveThread(char* to);
+ static void FreeThreadResources();
// Garbage collection support.
static void Iterate(v8::internal::ObjectVisitor* v);
@@ -341,9 +334,9 @@ class HandleScopeImplementer {
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
- inline void IncrementCallDepth() {call_depth++;}
- inline void DecrementCallDepth() {call_depth--;}
- inline bool CallDepthIsZero() { return call_depth == 0; }
+ inline void IncrementCallDepth() {call_depth_++;}
+ inline void DecrementCallDepth() {call_depth_--;}
+ inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
inline bool LeaveLastContext();
@@ -356,20 +349,44 @@ class HandleScopeImplementer {
inline Context* RestoreContext();
inline bool HasSavedContexts();
- inline List<internal::Object**>* Blocks() { return &blocks; }
-
- inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
- inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+ inline List<internal::Object**>* blocks() { return &blocks_; }
+ inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+ inline void set_ignore_out_of_memory(bool value) {
+ ignore_out_of_memory_ = value;
+ }
private:
- List<internal::Object**> blocks;
- Object** spare;
- int call_depth;
+ void ResetAfterArchive() {
+ blocks_.Initialize(0);
+ entered_contexts_.Initialize(0);
+ saved_contexts_.Initialize(0);
+ spare_ = NULL;
+ ignore_out_of_memory_ = false;
+ call_depth_ = 0;
+ }
+
+ void Free() {
+ ASSERT(blocks_.length() == 0);
+ ASSERT(entered_contexts_.length() == 0);
+ ASSERT(saved_contexts_.length() == 0);
+ blocks_.Free();
+ entered_contexts_.Free();
+ saved_contexts_.Free();
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
+ }
+ ASSERT(call_depth_ == 0);
+ }
+
+ List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
- bool ignore_out_of_memory;
+ Object** spare_;
+ bool ignore_out_of_memory_;
+ int call_depth_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@@ -419,32 +436,32 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare != NULL) ?
- spare :
+ internal::Object** block = (spare_ != NULL) ?
+ spare_ :
NewArray<internal::Object*>(kHandleBlockSize);
- spare = NULL;
+ spare_ = NULL;
return block;
}
void HandleScopeImplementer::DeleteExtensions(int extensions) {
- if (spare != NULL) {
- DeleteArray(spare);
- spare = NULL;
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
}
for (int i = extensions; i > 1; --i) {
- internal::Object** block = blocks.RemoveLast();
+ internal::Object** block = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = blocks.RemoveLast();
+ spare_ = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- spare,
- &spare[kHandleBlockSize]);
+ spare_,
+ &spare_[kHandleBlockSize]);
#endif
}
View
26 deps/v8/src/arguments.h
@@ -45,6 +45,9 @@ namespace internal {
class Arguments BASE_EMBEDDED {
public:
+ Arguments(int length, Object** arguments)
+ : length_(length), arguments_(arguments) { }
+
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
return arguments_[-index];
@@ -61,11 +64,34 @@ class Arguments BASE_EMBEDDED {
// Get the total number of arguments including the receiver.
int length() const { return length_; }
+ Object** arguments() { return arguments_; }
+
private:
int length_;
Object** arguments_;
};
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+ inline CustomArguments(Object *data,
+ JSObject *self,
+ JSObject *holder) {
+ values_[3] = self;
+ values_[2] = holder;
+ values_[1] = Smi::FromInt(0);
+ values_[0] = data;
+ }
+ void IterateInstance(ObjectVisitor* v);
+ Object** end() { return values_ + 3; }
+ private:
+ Object* values_[4];
+};
+
+
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
View
8 deps/v8/src/arm/assembler-arm-inl.h
@@ -81,7 +81,13 @@ void RelocInfo::set_target_address(Address target) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
View
4 deps/v8/src/arm/assembler-arm.h
@@ -645,8 +645,8 @@ class Assembler : public Malloced {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
- void pop(Register dst) {
- ldr(dst, MemOperand(sp, 4, PostIndex), al);
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
}
void pop() {
View
413 deps/v8/src/arm/builtins-arm.cc
@@ -44,15 +44,379 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ str(r1, MemOperand(ip, 0));
// The actual argument count has already been loaded into register
- // r0, but JumpToBuiltin expects r0 to contain the number of
+ // r0, but JumpToRuntime expects r0 to contain the number of
// arguments including the receiver.
__ add(r0, r0, Operand(1));
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ ldr(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, Operand(0));
+ __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, MemOperand(result, JSArray::kSize));
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ mov(scratch3, Operand(initial_capacity));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_holes
+// is true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ ldr(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ tst(array_size, array_size);
+ __ b(nz, &not_empty);
+
+ // If an empty array is requested allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ mov(elements_array_end,
+ Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+ __ add(elements_array_end,
+ elements_array_end,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ AllocateInNewSpace(elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ add(elements_array_storage, result, Operand(JSArray::kSize));
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ // Convert array_size from smi to value.
+ __ mov(array_size,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ tst(array_size, array_size);
+ // Length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0 and the size of the JSArray for non-empty
+ // JSArrays. The length of a FixedArray is not stored as a smi.
+ __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(array_size,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: size of elements array
+ __ add(elements_array_end,
+ elements_array_storage,
+ Operand(array_size, LSL, kPointerSizeLog2));
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ str(scratch1,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(elements_array_storage, elements_array_end);
+ __ b(lt, &loop);
+ }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// r0: argc
+// r1: constructor (built-in Array function)
+// lr: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ cmp(r0, Operand(0));
+ __ b(ne, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ // Setup return value, remove receiver from stack and return.
+ __ mov(r0, r2);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+
+ // Check for one argument. Bail out if argument is not smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(r0, Operand(1));
+ __ b(ne, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+ __ b(ne, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+ __ b(ge, call_generic_code);
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ // Setup return value, remove receiver and argument from stack and return.
+ __ mov(r0, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Jump(lr);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store therefore PreIndex is
+ // used when filling the backing store.
+ // r0: argc
+ // r3: JSArray
+ // r4: elements_array storage start (untagged)
+ // r5: elements_array_end (untagged)
+ // sp[0]: last argument
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ cmp(r4, r5);
+ __ b(lt, &loop);
+
+ // Remove caller arguments and receiver from the stack, setup return value and
+ // return.
+ // r0: argc
+ // r3: JSArray
+ // sp[0]: receiver
+ __ add(sp, sp, Operand(kPointerSize));
+ __ mov(r0, r3);
+ __ Jump(lr);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // Just jump to the generic array code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
__ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -60,7 +424,34 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // Just jump to the generic construct code.
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function which
+ // always has a map.
+ GenerateLoadArrayFunction(masm, r2);
+ __ cmp(r1, r2);
+ __ Assert(eq, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -149,7 +540,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -220,12 +611,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r5: start of next object
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateObjectInNewSpace(r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// r1: constructor
View
301 deps/v8/src/arm/cfg-arm.cc
@@ -1,301 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h" // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- StackCheckStub stub;
- __ CallStub(&stub);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ add(sp, sp, Operand((count + 1) * kPointerSize));
- __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
-
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Discard key and receiver.
- __ add(sp, sp, Operand(2 * kPointerSize));
- } else {
- key()->Get(masm, r2);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(); // Discard receiver.
- }
- location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Move left to r1 and right to r0.
- left()->Get(masm, r1);
- right()->Get(masm, r0);
- GenericBinaryOpStub stub(op(), mode);
- __ CallStub(&stub);
- location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ mov(ip, Operand(handle_));
- __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return MemOperand(fp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return MemOperand(r0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ip, Operand(handle_));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ ldr(ip, ToMemOperand(this));
- __ push(ip); // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // Double dispatch.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ ldr(ip, ToMemOperand(this));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(reg, r0);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(r0, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(r0);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, r0);
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ str(r0, ToMemOperand(loc));
- case STACK:
- __ pop(ip);
- __ str(ip, ToMemOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
View
134 deps/v8/src/arm/codegen-arm.cc
@@ -1188,7 +1188,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -2811,7 +2810,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -2909,13 +2907,11 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
// Standard function call.
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
@@ -2928,7 +2924,56 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ // Prepare stack for call to resolved function.
+ LoadAndSpill(function);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Prepare stack for call to ResolvePossiblyDirectEval.
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+
+ // Resolve the call.
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up stack with the right values for the function and the receiver.
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
@@ -3053,72 +3098,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare stack for call to resolved function.
- LoadAndSpill(function);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2); // Slot for receiver
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Prepare stack for call to ResolvePossiblyDirectEval.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -4960,12 +4945,12 @@ static void AllocateHeapNumber(
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
@@ -5076,11 +5061,14 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r5: Address of heap number for result.
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
+ __ AlignStack(0);
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
__ Call(r5);
+ __ pop(r4); // Address of heap number.
+ __ cmp(r4, Operand(Smi::FromInt(0)));
+ __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
// Store answer in the overwritable heap number.
- __ pop(r4);
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for coprocessor so we need to
View
2  deps/v8/src/arm/codegen-arm.h
@@ -370,7 +370,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
View
77 deps/v8/src/arm/macro-assembler-arm.cc
@@ -291,27 +291,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
- // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_ARM)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand(activation_frame_alignment - 1));
- push(r7, eq); // Conditional push instruction.
- }
+ // alignment in AlignStack.
+ AlignStack(1);
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -343,6 +324,30 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_ARM)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_ARM)
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(activation_frame_alignment - offset));
+ push(r7, eq); // Conditional push instruction.
+ }
+}
+
+
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
@@ -763,12 +768,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -813,12 +818,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -1001,11 +1006,11 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1046,7 +1051,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
@@ -1064,7 +1068,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
int argc = Builtins::GetArgumentsCount(id);
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
View
44 deps/v8/src/arm/macro-assembler-arm.h
@@ -96,6 +96,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(StackFrame::Type type);
+ // Align the stack by optionally pushing a Smi zero.
+ void AlignStack(int offset);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -171,18 +173,18 @@ class MacroAssembler: public Assembler {
// bytes). If the new space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as as a heap object.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -257,14 +259,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& builtin);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -329,8 +331,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register rdi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
View
3  deps/v8/src/arm/simulator-arm.cc
@@ -409,7 +409,7 @@ void Simulator::Initialize() {
Simulator::Simulator() {
- ASSERT(initialized_);
+ Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -501,6 +501,7 @@ void* Simulator::RedirectExternalReference(void* external_function,
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
+ Initialize();
Simulator* sim = reinterpret_cast<Simulator*>(
v8::internal::Thread::GetThreadLocal(simulator_key));
if (sim == NULL) {
View
37 deps/v8/src/arm/simulator-arm.h
@@ -36,18 +36,23 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
+#include "allocation.h"
+
#if defined(__arm__)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
@@ -64,12 +69,6 @@
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.