diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index a15cce80d0d..2c0da0c6ed0 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,26 @@
+2010-04-14: Version 2.2.3
+
+        Added stack command and mem command to ARM simulator debugger.
+
+        Fixed scons snapshot and ARM build, and Windows X64 build issues.
+
+        Performance improvements on all platforms.
+
+
+2010-04-12: Version 2.2.2
+
+        Introduced new profiler API.
+
+        Fixed random number generator to produce full 32 random bits.
+
+
+2010-04-06: Version 2.2.1
+
+        Debugger improvements.
+
+        Fixed minor bugs.
+
+
 2010-03-29: Version 2.2.0
 
         Fixed a few minor bugs.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 99271ca02fd..7242b37ae1a 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -1,4 +1,4 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2010 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -52,9 +52,10 @@ else:
   GCC_EXTRA_CCFLAGS = []
   GCC_DTOA_EXTRA_CCFLAGS = []
 
-ANDROID_FLAGS = ['-march=armv5te',
-                 '-mtune=xscale',
-                 '-msoft-float',
+ANDROID_FLAGS = ['-march=armv7-a',
+                 '-mtune=cortex-a8',
+                 '-mfloat-abi=softfp',
+                 '-mfpu=vfp',
                  '-fpic',
                  '-mthumb-interwork',
                  '-funwind-tables',
@@ -69,6 +70,8 @@ ANDROID_FLAGS = ['-march=armv5te',
                  '-fomit-frame-pointer',
                  '-fno-strict-aliasing',
                  '-finline-limit=64',
+                 '-DCAN_USE_VFP_INSTRUCTIONS=1',
+                 '-DCAN_USE_ARMV7_INSTRUCTIONS=1',
                  '-MD']
 
 ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
@@ -102,8 +105,17 @@ LIBRARY_FLAGS = {
     'mode:debug': {
       'CPPDEFINES': ['V8_ENABLE_CHECKS']
     },
+    'vmstate:on': {
+      'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING'],
+    },
+    'protectheap:on': {
+      'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_HEAP_PROTECTION'],
+    },
     'profilingsupport:on': {
-      'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+      'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_LOGGING_AND_PROFILING'],
+    },
+    'cppprofilesprocessor:on': {
+      'CPPDEFINES': ['ENABLE_CPP_PROFILES_PROCESSOR'],
     },
     'debuggersupport:on': {
       'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
@@ -668,11 +680,26 @@ SIMPLE_OPTIONS = {
     'default': 'static',
     'help': 'the type of library to produce'
   },
+  'vmstate': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'enable VM state tracking'
+  },
+  'protectheap': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'enable heap protection'
+  },
   'profilingsupport': {
     'values': ['on', 'off'],
     'default': 'on',
     'help': 'enable profiling of JavaScript code'
   },
+  'cppprofilesprocessor': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'enable C++ profiles processor'
+  },
   'debuggersupport': {
     'values': ['on', 'off'],
     'default': 'on',
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
new file mode 100644
index 00000000000..eca6548687b
--- /dev/null
+++ b/deps/v8/include/v8-profiler.h
@@ -0,0 +1,176 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_PROFILER_H_
+#define V8_V8_PROFILER_H_
+
+#include "v8.h"
+
+#ifdef _WIN32
+// Setup for Windows DLL export/import. See v8.h in this directory for
+// information on how to build/use V8 as a DLL.
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
+  build configuration to ensure that at most one of these is set
+#endif
+
+#ifdef BUILDING_V8_SHARED
+#define V8EXPORT __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8EXPORT __declspec(dllimport)
+#else
+#define V8EXPORT
+#endif
+
+#else  // _WIN32
+
+// Setup for Linux shared library export. See v8.h in this directory for
+// information on how to build/use V8 as shared library.
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#define V8EXPORT __attribute__ ((visibility("default")))
+#else  // defined(__GNUC__) && (__GNUC__ >= 4)
+#define V8EXPORT
+#endif  // defined(__GNUC__) && (__GNUC__ >= 4)
+
+#endif  // _WIN32
+
+
+/**
+ * Profiler support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+
+/**
+ * CpuProfileNode represents a node in a call graph.
+ */
+class V8EXPORT CpuProfileNode {
+ public:
+  /** Returns function name (empty string for anonymous functions.) */
+  Handle<String> GetFunctionName() const;
+
+  /** Returns resource name for script from where the function originates. */
+  Handle<String> GetScriptResourceName() const;
+
+  /**
+   * Returns the number, 1-based, of the line where the function originates.
+   * kNoLineNumberInfo if no line number information is available.
+   */
+  int GetLineNumber() const;
+
+  /**
+   * Returns total (self + children) execution time of the function,
+   * in milliseconds, estimated by samples count.
+   */
+  double GetTotalTime() const;
+
+  /**
+   * Returns self execution time of the function, in milliseconds,
+   * estimated by samples count.
+   */
+  double GetSelfTime() const;
+
+  /** Returns the count of samples where function exists. */
+  double GetTotalSamplesCount() const;
+
+  /** Returns the count of samples where function was currently executing. */
+  double GetSelfSamplesCount() const;
+
+  /** Returns function entry UID. */
+  unsigned GetCallUid() const;
+
+  /** Returns child nodes count of the node. */
+  int GetChildrenCount() const;
+
+  /** Retrieves a child node by index. */
+  const CpuProfileNode* GetChild(int index) const;
+
+  static const int kNoLineNumberInfo = 0;
+};
+
+
+/**
+ * CpuProfile contains a CPU profile in a form of two call trees:
+ *  - top-down (from main() down to functions that do all the work);
+ *  - bottom-up call graph (in backward direction).
+ */
+class V8EXPORT CpuProfile {
+ public:
+  /** Returns CPU profile UID (assigned by the profiler.) */
+  unsigned GetUid() const;
+
+  /** Returns CPU profile title. */
+  Handle<String> GetTitle() const;
+
+  /** Returns the root node of the bottom up call tree. */
+  const CpuProfileNode* GetBottomUpRoot() const;
+
+  /** Returns the root node of the top down call tree. */
+  const CpuProfileNode* GetTopDownRoot() const;
+};
+
+
+/**
+ * Interface for controlling CPU profiling.
+ */
+class V8EXPORT CpuProfiler {
+ public:
+  /**
+   * Returns the number of profiles collected (doesn't include
+   * profiles that are being collected at the moment of call.)
+   */
+  static int GetProfilesCount();
+
+  /** Returns a profile by index. */
+  static const CpuProfile* GetProfile(int index);
+
+  /** Returns a profile by uid. */
+  static const CpuProfile* FindProfile(unsigned uid);
+
+  /**
+   * Starts collecting CPU profile. Title may be an empty string. It
+   * is allowed to have several profiles being collected at
+   * once. Attempts to start collecting several profiles with the same
+   * title are silently ignored.
+   */
+  static void StartProfiling(Handle<String> title);
+
+  /**
+   * Stops collecting CPU profile with a given title and returns it.
+   * If the title given is empty, finishes the last profile started.
+   */
+  static const CpuProfile* StopProfiling(Handle<String> title);
+};
+
+
+}  // namespace v8
+
+
+#undef V8EXPORT
+
+
+#endif  // V8_V8_PROFILER_H_
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index c4b4db5001b..d90289ab99d 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -855,22 +855,27 @@ class V8EXPORT String : public Primitive {
    * \param start The starting position within the string at which
    * copying begins.
    * \param length The number of bytes to copy from the string.
-   * \param nchars The number of characters written.
+   * \param nchars_ref The number of characters written, can be NULL.
    * \return The number of bytes copied to the buffer
    * excluding the NULL terminator.
    */
-  int Write(uint16_t* buffer, int start = 0, int length = -1) const;  // UTF-16
-  int WriteAscii(char* buffer, int start = 0, int length = -1) const;  // ASCII
+  enum WriteHints {
+    NO_HINTS = 0,
+    HINT_MANY_WRITES_EXPECTED = 1
+  };
+
+  int Write(uint16_t* buffer,
+            int start = 0,
+            int length = -1,
+            WriteHints hints = NO_HINTS) const;  // UTF-16
+  int WriteAscii(char* buffer,
+                 int start = 0,
+                 int length = -1,
+                 WriteHints hints = NO_HINTS) const;  // ASCII
   int WriteUtf8(char* buffer,
                 int length = -1,
-                int* nchars = NULL) const;  // UTF-8
-
-  /**
-   * Flatten internal memory. Operations on the string tend to run faster
-   * after flattening especially if the string is a concatenation of many
-   * others.
-   */
-  void Flatten();
+                int* nchars_ref = NULL,
+                WriteHints hints = NO_HINTS) const;  // UTF-8
 
   /**
    * A zero length string.
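To see the new public API end to end, here is a minimal embedder sketch against the v8-profiler.h header above. It assumes V8 is built with the new cppprofilesprocessor=on SCons option (so ENABLE_CPP_PROFILES_PROCESSOR is defined) and that a context is already entered; PrintNode, ProfileSomething, and the profile title are illustrative names, not part of this patch.

#include <stdio.h>
#include <v8.h>
#include <v8-profiler.h>

// Walk the top-down call tree of a collected profile, printing the
// sample counts that back the GetTotalTime()/GetSelfTime() estimates.
static void PrintNode(const v8::CpuProfileNode* node, int depth) {
  v8::String::Utf8Value name(node->GetFunctionName());
  printf("%*s%s: self=%.0f total=%.0f samples\n", depth * 2, "",
         *name, node->GetSelfSamplesCount(), node->GetTotalSamplesCount());
  for (int i = 0; i < node->GetChildrenCount(); i++) {
    PrintNode(node->GetChild(i), depth + 1);
  }
}

static void ProfileSomething(v8::Handle<v8::Script> script) {
  v8::HandleScope scope;
  v8::CpuProfiler::StartProfiling(v8::String::New("my-profile"));
  script->Run();  // the code being measured
  const v8::CpuProfile* profile =
      v8::CpuProfiler::StopProfiling(v8::String::New("my-profile"));
  PrintNode(profile->GetTopDownRoot(), 0);  // or GetBottomUpRoot()
}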
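The WriteHints parameter added above replaces the String::Flatten() method removed from v8.h: a caller that will copy out of the same string repeatedly passes HINT_MANY_WRITES_EXPECTED, and V8 flattens internally on the first write (see the TryFlatten() calls in the api.cc hunks further down). A sketch of the intended usage, with CopyOutInChunks as an illustrative name only:

#include <v8.h>

// Stream an ASCII string out through a small fixed buffer. The hint lets
// V8 flatten a cons string once, so later iterations take the flat path.
static void CopyOutInChunks(v8::Handle<v8::String> str, char* buf, int cap) {
  for (int start = 0; start < str->Length(); start += cap) {
    int nbytes = str->WriteAscii(buf, start, cap,
                                 v8::String::HINT_MANY_WRITES_EXPECTED);
    // ... consume nbytes bytes of buf here ...
  }
}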
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 61517d36e54..1606a8f99c7 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -25,8 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+// This controls whether this sample is compiled with debugger support.
+// You may trace its usages in source text to see what parts of program
+// are responsible for debugging support.
+// Note that V8 itself should be compiled with enabled debugger support
+// to have it all working.
+#define SUPPORT_DEBUGGING
+
 #include <v8.h>
+
+#ifdef SUPPORT_DEBUGGING
 #include <v8-debug.h>
+#endif
+
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
@@ -103,8 +115,9 @@ v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
 bool RunCppCycle(v8::Handle<v8::Script> script,
                  v8::Local<v8::Context> context,
                  bool report_exceptions);
 
-v8::Persistent<v8::Context> debug_message_context;
 
+#ifdef SUPPORT_DEBUGGING
+v8::Persistent<v8::Context> debug_message_context;
 
 void DispatchDebugMessages() {
   // We are in some random thread. We should already have v8::Locker acquired
@@ -122,6 +135,7 @@ void DispatchDebugMessages() {
 
   v8::Debug::ProcessDebugMessages();
 }
+#endif
 
 
 int RunMain(int argc, char* argv[]) {
@@ -132,9 +146,12 @@ int RunMain(int argc, char* argv[]) {
   v8::Handle<v8::String> script_name(NULL);
   int script_param_counter = 0;
 
+#ifdef SUPPORT_DEBUGGING
   int port_number = -1;
   bool wait_for_connection = false;
   bool support_callback = false;
+#endif
+
   MainCycleType cycle_type = CycleInCpp;
 
   for (int i = 1; i < argc; i++) {
@@ -143,17 +160,19 @@ int RunMain(int argc, char* argv[]) {
       // Ignore any -f flags for compatibility with the other stand-
       // alone JavaScript engines.
       continue;
-    } else if (strcmp(str, "--callback") == 0) {
-      support_callback = true;
-    } else if (strcmp(str, "--wait-for-connection") == 0) {
-      wait_for_connection = true;
     } else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
       cycle_type = CycleInCpp;
     } else if (strcmp(str, "--main-cycle-in-js") == 0) {
       cycle_type = CycleInJs;
+#ifdef SUPPORT_DEBUGGING
+    } else if (strcmp(str, "--callback") == 0) {
+      support_callback = true;
+    } else if (strcmp(str, "--wait-for-connection") == 0) {
+      wait_for_connection = true;
     } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
       port_number = atoi(argv[i + 1]);  // NOLINT
       i++;
+#endif
     } else if (strncmp(str, "--", 2) == 0) {
       printf("Warning: unknown flag %s.\nTry --help for options\n", str);
     } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -197,12 +216,12 @@ int RunMain(int argc, char* argv[]) {
   // Create a new execution environment containing the built-in
   // functions
   v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
-  debug_message_context = v8::Persistent<v8::Context>::New(context);
-
-  // Enter the newly created execution environment.
v8::Context::Scope context_scope(context); +#ifdef SUPPORT_DEBUGGING + debug_message_context = v8::Persistent::New(context); + v8::Locker locker; if (support_callback) { @@ -210,10 +229,9 @@ int RunMain(int argc, char* argv[]) { } if (port_number != -1) { - const char* auto_break_param = "--debugger_auto_break"; - v8::V8::SetFlagsFromString(auto_break_param, strlen(auto_break_param)); v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection); } +#endif bool report_exceptions = true; @@ -254,7 +272,9 @@ int RunMain(int argc, char* argv[]) { bool RunCppCycle(v8::Handle script, v8::Local context, bool report_exceptions) { +#ifdef SUPPORT_DEBUGGING v8::Locker lock; +#endif v8::Handle fun_name = v8::String::New("ProcessLine"); v8::Handle process_val = @@ -407,7 +427,9 @@ v8::Handle ReadLine() { char* res; { +#ifdef SUPPORT_DEBUGGING v8::Unlocker unlocker; +#endif res = fgets(buffer, kBufferSize, stdin); } if (res == NULL) { diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 1f1c1c18339..5add9999d1c 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -111,6 +111,7 @@ SOURCES = { variables.cc version.cc virtual-frame.cc + vm-state.cc zone.cc """), 'arch:arm': Split(""" @@ -305,7 +306,12 @@ def ConfigureObjectFiles(): source_objs = context.ConfigureObject(env, source_files) non_snapshot_files = [dtoa_obj, source_objs] - # Create snapshot if necessary. + # Create snapshot if necessary. For cross compilation you should either + # do without snapshots and take the performance hit or you should build a + # host VM with the simulator=arm and snapshot=on options and then take the + # resulting snapshot.cc file from obj/release and put it in the src + # directory. Then rebuild the VM with the cross compiler and specify + # snapshot=nobuild on the scons command line. empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc') mksnapshot_env = env.Copy() mksnapshot_env.Replace(**context.flags['mksnapshot']) @@ -315,7 +321,7 @@ def ConfigureObjectFiles(): if context.build_snapshot: snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath) else: - snapshot_cc = Command('snapshot.cc', [], []) + snapshot_cc = 'snapshot.cc' snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.']) else: snapshot_obj = empty_snapshot_obj diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index ed48503e24a..47950ebe182 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -36,6 +36,7 @@ #include "global-handles.h" #include "messages.h" #include "platform.h" +#include "profile-generator-inl.h" #include "serialize.h" #include "snapshot.h" #include "top.h" @@ -43,6 +44,7 @@ #include "v8threads.h" #include "version.h" +#include "../include/v8-profiler.h" #define LOG_API(expr) LOG(ApiEntryCall(expr)) @@ -2639,12 +2641,20 @@ int String::Utf8Length() const { } -int String::WriteUtf8(char* buffer, int capacity, int *ncharsRef) const { +int String::WriteUtf8(char* buffer, + int capacity, + int* nchars_ref, + WriteHints hints) const { if (IsDeadCheck("v8::String::WriteUtf8()")) return 0; LOG_API("String::WriteUtf8"); ENTER_V8; i::Handle str = Utils::OpenHandle(this); StringTracker::RecordWrite(str); + if (hints & HINT_MANY_WRITES_EXPECTED) { + // Flatten the string for efficiency. This applies whether we are + // using StringInputBuffer or Get(i) to access the characters. 
+ str->TryFlatten(); + } write_input_buffer.Reset(0, *str); int len = str->length(); // Encode the first K - 3 bytes directly into the buffer since we @@ -2679,23 +2689,28 @@ int String::WriteUtf8(char* buffer, int capacity, int *ncharsRef) const { } } } - if (ncharsRef) *ncharsRef = nchars; + if (nchars_ref != NULL) *nchars_ref = nchars; if (i == len && (capacity == -1 || pos < capacity)) buffer[pos++] = '\0'; return pos; } -int String::WriteAscii(char* buffer, int start, int length) const { +int String::WriteAscii(char* buffer, + int start, + int length, + WriteHints hints) const { if (IsDeadCheck("v8::String::WriteAscii()")) return 0; LOG_API("String::WriteAscii"); ENTER_V8; ASSERT(start >= 0 && length >= -1); i::Handle str = Utils::OpenHandle(this); StringTracker::RecordWrite(str); - // Flatten the string for efficiency. This applies whether we are - // using StringInputBuffer or Get(i) to access the characters. - str->TryFlatten(); + if (hints & HINT_MANY_WRITES_EXPECTED) { + // Flatten the string for efficiency. This applies whether we are + // using StringInputBuffer or Get(i) to access the characters. + str->TryFlatten(); + } int end = length; if ( (length == -1) || (length > str->length() - start) ) end = str->length() - start; @@ -2713,13 +2728,21 @@ int String::WriteAscii(char* buffer, int start, int length) const { } -int String::Write(uint16_t* buffer, int start, int length) const { +int String::Write(uint16_t* buffer, + int start, + int length, + WriteHints hints) const { if (IsDeadCheck("v8::String::Write()")) return 0; LOG_API("String::Write"); ENTER_V8; ASSERT(start >= 0 && length >= -1); i::Handle str = Utils::OpenHandle(this); StringTracker::RecordWrite(str); + if (hints & HINT_MANY_WRITES_EXPECTED) { + // Flatten the string for efficiency. This applies whether we are + // using StringInputBuffer or Get(i) to access the characters. 
+ str->TryFlatten(); + } int end = length; if ( (length == -1) || (length > str->length() - start) ) end = str->length() - start; @@ -2731,13 +2754,6 @@ int String::Write(uint16_t* buffer, int start, int length) const { } -void v8::String::Flatten() { - EnsureInitialized("v8::String::Flatten()"); - i::Handle str = Utils::OpenHandle(this); - i::FlattenString(str); -} - - bool v8::String::IsExternal() const { EnsureInitialized("v8::String::IsExternal()"); i::Handle str = Utils::OpenHandle(this); @@ -2866,6 +2882,7 @@ void v8::Object::SetInternalField(int index, v8::Handle value) { void v8::Object::SetPointerInInternalField(int index, void* value) { + ENTER_V8; i::Object* as_object = reinterpret_cast(value); if (as_object->IsSmi()) { Utils::OpenHandle(this)->SetInternalField(index, as_object); @@ -3430,6 +3447,7 @@ Local Array::CloneElementAt(uint32_t index) { } i::Handle paragon_handle(i::JSObject::cast(paragon)); EXCEPTION_PREAMBLE(); + ENTER_V8; i::Handle result = i::Copy(paragon_handle); has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(Local()); @@ -4008,6 +4026,131 @@ Local Debug::GetDebugContext() { #endif // ENABLE_DEBUGGER_SUPPORT + +#ifdef ENABLE_CPP_PROFILES_PROCESSOR + +Handle CpuProfileNode::GetFunctionName() const { + IsDeadCheck("v8::CpuProfileNode::GetFunctionName"); + const i::ProfileNode* node = reinterpret_cast(this); + const i::CodeEntry* entry = node->entry(); + if (!entry->has_name_prefix()) { + return Handle(ToApi( + i::Factory::LookupAsciiSymbol(entry->name()))); + } else { + return Handle(ToApi(i::Factory::NewConsString( + i::Factory::LookupAsciiSymbol(entry->name_prefix()), + i::Factory::LookupAsciiSymbol(entry->name())))); + } +} + + +Handle CpuProfileNode::GetScriptResourceName() const { + IsDeadCheck("v8::CpuProfileNode::GetScriptResourceName"); + const i::ProfileNode* node = reinterpret_cast(this); + return Handle(ToApi(i::Factory::LookupAsciiSymbol( + node->entry()->resource_name()))); +} + + +int CpuProfileNode::GetLineNumber() const { + IsDeadCheck("v8::CpuProfileNode::GetLineNumber"); + return reinterpret_cast(this)->entry()->line_number(); +} + + +double CpuProfileNode::GetTotalSamplesCount() const { + IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount"); + return reinterpret_cast(this)->total_ticks(); +} + + +double CpuProfileNode::GetSelfSamplesCount() const { + IsDeadCheck("v8::CpuProfileNode::GetSelfSamplesCount"); + return reinterpret_cast(this)->self_ticks(); +} + + +unsigned CpuProfileNode::GetCallUid() const { + IsDeadCheck("v8::CpuProfileNode::GetCallUid"); + return reinterpret_cast(this)->entry()->call_uid(); +} + + +int CpuProfileNode::GetChildrenCount() const { + IsDeadCheck("v8::CpuProfileNode::GetChildrenCount"); + return reinterpret_cast(this)->children()->length(); +} + + +const CpuProfileNode* CpuProfileNode::GetChild(int index) const { + IsDeadCheck("v8::CpuProfileNode::GetChild"); + const i::ProfileNode* child = + reinterpret_cast(this)->children()->at(index); + return reinterpret_cast(child); +} + + +unsigned CpuProfile::GetUid() const { + IsDeadCheck("v8::CpuProfile::GetUid"); + return reinterpret_cast(this)->uid(); +} + + +Handle CpuProfile::GetTitle() const { + IsDeadCheck("v8::CpuProfile::GetTitle"); + const i::CpuProfile* profile = reinterpret_cast(this); + return Handle(ToApi(i::Factory::LookupAsciiSymbol( + profile->title()))); +} + + +const CpuProfileNode* CpuProfile::GetBottomUpRoot() const { + IsDeadCheck("v8::CpuProfile::GetBottomUpRoot"); + const i::CpuProfile* profile = reinterpret_cast(this); + return 
reinterpret_cast(profile->bottom_up()->root()); +} + + +const CpuProfileNode* CpuProfile::GetTopDownRoot() const { + IsDeadCheck("v8::CpuProfile::GetTopDownRoot"); + const i::CpuProfile* profile = reinterpret_cast(this); + return reinterpret_cast(profile->top_down()->root()); +} + + +int CpuProfiler::GetProfilesCount() { + IsDeadCheck("v8::CpuProfiler::GetProfilesCount"); + return i::CpuProfiler::GetProfilesCount(); +} + + +const CpuProfile* CpuProfiler::GetProfile(int index) { + IsDeadCheck("v8::CpuProfiler::GetProfile"); + return reinterpret_cast(i::CpuProfiler::GetProfile(index)); +} + + +const CpuProfile* CpuProfiler::FindProfile(unsigned uid) { + IsDeadCheck("v8::CpuProfiler::FindProfile"); + return reinterpret_cast(i::CpuProfiler::FindProfile(uid)); +} + + +void CpuProfiler::StartProfiling(Handle title) { + IsDeadCheck("v8::CpuProfiler::StartProfiling"); + i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title)); +} + + +const CpuProfile* CpuProfiler::StopProfiling(Handle title) { + IsDeadCheck("v8::CpuProfiler::StopProfiling"); + return reinterpret_cast( + i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title))); +} + +#endif // ENABLE_CPP_PROFILES_PROCESSOR + + namespace internal { diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h index 17e18d9fde4..6edec4d760e 100644 --- a/deps/v8/src/arm/codegen-arm-inl.h +++ b/deps/v8/src/arm/codegen-arm-inl.h @@ -29,6 +29,8 @@ #ifndef V8_ARM_CODEGEN_ARM_INL_H_ #define V8_ARM_CODEGEN_ARM_INL_H_ +#include "virtual-frame-arm.h" + namespace v8 { namespace internal { @@ -43,6 +45,7 @@ void CodeGenerator::LoadConditionAndSpill(Expression* expression, void CodeGenerator::LoadAndSpill(Expression* expression) { + ASSERT(VirtualFrame::SpilledScope::is_spilled()); Load(expression); } @@ -57,11 +60,6 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList* statements) { } -void Reference::GetValueAndSpill() { - GetValue(); -} - - // Platform-specific inline functions. void DeferredCode::Jump() { __ jmp(&entry_label_); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 0ca4d3560e0..0fc7b6d1d67 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -181,7 +181,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { // for stack overflow. frame_->AllocateStackSlots(); - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); int heap_slots = scope()->num_heap_slots(); if (heap_slots > 0) { // Allocate local context. @@ -274,8 +274,6 @@ void CodeGenerator::Generate(CompilationInfo* info) { // fp, and lr have been pushed on the stack. Adjust the virtual // frame to match this state. frame_->Adjust(4); - allocator_->Unuse(r1); - allocator_->Unuse(lr); // Bind all the bailout labels to the beginning of the function. List* bailouts = info->bailouts(); @@ -505,6 +503,7 @@ void CodeGenerator::LoadCondition(Expression* x, has_valid_frame() && !has_cc() && frame_->height() == original_height) { + frame_->SpillAll(); true_target->Jump(); } } @@ -529,6 +528,7 @@ void CodeGenerator::Load(Expression* expr) { if (has_cc()) { // Convert cc_reg_ into a boolean value. 
+ VirtualFrame::SpilledScope scope(frame_); JumpTarget loaded; JumpTarget materialize_true; materialize_true.Branch(cc_reg_); @@ -543,6 +543,7 @@ void CodeGenerator::Load(Expression* expr) { } if (true_target.is_linked() || false_target.is_linked()) { + VirtualFrame::SpilledScope scope(frame_); // We have at least one condition value that has been "translated" // into a branch, thus it needs to be loaded explicitly. JumpTarget loaded; @@ -577,14 +578,14 @@ void CodeGenerator::Load(Expression* expr) { void CodeGenerator::LoadGlobal() { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(r0, GlobalObject()); frame_->EmitPush(r0); } void CodeGenerator::LoadGlobalReceiver(Register scratch) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset)); @@ -594,7 +595,7 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) { void CodeGenerator::LoadTypeofExpression(Expression* expr) { // Special handling of identifiers as subexpressions of typeof. - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Variable* variable = expr->AsVariableProxy()->AsVariable(); if (variable != NULL && !variable->is_this() && variable->is_global()) { // For a global variable we build the property reference @@ -604,7 +605,7 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) { Literal key(variable->name()); Property property(&global, &key, RelocInfo::kNoPosition); Reference ref(this, &property); - ref.GetValueAndSpill(); + ref.GetValue(); } else if (variable != NULL && variable->slot() != NULL) { // For a variable that rewrites to a slot, we signal it is the immediate // subexpression of a typeof. @@ -634,7 +635,7 @@ Reference::~Reference() { void CodeGenerator::LoadReference(Reference* ref) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ LoadReference"); Expression* e = ref->expression(); Property* property = e->AsProperty(); @@ -669,16 +670,18 @@ void CodeGenerator::LoadReference(Reference* ref) { void CodeGenerator::UnloadReference(Reference* ref) { - VirtualFrame::SpilledScope spilled_scope; + int size = ref->size(); + ref->set_unloaded(); + if (size == 0) return; + // Pop a reference from the stack while preserving TOS. + VirtualFrame::RegisterAllocationScope scope(this); Comment cmnt(masm_, "[ UnloadReference"); - int size = ref->size(); if (size > 0) { - frame_->EmitPop(r0); + Register tos = frame_->PopToRegister(); frame_->Drop(size); - frame_->EmitPush(r0); + frame_->EmitPush(tos); } - ref->set_unloaded(); } @@ -687,7 +690,7 @@ void CodeGenerator::UnloadReference(Reference* ref) { // may jump to 'false_target' in case the register converts to 'false'. void CodeGenerator::ToBoolean(JumpTarget* true_target, JumpTarget* false_target) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Note: The generated code snippet does not change stack variables. // Only the condition code should be set. 
frame_->EmitPop(r0); @@ -729,15 +732,15 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target, void CodeGenerator::GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode, int constant_rhs) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // sp[0] : y // sp[1] : x // result : r0 // Stub is entered with a call: 'return address' is in lr. switch (op) { - case Token::ADD: // fall through. - case Token::SUB: // fall through. + case Token::ADD: + case Token::SUB: case Token::MUL: case Token::DIV: case Token::MOD: @@ -749,14 +752,14 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, case Token::SAR: { frame_->EmitPop(r0); // r0 : y frame_->EmitPop(r1); // r1 : x - GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); + GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs); frame_->CallStub(&stub, 0); break; } case Token::COMMA: frame_->EmitPop(r0); - // simply discard left value + // Simply discard left value. frame_->Drop(); break; @@ -768,16 +771,65 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, } +void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + int constant_rhs) { + // top of virtual frame: y + // 2nd elt. on virtual frame : x + // result : top of virtual frame + + // Stub is entered with a call: 'return address' is in lr. + switch (op) { + case Token::ADD: // fall through. + case Token::SUB: // fall through. + case Token::MUL: + case Token::DIV: + case Token::MOD: + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SHL: + case Token::SHR: + case Token::SAR: { + Register rhs = frame_->PopToRegister(); + Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register. + { + VirtualFrame::SpilledScope spilled_scope(frame_); + GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); + frame_->CallStub(&stub, 0); + } + frame_->EmitPush(r0); + break; + } + + case Token::COMMA: { + Register scratch = frame_->PopToRegister(); + // Simply discard left value. + frame_->Drop(); + frame_->EmitPush(scratch); + break; + } + + default: + // Other cases should have been handled before this point. + UNREACHABLE(); + break; + } +} + + class DeferredInlineSmiOperation: public DeferredCode { public: DeferredInlineSmiOperation(Token::Value op, int value, bool reversed, - OverwriteMode overwrite_mode) + OverwriteMode overwrite_mode, + Register tos) : op_(op), value_(value), reversed_(reversed), - overwrite_mode_(overwrite_mode) { + overwrite_mode_(overwrite_mode), + tos_register_(tos) { set_comment("[ DeferredInlinedSmiOperation"); } @@ -788,18 +840,21 @@ class DeferredInlineSmiOperation: public DeferredCode { int value_; bool reversed_; OverwriteMode overwrite_mode_; + Register tos_register_; }; void DeferredInlineSmiOperation::Generate() { + Register lhs = r1; + Register rhs = r0; switch (op_) { case Token::ADD: { // Revert optimistic add. if (reversed_) { - __ sub(r0, r0, Operand(Smi::FromInt(value_))); + __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_))); } else { - __ sub(r1, r0, Operand(Smi::FromInt(value_))); + __ sub(r1, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_))); } break; @@ -808,10 +863,10 @@ void DeferredInlineSmiOperation::Generate() { case Token::SUB: { // Revert optimistic sub. 
if (reversed_) { - __ rsb(r0, r0, Operand(Smi::FromInt(value_))); + __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_))); } else { - __ add(r1, r0, Operand(Smi::FromInt(value_))); + __ add(r1, tos_register_, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_))); } break; @@ -825,10 +880,23 @@ void DeferredInlineSmiOperation::Generate() { case Token::BIT_XOR: case Token::BIT_AND: { if (reversed_) { - __ mov(r1, Operand(Smi::FromInt(value_))); + if (tos_register_.is(r0)) { + __ mov(r1, Operand(Smi::FromInt(value_))); + } else { + ASSERT(tos_register_.is(r1)); + __ mov(r0, Operand(Smi::FromInt(value_))); + lhs = r0; + rhs = r1; + } } else { - __ mov(r1, Operand(r0)); - __ mov(r0, Operand(Smi::FromInt(value_))); + if (tos_register_.is(r1)) { + __ mov(r0, Operand(Smi::FromInt(value_))); + } else { + ASSERT(tos_register_.is(r0)); + __ mov(r1, Operand(Smi::FromInt(value_))); + lhs = r0; + rhs = r1; + } } break; } @@ -837,8 +905,14 @@ void DeferredInlineSmiOperation::Generate() { case Token::SHR: case Token::SAR: { if (!reversed_) { - __ mov(r1, Operand(r0)); - __ mov(r0, Operand(Smi::FromInt(value_))); + if (tos_register_.is(r1)) { + __ mov(r0, Operand(Smi::FromInt(value_))); + } else { + ASSERT(tos_register_.is(r0)); + __ mov(r1, Operand(Smi::FromInt(value_))); + lhs = r0; + rhs = r1; + } } else { UNREACHABLE(); // Should have been handled in SmiOperation. } @@ -851,8 +925,13 @@ void DeferredInlineSmiOperation::Generate() { break; } - GenericBinaryOpStub stub(op_, overwrite_mode_, value_); + GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_); __ CallStub(&stub); + // The generic stub returns its value in r0, but that's not + // necessarily what we want. We want whatever the inlined code + // expected, which is that the answer is in the same register as + // the operand was. + __ Move(tos_register_, r0); } @@ -877,11 +956,235 @@ static int BitPosition(unsigned x) { } +void CodeGenerator::VirtualFrameSmiOperation(Token::Value op, + Handle value, + bool reversed, + OverwriteMode mode) { + int int_value = Smi::cast(*value)->value(); + + bool something_to_inline; + switch (op) { + case Token::ADD: + case Token::SUB: + case Token::BIT_AND: + case Token::BIT_OR: + case Token::BIT_XOR: { + something_to_inline = true; + break; + } + case Token::SHL: + case Token::SHR: + case Token::SAR: { + if (reversed) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MOD: { + if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MUL: { + if (!IsEasyToMultiplyBy(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + default: { + something_to_inline = false; + break; + } + } + + if (!something_to_inline) { + if (!reversed) { + // Push the rhs onto the virtual frame by putting it in a TOS register. + Register rhs = frame_->GetTOSRegister(); + __ mov(rhs, Operand(value)); + frame_->EmitPush(rhs); + VirtualFrameBinaryOperation(op, mode, int_value); + } else { + // Pop the rhs, then push lhs and rhs in the right order. Only performs + // at most one pop, the rest takes place in TOS registers. + Register lhs = frame_->GetTOSRegister(); // Get reg for pushing. + Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this. 
+ __ mov(lhs, Operand(value)); + frame_->EmitPush(lhs); + frame_->EmitPush(rhs); + VirtualFrameBinaryOperation(op, mode, kUnknownIntValue); + } + return; + } + + // We move the top of stack to a register (normally no move is invoved). + Register tos = frame_->PopToRegister(); + // All other registers are spilled. The deferred code expects one argument + // in a register and all other values are flushed to the stack. The + // answer is returned in the same register that the top of stack argument was + // in. + frame_->SpillAll(); + + switch (op) { + case Token::ADD: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + __ add(tos, tos, Operand(value), SetCC); + deferred->Branch(vs); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::SUB: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + if (reversed) { + __ rsb(tos, tos, Operand(value), SetCC); + } else { + __ sub(tos, tos, Operand(value), SetCC); + } + deferred->Branch(vs); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + switch (op) { + case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; + case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; + case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break; + default: UNREACHABLE(); + } + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::SHL: + case Token::SHR: + case Token::SAR: { + ASSERT(!reversed); + Register scratch = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + int shift_value = int_value & 0x1f; // least significant 5 bits + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags + switch (op) { + case Token::SHL: { + if (shift_value != 0) { + __ mov(scratch, Operand(scratch, LSL, shift_value)); + } + // check that the *signed* result fits in a smi + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + deferred->Branch(mi); + break; + } + case Token::SHR: { + // LSR by immediate 0 means shifting 32 bits. + if (shift_value != 0) { + __ mov(scratch, Operand(scratch, LSR, shift_value)); + } + // check that the *unsigned* result fits in a smi + // neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging + // - 0x40000000: this number would convert to negative when + // smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi + __ tst(scratch, Operand(0xc0000000)); + deferred->Branch(ne); + break; + } + case Token::SAR: { + if (shift_value != 0) { + // ASR by immediate 0 means shifting 32 bits. 
+ __ mov(scratch, Operand(scratch, ASR, shift_value)); + } + break; + } + default: UNREACHABLE(); + } + __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::MOD: { + ASSERT(!reversed); + ASSERT(int_value >= 2); + ASSERT(IsPowerOf2(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned mask = (0x80000000u | kSmiTagMask); + __ tst(tos, Operand(mask)); + deferred->Branch(ne); // Go to deferred code on non-Smis and negative. + mask = (int_value << kSmiTagSize) - 1; + __ and_(tos, tos, Operand(mask)); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::MUL: { + ASSERT(IsEasyToMultiplyBy(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; + max_smi_that_wont_overflow <<= kSmiTagSize; + unsigned mask = 0x80000000u; + while ((mask & max_smi_that_wont_overflow) == 0) { + mask |= mask >> 1; + } + mask |= kSmiTagMask; + // This does a single mask that checks for a too high value in a + // conservative way and for a non-Smi. It also filters out negative + // numbers, unfortunately, but since this code is inline we prefer + // brevity to comprehensiveness. + __ tst(tos, Operand(mask)); + deferred->Branch(ne); + MultiplyByKnownInt(masm_, tos, tos, int_value); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + default: + UNREACHABLE(); + break; + } +} + + void CodeGenerator::SmiOperation(Token::Value op, Handle value, bool reversed, OverwriteMode mode) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // NOTE: This is an attempt to inline (a bit) more of the code for // some possible smi operations (like + and -) when (at least) one // of the operands is a literal smi. 
With this optimization, the @@ -900,7 +1203,7 @@ void CodeGenerator::SmiOperation(Token::Value op, switch (op) { case Token::ADD: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); __ add(r0, r0, Operand(value), SetCC); deferred->Branch(vs); @@ -912,7 +1215,7 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::SUB: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); if (reversed) { __ rsb(r0, r0, Operand(value), SetCC); @@ -931,7 +1234,7 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::BIT_XOR: case Token::BIT_AND: { DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); __ tst(r0, Operand(kSmiTagMask)); deferred->Branch(ne); switch (op) { @@ -953,7 +1256,7 @@ void CodeGenerator::SmiOperation(Token::Value op, } int shift_value = int_value & 0x1f; // least significant 5 bits DeferredCode* deferred = - new DeferredInlineSmiOperation(op, shift_value, false, mode); + new DeferredInlineSmiOperation(op, shift_value, false, mode, r0); __ tst(r0, Operand(kSmiTagMask)); deferred->Branch(ne); __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags @@ -1002,7 +1305,7 @@ void CodeGenerator::SmiOperation(Token::Value op, break; } DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); unsigned mask = (0x80000000u | kSmiTagMask); __ tst(r0, Operand(mask)); deferred->Branch(ne); // Go to deferred code on non-Smis and negative. @@ -1018,7 +1321,7 @@ void CodeGenerator::SmiOperation(Token::Value op, break; } DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode); + new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; max_smi_that_wont_overflow <<= kSmiTagSize; unsigned mask = 0x80000000u; @@ -1064,10 +1367,11 @@ void CodeGenerator::Comparison(Condition cc, Expression* left, Expression* right, bool strict) { - if (left != NULL) LoadAndSpill(left); - if (right != NULL) LoadAndSpill(right); + VirtualFrame::RegisterAllocationScope scope(this); + + if (left != NULL) Load(left); + if (right != NULL) Load(right); - VirtualFrame::SpilledScope spilled_scope; // sp[0] : y // sp[1] : x // result : cc register @@ -1075,32 +1379,49 @@ void CodeGenerator::Comparison(Condition cc, // Strict only makes sense for equality comparisons. ASSERT(!strict || cc == eq); - JumpTarget exit; - JumpTarget smi; + Register lhs; + Register rhs; + + // We load the top two stack positions into registers chosen by the virtual + // frame. This should keep the register shuffling to a minimum. // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. if (cc == gt || cc == le) { cc = ReverseCondition(cc); - frame_->EmitPop(r1); - frame_->EmitPop(r0); + lhs = frame_->PopToRegister(); + rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again! } else { - frame_->EmitPop(r0); - frame_->EmitPop(r1); + rhs = frame_->PopToRegister(); + lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again! 
} - __ orr(r2, r0, Operand(r1)); - __ tst(r2, Operand(kSmiTagMask)); + + ASSERT(rhs.is(r0) || rhs.is(r1)); + ASSERT(lhs.is(r0) || lhs.is(r1)); + + // Now we have the two sides in r0 and r1. We flush any other registers + // because the stub doesn't know about register allocation. + frame_->SpillAll(); + Register scratch = VirtualFrame::scratch0(); + __ orr(scratch, lhs, Operand(rhs)); + __ tst(scratch, Operand(kSmiTagMask)); + JumpTarget smi; smi.Branch(eq); // Perform non-smi comparison by stub. // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. // We call with 0 args because there are 0 on the stack. + if (!rhs.is(r0)) { + __ Swap(rhs, lhs, ip); + } + CompareStub stub(cc, strict); frame_->CallStub(&stub, 0); __ cmp(r0, Operand(0)); + JumpTarget exit; exit.Jump(); // Do smi comparisons by pointer comparison. smi.Bind(); - __ cmp(r1, Operand(r0)); + __ cmp(lhs, Operand(rhs)); exit.Bind(); cc_reg_ = cc; @@ -1111,7 +1432,7 @@ void CodeGenerator::Comparison(Condition cc, void CodeGenerator::CallWithArguments(ZoneList* args, CallFunctionFlags flags, int position) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Push the arguments ("left-to-right") on the stack. int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { @@ -1133,7 +1454,7 @@ void CodeGenerator::CallWithArguments(ZoneList* args, void CodeGenerator::Branch(bool if_true, JumpTarget* target) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(has_cc()); Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); target->Branch(cc); @@ -1142,7 +1463,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) { void CodeGenerator::CheckStack() { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ check stack"); __ LoadRoot(ip, Heap::kStackLimitRootIndex); // Put the lr setup instruction in the delay slot. 
kInstrSize is added to @@ -1164,7 +1485,7 @@ void CodeGenerator::VisitStatements(ZoneList* statements) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); for (int i = 0; frame_ != NULL && i < statements->length(); i++) { VisitAndSpill(statements->at(i)); } @@ -1176,7 +1497,7 @@ void CodeGenerator::VisitBlock(Block* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Block"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1190,7 +1511,7 @@ void CodeGenerator::VisitBlock(Block* node) { void CodeGenerator::DeclareGlobals(Handle pairs) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); frame_->EmitPush(cp); __ mov(r0, Operand(pairs)); frame_->EmitPush(r0); @@ -1205,7 +1526,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Declaration"); Variable* var = node->proxy()->var(); ASSERT(var != NULL); // must have been resolved @@ -1274,7 +1595,7 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ExpressionStatement"); CodeForStatementPosition(node); Expression* expression = node->expression(); @@ -1289,7 +1610,7 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "// EmptyStatement"); CodeForStatementPosition(node); // nothing to do @@ -1301,7 +1622,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ IfStatement"); // Generate different code depending on which parts of the if statement // are present or not. 
@@ -1387,7 +1708,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ContinueStatement"); CodeForStatementPosition(node); node->target()->continue_target()->Jump(); @@ -1395,7 +1716,7 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { void CodeGenerator::VisitBreakStatement(BreakStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ BreakStatement"); CodeForStatementPosition(node); node->target()->break_target()->Jump(); @@ -1403,7 +1724,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) { void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ReturnStatement"); CodeForStatementPosition(node); @@ -1426,7 +1747,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WithEnterStatement"); CodeForStatementPosition(node); LoadAndSpill(node->expression()); @@ -1452,7 +1773,7 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WithExitStatement"); CodeForStatementPosition(node); // Pop context. @@ -1467,7 +1788,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SwitchStatement"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1556,7 +1877,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ DoWhileStatement"); CodeForStatementPosition(node); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); @@ -1629,7 +1950,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ WhileStatement"); CodeForStatementPosition(node); @@ -1678,7 +1999,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ForStatement"); CodeForStatementPosition(node); if (node->init() != NULL) { @@ -1753,7 +2074,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ForInStatement"); CodeForStatementPosition(node); @@ -1989,7 +2310,7 @@ void 
CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ TryCatchStatement"); CodeForStatementPosition(node); @@ -2110,7 +2431,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ TryFinallyStatement"); CodeForStatementPosition(node); @@ -2294,7 +2615,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ DebuggerStatament"); CodeForStatementPosition(node); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -2307,7 +2628,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { void CodeGenerator::InstantiateFunction( Handle function_info) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ mov(r0, Operand(function_info)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. @@ -2330,7 +2651,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function info and instantiate it. @@ -2351,7 +2672,7 @@ void CodeGenerator::VisitSharedFunctionInfoLiteral( #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); InstantiateFunction(node->shared_function_info()); ASSERT(frame_->height() == original_height + 1); @@ -2362,7 +2683,7 @@ void CodeGenerator::VisitConditional(Conditional* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Conditional"); JumpTarget then; JumpTarget else_; @@ -2386,8 +2707,8 @@ void CodeGenerator::VisitConditional(Conditional* node) { void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { - VirtualFrame::SpilledScope spilled_scope; if (slot->type() == Slot::LOOKUP) { + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(slot->var()->is_dynamic()); JumpTarget slow; @@ -2445,19 +2766,18 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { frame_->EmitPush(r0); } else { - // Special handling for locals allocated in registers. - __ ldr(r0, SlotOperand(slot, r2)); - frame_->EmitPush(r0); + Register scratch = VirtualFrame::scratch0(); + frame_->EmitPush(SlotOperand(slot, scratch)); if (slot->var()->mode() == Variable::CONST) { // Const slots may contain 'the hole' value (the constant hasn't been // initialized yet) which needs to be converted into the 'undefined' // value. 
Comment cmnt(masm_, "[ Unhole const"); - frame_->EmitPop(r0); + frame_->EmitPop(scratch); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - frame_->EmitPush(r0); + __ cmp(scratch, ip); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq); + frame_->EmitPush(scratch); } } } @@ -2466,6 +2786,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { ASSERT(slot != NULL); if (slot->type() == Slot::LOOKUP) { + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(slot->var()->is_dynamic()); // For now, just do a runtime call. @@ -2499,17 +2820,22 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { } else { ASSERT(!slot->var()->is_dynamic()); + Register scratch = VirtualFrame::scratch0(); + VirtualFrame::RegisterAllocationScope scope(this); + // The frame must be spilled when branching to this target. JumpTarget exit; + if (init_state == CONST_INIT) { ASSERT(slot->var()->mode() == Variable::CONST); // Only the first const initialization must be executed (the slot // still contains 'the hole' value). When the assignment is // executed, the code is identical to a normal store (see below). Comment cmnt(masm_, "[ Init const"); - __ ldr(r2, SlotOperand(slot, r2)); + __ ldr(scratch, SlotOperand(slot, scratch)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); + __ cmp(scratch, ip); + frame_->SpillAll(); exit.Branch(ne); } @@ -2522,22 +2848,25 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // initialize consts to 'the hole' value and by doing so, end up // calling this code. r2 may be loaded with context; used below in // RecordWrite. - frame_->EmitPop(r0); - __ str(r0, SlotOperand(slot, r2)); - frame_->EmitPush(r0); + Register tos = frame_->Peek(); + __ str(tos, SlotOperand(slot, scratch)); if (slot->type() == Slot::CONTEXT) { // Skip write barrier if the written value is a smi. - __ tst(r0, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); + // We don't use tos any more after here. + VirtualFrame::SpilledScope spilled_scope(frame_); exit.Branch(eq); - // r2 is loaded with context when calling SlotOperand above. + // scratch is loaded with context when calling SlotOperand above. int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; __ mov(r3, Operand(offset)); - __ RecordWrite(r2, r3, r1); + // r1 could be identical with tos, but that doesn't matter. + __ RecordWrite(scratch, r3, r1); } // If we definitely did not jump over the assignment, we do not need // to bind the exit label. Doing so can defeat peephole // optimization. if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { + frame_->SpillAll(); exit.Bind(); } } @@ -2574,9 +2903,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, if (s->is_eval_scope()) { Label next, fast; - if (!context.is(tmp)) { - __ mov(tmp, Operand(context)); - } + __ Move(tmp, context); __ bind(&next); // Terminate at global context. 
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); @@ -2617,7 +2944,6 @@ void CodeGenerator::VisitSlot(Slot* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Slot"); LoadFromSlot(node, NOT_INSIDE_TYPEOF); ASSERT(frame_->height() == original_height + 1); @@ -2628,7 +2954,6 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ VariableProxy"); Variable* var = node->var(); @@ -2638,7 +2963,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { } else { ASSERT(var->is_global()); Reference ref(this, node); - ref.GetValueAndSpill(); + ref.GetValue(); } ASSERT(frame_->height() == original_height + 1); } @@ -2648,10 +2973,10 @@ void CodeGenerator::VisitLiteral(Literal* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Literal"); - __ mov(r0, Operand(node->handle())); - frame_->EmitPush(r0); + Register reg = frame_->GetTOSRegister(); + __ mov(reg, Operand(node->handle())); + frame_->EmitPush(reg); ASSERT(frame_->height() == original_height + 1); } @@ -2660,7 +2985,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ RexExp Literal"); // Retrieve the literal array and check the allocated entry. @@ -2704,7 +3029,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ObjectLiteral"); // Load the function of this activation. @@ -2785,7 +3110,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ ArrayLiteral"); // Load the function of this activation. @@ -2844,7 +3169,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // Call runtime routine to allocate the catch extension object and // assign the exception value to the catch variable. Comment cmnt(masm_, "[ CatchExtensionObject"); @@ -2857,18 +3182,19 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { void CodeGenerator::VisitAssignment(Assignment* node) { + VirtualFrame::RegisterAllocationScope scope(this); #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Assignment"); { Reference target(this, node->target(), node->is_compound()); if (target.is_illegal()) { // Fool the virtual frame into thinking that we left the assignment's // value on the frame. 
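// Sketch (not V8 code) of the virtual-frame idea behind the edits above: with
// frame_->GetTOSRegister() the top of the expression stack can live in a
// register, so literals no longer bounce through r0 and a memory push. The
// two-register pool modeled below is invented for the sketch.
#include <cassert>

class ToyVirtualFrame {
 public:
  int GetTOSRegister() {             // cf. frame_->GetTOSRegister()
    if (live_tos_ == 2) SpillAll();  // pool exhausted: flush TOS values to memory
    return live_tos_;                // 0 stands for r0, 1 for r1
  }
  void EmitPush(int reg) { assert(reg == live_tos_); ++live_tos_; }
  void SpillAll() { live_tos_ = 0; }  // the deferred str instructions go here
 private:
  int live_tos_ = 0;
};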
- __ mov(r0, Operand(Smi::FromInt(0))); - frame_->EmitPush(r0); + Register tos = frame_->GetTOSRegister(); + __ mov(tos, Operand(Smi::FromInt(0))); + frame_->EmitPush(tos); ASSERT(frame_->height() == original_height + 1); return; } @@ -2876,27 +3202,24 @@ void CodeGenerator::VisitAssignment(Assignment* node) { if (node->op() == Token::ASSIGN || node->op() == Token::INIT_VAR || node->op() == Token::INIT_CONST) { - LoadAndSpill(node->value()); + Load(node->value()); } else { // Assignment is a compound assignment. // Get the old value of the lhs. - target.GetValueAndSpill(); + target.GetValue(); Literal* literal = node->value()->AsLiteral(); bool overwrite = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); if (literal != NULL && literal->handle()->IsSmi()) { - SmiOperation(node->binary_op(), - literal->handle(), - false, - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - frame_->EmitPush(r0); - + VirtualFrameSmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { - LoadAndSpill(node->value()); - GenericBinaryOperation(node->binary_op(), - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - frame_->EmitPush(r0); + Load(node->value()); + VirtualFrameBinaryOperation(node->binary_op(), + overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); } } Variable* var = node->target()->AsVariableProxy()->AsVariable(); @@ -2925,7 +3248,7 @@ void CodeGenerator::VisitThrow(Throw* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Throw"); LoadAndSpill(node->exception()); @@ -2940,11 +3263,11 @@ void CodeGenerator::VisitProperty(Property* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Property"); { Reference property(this, node); - property.GetValueAndSpill(); + property.GetValue(); } ASSERT(frame_->height() == original_height + 1); } @@ -2954,7 +3277,7 @@ void CodeGenerator::VisitCall(Call* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Call"); Expression* function = node->expression(); @@ -3145,7 +3468,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ CallNew"); // According to ECMA-262, section 11.2.2, page 44, the function @@ -3185,7 +3508,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { void CodeGenerator::GenerateClassOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); JumpTarget leave, null, function, non_function_constructor; @@ -3245,7 +3568,7 @@ void CodeGenerator::GenerateClassOf(ZoneList* args) { void CodeGenerator::GenerateValueOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); JumpTarget leave; LoadAndSpill(args->at(0)); @@ -3264,7 +3587,7 @@ void CodeGenerator::GenerateValueOf(ZoneList* args) { void CodeGenerator::GenerateSetValueOf(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + 
VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); JumpTarget leave; LoadAndSpill(args->at(0)); // Load the object. @@ -3289,7 +3612,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList* args) { void CodeGenerator::GenerateIsSmi(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3299,7 +3622,7 @@ void CodeGenerator::GenerateIsSmi(ZoneList* args) { void CodeGenerator::GenerateLog(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc. ASSERT_EQ(args->length(), 3); #ifdef ENABLE_LOGGING_AND_PROFILING @@ -3315,7 +3638,7 @@ void CodeGenerator::GenerateLog(ZoneList* args) { void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3347,7 +3670,7 @@ void CodeGenerator::GenerateMathSqrt(ZoneList* args) { // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. // It is not yet implemented on ARM, so it always goes to the slow case. void CodeGenerator::GenerateFastCharCodeAt(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); Comment(masm_, "[ GenerateFastCharCodeAt"); @@ -3465,7 +3788,7 @@ void CodeGenerator::GenerateCharFromCode(ZoneList* args) { void CodeGenerator::GenerateIsArray(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); JumpTarget answer; @@ -3484,7 +3807,7 @@ void CodeGenerator::GenerateIsArray(ZoneList* args) { void CodeGenerator::GenerateIsRegExp(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); JumpTarget answer; @@ -3505,7 +3828,7 @@ void CodeGenerator::GenerateIsRegExp(ZoneList* args) { void CodeGenerator::GenerateIsObject(ZoneList* args) { // This generates a fast version of: // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r1); @@ -3535,7 +3858,7 @@ void CodeGenerator::GenerateIsObject(ZoneList* args) { void CodeGenerator::GenerateIsFunction(ZoneList* args) { // This generates a fast version of: // (%_ClassOf(arg) === 'Function') - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3548,7 +3871,7 @@ void CodeGenerator::GenerateIsFunction(ZoneList* args) { void CodeGenerator::GenerateIsUndetectableObject(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); @@ -3562,7 +3885,7 @@ void CodeGenerator::GenerateIsUndetectableObject(ZoneList* args) { void CodeGenerator::GenerateIsConstructCall(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); // Get 
the frame pointer for the calling frame. @@ -3584,22 +3907,31 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList* args) { void CodeGenerator::GenerateArgumentsLength(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); - // Seed the result with the formal parameters count, which will be used - // in case no arguments adaptor frame is found below the current frame. + Label exit; + + // Get the number of formal parameters. __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters()))); - // Call the shared stub to get to the arguments.length. - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH); - frame_->CallStub(&stub, 0); + // Check if the calling frame is an arguments adaptor frame. + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(ne, &exit); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); frame_->EmitPush(r0); } void CodeGenerator::GenerateArguments(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 1); // Satisfy contract with ArgumentsAccessStub: @@ -3615,11 +3947,26 @@ void CodeGenerator::GenerateArguments(ZoneList* args) { } -void CodeGenerator::GenerateRandomPositiveSmi(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; +void CodeGenerator::GenerateRandomHeapNumber( + ZoneList* args) { + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 0); - __ Call(ExternalReference::random_positive_smi_function().address(), - RelocInfo::RUNTIME_ENTRY); + + Label slow_allocate_heapnumber; + Label heapnumber_allocated; + + __ AllocateHeapNumber(r0, r1, r2, &slow_allocate_heapnumber); + __ jmp(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + __ mov(r0, Operand(Smi::FromInt(0))); + __ push(r0); + __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + + __ bind(&heapnumber_allocated); + __ PrepareCallCFunction(1, r1); + __ CallCFunction( + ExternalReference::fill_heap_number_with_random_function(), 1); frame_->EmitPush(r0); } @@ -3674,6 +4021,100 @@ void CodeGenerator::GenerateRegExpExec(ZoneList* args) { } +void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { + // No stub. This code only occurs a few times in regexp.js. + const int kMaxInlineLength = 100; + ASSERT_EQ(3, args->length()); + Load(args->at(0)); // Size of array, smi. + Load(args->at(1)); // "index" property value. + Load(args->at(2)); // "input" property value. + { + VirtualFrame::SpilledScope spilled_scope(frame_); + Label slowcase; + Label done; + __ ldr(r1, MemOperand(sp, kPointerSize * 2)); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &slowcase); + __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); + __ b(hi, &slowcase); + // Smi-tagging is equivalent to multiplying by 2. + // Allocate RegExpResult followed by FixedArray with size in ebx. + // JSArray: [Map][empty properties][Elements][Length-smi][index][input] + // Elements: [Map][Length][..elements..] + // Size of JSArray with two in-object properties and the header of a + // FixedArray. 
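// Sketch (not V8 code) of the inlined GenerateArgumentsLength fast path above:
// seed the result with the formal parameter count, then walk one frame down
// and, if an arguments-adaptor frame is found there (recognized by a marker
// smi in its context slot), read the adapted length instead. The struct layout
// and marker value are stand-ins for the real frame constants.
#include <cstdint>

struct CallerFrameSketch {
  uintptr_t context_or_marker;  // StandardFrameConstants::kContextOffset
  uintptr_t adapted_length;     // ArgumentsAdaptorFrameConstants::kLengthOffset
};

uintptr_t ArgumentsLengthSketch(const CallerFrameSketch* caller,
                                uintptr_t formal_count_smi,
                                uintptr_t adaptor_marker_smi) {
  uintptr_t result = formal_count_smi;                 // mov r0, #Smi(num_parameters)
  if (caller->context_or_marker == adaptor_marker_smi) {
    result = caller->adapted_length;                   // ldr r0, [r2, #kLengthOffset]
  }
  return result;                                       // frame_->EmitPush(r0)
}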
+ int objects_size = + (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; + __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); + __ add(r2, r5, Operand(objects_size)); + __ AllocateInNewSpace(r2, // In: Size, in words. + r0, // Out: Start of allocation (tagged). + r3, // Scratch register. + r4, // Scratch register. + &slowcase, + TAG_OBJECT); + // r0: Start of allocated area, object-tagged. + // r1: Number of elements in array, as smi. + // r5: Number of elements, untagged. + + // Set JSArray map to global.regexp_result_map(). + // Set empty properties FixedArray. + // Set elements to point to FixedArray allocated right after the JSArray. + // Interleave operations for better latency. + __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ add(r3, r0, Operand(JSRegExpResult::kSize)); + __ mov(r4, Operand(Factory::empty_fixed_array())); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); + __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); + __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + + // Set input, index and length fields from arguments. + __ ldm(ia_w, sp, static_cast(r2.bit() | r4.bit())); + __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); + __ add(sp, sp, Operand(kPointerSize)); + __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); + __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); + + // Fill out the elements FixedArray. + // r0: JSArray, tagged. + // r3: FixedArray, tagged. + // r5: Number of elements in array, untagged. + + // Set map. + __ mov(r2, Operand(Factory::fixed_array_map())); + __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + // Set FixedArray length. + __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset)); + // Fill contents of fixed-array with the-hole. + __ mov(r2, Operand(Factory::the_hole_value())); + __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // Fill fixed array elements with hole. + // r0: JSArray, tagged. + // r2: the hole. + // r3: Start of elements in FixedArray. + // r5: Number of elements to fill. + Label loop; + __ tst(r5, Operand(r5)); + __ bind(&loop); + __ b(le, &done); // Jump if r1 is negative or zero. + __ sub(r5, r5, Operand(1), SetCC); + __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); + __ jmp(&loop); + + __ bind(&slowcase); + __ CallRuntime(Runtime::kRegExpConstructResult, 3); + + __ bind(&done); + } + frame_->Forget(3); + frame_->EmitPush(r0); +} + + void CodeGenerator::GenerateNumberToString(ZoneList* args) { ASSERT_EQ(args->length(), 1); @@ -3686,6 +4127,22 @@ void CodeGenerator::GenerateNumberToString(ZoneList* args) { } +void CodeGenerator::GenerateCallFunction(ZoneList* args) { + Comment cmnt(masm_, "[ GenerateCallFunction"); + + ASSERT(args->length() >= 2); + + int n_args = args->length() - 2; // for receiver and function. + Load(args->at(0)); // receiver + for (int i = 0; i < n_args; i++) { + Load(args->at(i + 1)); + } + Load(args->at(n_args + 1)); // function + frame_->CallJSFunction(n_args); + frame_->EmitPush(r0); +} + + void CodeGenerator::GenerateMathSin(ZoneList* args) { ASSERT_EQ(args->length(), 1); // Load the argument on the stack and jump to the runtime. 
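// Sketch (not V8 code) of the hole-filling loop in GenerateRegExpConstructResult
// above: the untagged element count (r5) is decremented with SetCC, and each
// iteration stores the hole at elements + index * kPointerSize, so slots are
// written from the last one down to index 0 and the loop exits once the
// counter is no longer positive.
#include <cstdint>

void FillElementsWithHole(uintptr_t* elements, int count, uintptr_t the_hole) {
  while (count > 0) {            // b(le, &done) on the flags from tst/sub
    --count;                     // __ sub(r5, r5, Operand(1), SetCC)
    elements[count] = the_hole;  // __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2))
  }
}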
@@ -3705,7 +4162,7 @@ void CodeGenerator::GenerateMathCos(ZoneList* args) { void CodeGenerator::GenerateObjectEquals(ZoneList* args) { - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); // Load the two objects into registers and perform the comparison. @@ -3722,7 +4179,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); if (CheckForInlineRuntimeCall(node)) { ASSERT((has_cc() && frame_->height() == original_height) || (!has_cc() && frame_->height() == original_height + 1)); @@ -3768,7 +4225,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ UnaryOperation"); Token::Value op = node->op(); @@ -3899,7 +4356,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ CountOperation"); bool is_postfix = node->is_postfix(); @@ -3927,7 +4384,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { ASSERT(frame_->height() == original_height + 1); return; } - target.GetValueAndSpill(); + target.GetValue(); frame_->EmitPop(r0); JumpTarget slow; @@ -4108,10 +4565,10 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ BinaryOperation"); if (node->op() == Token::AND || node->op() == Token::OR) { + VirtualFrame::SpilledScope spilled_scope(frame_); GenerateLogicalBooleanOperation(node); } else { // Optimize for the case where (at least) one of the expressions @@ -4128,31 +4585,32 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { node->right()->AsBinaryOperation()->ResultOverwriteAllowed()); if (rliteral != NULL && rliteral->handle()->IsSmi()) { - LoadAndSpill(node->left()); - SmiOperation(node->op(), - rliteral->handle(), - false, - overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); - + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->left()); + VirtualFrameSmiOperation( + node->op(), + rliteral->handle(), + false, + overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { - LoadAndSpill(node->right()); - SmiOperation(node->op(), - lliteral->handle(), - true, - overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); - + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->right()); + VirtualFrameSmiOperation(node->op(), + lliteral->handle(), + true, + overwrite_left ? 
OVERWRITE_LEFT : NO_OVERWRITE); } else { + VirtualFrame::RegisterAllocationScope scope(this); OverwriteMode overwrite_mode = NO_OVERWRITE; if (overwrite_left) { overwrite_mode = OVERWRITE_LEFT; } else if (overwrite_right) { overwrite_mode = OVERWRITE_RIGHT; } - LoadAndSpill(node->left()); - LoadAndSpill(node->right()); - GenericBinaryOperation(node->op(), overwrite_mode); + Load(node->left()); + Load(node->right()); + VirtualFrameBinaryOperation(node->op(), overwrite_mode); } - frame_->EmitPush(r0); } ASSERT(!has_valid_frame() || (has_cc() && frame_->height() == original_height) || @@ -4164,7 +4622,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; + VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(r0, frame_->Function()); frame_->EmitPush(r0); ASSERT(frame_->height() == original_height + 1); @@ -4175,9 +4633,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ CompareOperation"); + VirtualFrame::RegisterAllocationScope nonspilled_scope(this); + // Get the expressions from the node. Expression* left = node->left(); Expression* right = node->right(); @@ -4194,10 +4653,12 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { right->AsLiteral() != NULL && right->AsLiteral()->IsNull(); // The 'null' value can only be equal to 'null' or 'undefined'. if (left_is_null || right_is_null) { - LoadAndSpill(left_is_null ? right : left); - frame_->EmitPop(r0); + Load(left_is_null ? right : left); + Register tos = frame_->PopToRegister(); + // JumpTargets can't cope with register allocation yet. + frame_->SpillAll(); __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r0, ip); + __ cmp(tos, ip); // The 'null' value is only equal to 'undefined' if using non-strict // comparisons. @@ -4205,17 +4666,17 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { true_target()->Branch(eq); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r0, Operand(ip)); + __ cmp(tos, Operand(ip)); true_target()->Branch(eq); - __ tst(r0, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); // It can be an undetectable object. - __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ cmp(r0, Operand(1 << Map::kIsUndetectable)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); } cc_reg_ = eq; @@ -4234,90 +4695,95 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { right->AsLiteral()->handle()->IsString())) { Handle check(String::cast(*right->AsLiteral()->handle())); - // Load the operand, move it to register r1. + // Load the operand, move it to a register. LoadTypeofExpression(operation->expression()); - frame_->EmitPop(r1); + Register tos = frame_->PopToRegister(); + + // JumpTargets can't cope with register allocation yet. 
+ frame_->SpillAll(); + + Register scratch = VirtualFrame::scratch0(); if (check->Equals(Heap::number_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); true_target()->Branch(eq); - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); cc_reg_ = eq; } else if (check->Equals(Heap::string_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); // It can be an undetectable string object. - __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); - __ cmp(r2, Operand(1 << Map::kIsUndetectable)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); false_target()->Branch(eq); - __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset)); - __ cmp(r2, Operand(FIRST_NONSTRING_TYPE)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); cc_reg_ = lt; } else if (check->Equals(Heap::boolean_symbol())) { __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); cc_reg_ = eq; } else if (check->Equals(Heap::undefined_symbol())) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); // It can be an undetectable object. - __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); - __ cmp(r2, Operand(1 << Map::kIsUndetectable)); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); cc_reg_ = eq; } else if (check->Equals(Heap::function_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); - Register map_reg = r2; - __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE); + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE); true_target()->Branch(eq); // Regular expressions are callable so typeof == 'function'. - __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE); + __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE); cc_reg_ = eq; } else if (check->Equals(Heap::object_symbol())) { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(tos, Operand(kSmiTagMask)); false_target()->Branch(eq); __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r1, ip); + __ cmp(tos, ip); true_target()->Branch(eq); - Register map_reg = r2; - __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE); + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE); false_target()->Branch(eq); // It can be an undetectable object. 
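// Sketch (not V8 code) of the typeof == 'object' classification being emitted
// here: smis are numbers, null answers 'object', regexps are callable and so
// answer 'function', undetectable objects answer 'undefined', and everything
// else is 'object' exactly when its instance type falls in the JS-object
// range. The numeric type values and bit position below are stand-ins.
#include <cstdint>

bool TypeofIsObject(bool is_smi, bool is_null, int instance_type,
                    uint8_t map_bit_field) {
  const int kJSRegExpTypeSketch      = 0x92;   // stand-in
  const int kFirstJSObjectTypeSketch = 0xa0;   // stand-in
  const int kLastJSObjectTypeSketch  = 0xaf;   // stand-in
  const uint8_t kIsUndetectableMask  = 1 << 5; // stand-in bit
  if (is_smi) return false;                               // smi is a number
  if (is_null) return true;                               // cmp against the null root
  if (instance_type == kJSRegExpTypeSketch) return false; // callable: 'function'
  if (map_bit_field & kIsUndetectableMask) return false;  // undetectable: 'undefined'
  return instance_type >= kFirstJSObjectTypeSketch &&     // cmp FIRST/LAST_JS_OBJECT_TYPE
         instance_type <= kLastJSObjectTypeSketch;        // cc_reg_ = le
}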
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); false_target()->Branch(eq); - __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); - __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); + __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); + __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE)); false_target()->Branch(lt); - __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); + __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE)); cc_reg_ = le; } else { @@ -4356,6 +4822,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { break; case Token::IN: { + VirtualFrame::SpilledScope scope(frame_); LoadAndSpill(left); LoadAndSpill(right); frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2); @@ -4364,6 +4831,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } case Token::INSTANCEOF: { + VirtualFrame::SpilledScope scope(frame_); LoadAndSpill(left); LoadAndSpill(right); InstanceofStub stub; @@ -4495,6 +4963,7 @@ void Reference::SetValue(InitState init_state) { } case NAMED: { + VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to named Property"); // Call the appropriate IC code. Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); @@ -4510,6 +4979,7 @@ void Reference::SetValue(InitState init_state) { } case KEYED: { + VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to keyed Property"); Property* property = expression_->AsProperty(); ASSERT(property != NULL); @@ -5429,45 +5899,214 @@ void CompareStub::Generate(MacroAssembler* masm) { // to call the C-implemented binary fp operation routines we need to end up // with the double precision floating point operands in r0 and r1 (for the // value in r1) and r2 and r3 (for the value in r0). -static void HandleBinaryOpSlowCases(MacroAssembler* masm, - Label* not_smi, - const Builtins::JavaScript& builtin, - Token::Value operation, - OverwriteMode mode) { +void GenericBinaryOpStub::HandleBinaryOpSlowCases( + MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin) { Label slow, slow_pop_2_first, do_the_call; Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; - // Smi-smi case (overflow). - // Since both are Smis there is no heap number to overwrite, so allocate. - // The new heap number is in r5. r6 and r7 are scratch. - __ AllocateHeapNumber(r5, r6, r7, &slow); - - // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, - // using registers d7 and d6 for the double values. - bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && - Token::MOD != operation; - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - __ mov(r7, Operand(r0, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - __ mov(r7, Operand(r1, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - } else { - // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ push(lr); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. 
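// Sketch (not V8 code) of the smi-to-double conversions used on both the VFP3
// and software paths of this stub: untag with an arithmetic shift right by
// kSmiTagSize (1), then convert the signed 32-bit value -- the
// mov/vmov/vcvt_f64_s32 triple in hardware, or ConvertToDoubleStub in software.
#include <cstdint>

double SmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;    // __ mov(r7, Operand(reg, ASR, kSmiTagSize))
  return static_cast<double>(untagged);  // __ vcvt_f64_s32(d7, s15)
}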
- __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); + bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; + + ASSERT((lhs.is(r0) && rhs.is(r1)) || lhs.is(r1) && rhs.is(r0)); + + if (ShouldGenerateSmiCode()) { + // Smi-smi case (overflow). + // Since both are Smis there is no heap number to overwrite, so allocate. + // The new heap number is in r5. r6 and r7 are scratch. + __ AllocateHeapNumber(r5, r6, r7, &slow); + + // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, + // using registers d7 and d6 for the double values. + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + } else { + // Write Smi from rhs to r3 and r2 in double format. r6 is scratch. + __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ push(lr); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from lhs to r1 and r0 in double format. r6 is scratch. + __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ jmp(&do_the_call); // Tail call. No return. + } + + // We branch here if at least one of r0 and r1 is not a Smi. + __ bind(not_smi); + + if (lhs.is(r0)) { + __ Swap(r0, r1, ip); } - __ jmp(&do_the_call); // Tail call. No return. + if (ShouldGenerateFPCode()) { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + GenerateTypeTransition(masm); + break; + default: + break; + } + } + + if (mode_ == NO_OVERWRITE) { + // In the case where there is no chance of an overwritable float we may as + // well do the allocation immediately while r0 and r1 are untouched. + __ AllocateHeapNumber(r5, r6, r7, &slow); + } + + // Move r0 to a double in r2-r3. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(ne, &slow); + if (mode_ == OVERWRITE_RIGHT) { + __ mov(r5, Operand(r0)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r0 to d7. + __ sub(r7, r0, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that second double is in r2 and r3. + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); + } + __ jmp(&finished_loading_r0); + __ bind(&r0_is_smi); + if (mode_ == OVERWRITE_RIGHT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r6, r7, &slow); + } + + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r0 to double in d7. + __ mov(r7, Operand(r0, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + } else { + // Write Smi from r0 to r3 and r2 in double format. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub3(r3, r2, r7, r6); + __ push(lr); + __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + __ bind(&finished_loading_r0); + + // Move r1 to a double in r0-r1. 
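// Sketch (not V8 code) of the overwrite-mode policy visible above: the stub
// recycles an operand's HeapNumber as the result cell when that operand is
// known dead (OVERWRITE_LEFT / OVERWRITE_RIGHT) and really is a heap number;
// a smi operand has no cell to recycle, so a fresh HeapNumber is allocated,
// as it always is under NO_OVERWRITE.
#include <cstdint>

enum OverwriteModeSketch { kNoOverwrite, kOverwriteLeft, kOverwriteRight };

uintptr_t PickResultCell(OverwriteModeSketch mode,
                         uintptr_t lhs, bool lhs_is_heap_number,
                         uintptr_t rhs, bool rhs_is_heap_number,
                         uintptr_t (*allocate_heap_number)()) {
  if (mode == kOverwriteLeft && lhs_is_heap_number) return lhs;   // __ mov(r5, Operand(r1))
  if (mode == kOverwriteRight && rhs_is_heap_number) return rhs;  // __ mov(r5, Operand(r0))
  return allocate_heap_number();  // __ AllocateHeapNumber(r5, r6, r7, &slow)
}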
+ __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. + __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); + __ b(ne, &slow); + if (mode_ == OVERWRITE_LEFT) { + __ mov(r5, Operand(r1)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r1 to d6. + __ sub(r7, r1, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that first double is in r0 and r1. + __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); + __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); + } + __ jmp(&finished_loading_r1); + __ bind(&r1_is_smi); + if (mode_ == OVERWRITE_LEFT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r6, r7, &slow); + } + + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r1 to double in d6. + __ mov(r7, Operand(r1, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + } else { + // Write Smi from r1 to r1 and r0 in double format. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub4(r1, r0, r7, r6); + __ push(lr); + __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + __ bind(&finished_loading_r1); + + __ bind(&do_the_call); + // If we are inlining the operation using VFP3 instructions for + // add, subtract, multiply, or divide, the arguments are in d6 and d7. + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement + // double precision, add, subtract, multiply, divide. + + if (Token::MUL == op_) { + __ vmul(d5, d6, d7); + } else if (Token::DIV == op_) { + __ vdiv(d5, d6, d7); + } else if (Token::ADD == op_) { + __ vadd(d5, d6, d7); + } else if (Token::SUB == op_) { + __ vsub(d5, d6, d7); + } else { + UNREACHABLE(); + } + __ sub(r0, r5, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ add(r0, r0, Operand(kHeapObjectTag)); + __ mov(pc, lr); + } else { + // If we did not inline the operation, then the arguments are in: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + // r5: Address of heap number for result. + + __ push(lr); // For later. + __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. + // Call C routine that may not cause GC or other trouble. r5 is callee + // save. + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); + // Store answer in the overwritable heap number. + #if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as register + // cr8. Offsets must be divisible by 4 for coprocessor so we need to + // substract the tag from r5. + __ sub(r4, r5, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); + #else + // Double returned in registers 0 and 1. + __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); + __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); + #endif + __ mov(r0, Operand(r5)); + // And we are done. + __ pop(pc); + } + } // We jump to here if something goes wrong (one param is not a number of any // sort or new-space allocation fails). 
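// Sketch (not V8 code) of the calling-convention split at the end of the slow
// path above: under USE_ARM_EABI the C routine returns the double in the core
// register pair r0:r1, which the stub stores into the HeapNumber as two 32-bit
// words; the pre-EABI path instead stores straight from FP coprocessor
// register cr8 with stc. The word-pair view of a double looks like this:
#include <cstdint>
#include <cstring>

void StoreDoubleAsWordPair(double result, uint32_t* heap_number_payload) {
  uint32_t words[2];
  std::memcpy(words, &result, sizeof(words));  // the r0:r1 image of the double
  heap_number_payload[0] = words[0];           // __ str(r0, [r5, kValueOffset])
  heap_number_payload[1] = words[1];           // __ str(r1, [r5, kValueOffset + 4])
}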
__ bind(&slow); @@ -5476,7 +6115,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, __ push(r1); __ push(r0); - if (Token::ADD == operation) { + if (Token::ADD == op_) { // Test for string arguments before calling runtime. // r1 : first argument // r0 : second argument @@ -5528,156 +6167,6 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, } __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. - - // We branch here if at least one of r0 and r1 is not a Smi. - __ bind(not_smi); - if (mode == NO_OVERWRITE) { - // In the case where there is no chance of an overwritable float we may as - // well do the allocation immediately while r0 and r1 are untouched. - __ AllocateHeapNumber(r5, r6, r7, &slow); - } - - // Move r0 to a double in r2-r3. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(ne, &slow); - if (mode == OVERWRITE_RIGHT) { - __ mov(r5, Operand(r0)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r0 to d7. - __ sub(r7, r0, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that second double is in r2 and r3. - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); - } - __ jmp(&finished_loading_r0); - __ bind(&r0_is_smi); - if (mode == OVERWRITE_RIGHT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r6, r7, &slow); - } - - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r0 to double in d7. - __ mov(r7, Operand(r0, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - } else { - // Write Smi from r0 to r3 and r2 in double format. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub3(r3, r2, r7, r6); - __ push(lr); - __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - __ bind(&finished_loading_r0); - - // Move r1 to a double in r0-r1. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); - __ b(ne, &slow); - if (mode == OVERWRITE_LEFT) { - __ mov(r5, Operand(r1)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r1 to d6. - __ sub(r7, r1, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that first double is in r0 and r1. - __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); - __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); - } - __ jmp(&finished_loading_r1); - __ bind(&r1_is_smi); - if (mode == OVERWRITE_LEFT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r6, r7, &slow); - } - - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r1 to double in d6. - __ mov(r7, Operand(r1, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - } else { - // Write Smi from r1 to r1 and r0 in double format. 
- __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub4(r1, r0, r7, r6); - __ push(lr); - __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - __ bind(&finished_loading_r1); - - __ bind(&do_the_call); - // If we are inlining the operation using VFP3 instructions for - // add, subtract, multiply, or divide, the arguments are in d6 and d7. - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // ARMv7 VFP3 instructions to implement - // double precision, add, subtract, multiply, divide. - - if (Token::MUL == operation) { - __ vmul(d5, d6, d7); - } else if (Token::DIV == operation) { - __ vdiv(d5, d6, d7); - } else if (Token::ADD == operation) { - __ vadd(d5, d6, d7); - } else if (Token::SUB == operation) { - __ vsub(d5, d6, d7); - } else { - UNREACHABLE(); - } - __ sub(r0, r5, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ add(r0, r0, Operand(kHeapObjectTag)); - __ mov(pc, lr); - return; - } - - // If we did not inline the operation, then the arguments are in: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - // r5: Address of heap number for result. - - __ push(lr); // For later. - __ push(r5); // Address of heap number that is answer. - __ AlignStack(0); - // Call C routine that may not cause GC or other trouble. - __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); - __ Call(r5); - __ pop(r4); // Address of heap number. - __ cmp(r4, Operand(Smi::FromInt(0))); - __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. - // Store answer in the overwritable heap number. -#if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as register - // cr8. Offsets must be divisible by 4 for coprocessor so we need to - // substract the tag from r4. - __ sub(r5, r4, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); -#else - // Double returned in registers 0 and 1. - __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); - __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4)); -#endif - __ mov(r0, Operand(r4)); - // And we are done. - __ pop(pc); } @@ -5771,31 +6260,35 @@ static void GetInt32(MacroAssembler* masm, // by the ES spec. If this is the case we do the bitwise op and see if the // result is a Smi. If so, great, otherwise we try to find a heap number to // write the answer into (either by allocating or by overwriting). -// On entry the operands are in r0 and r1. On exit the answer is in r0. -void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { +// On entry the operands are in lhs and rhs. On exit the answer is in r0. +void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs) { Label slow, result_not_a_smi; - Label r0_is_smi, r1_is_smi; - Label done_checking_r0, done_checking_r1; + Label rhs_is_smi, lhs_is_smi; + Label done_checking_rhs, done_checking_lhs; - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. 
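// Sketch (not V8 code) of the operand conversion in HandleNonSmiBitwiseOp:
// each operand becomes a signed 32-bit integer, either by arithmetic-shifting
// a smi past its tag (the ASR 1 above) or by truncating a HeapNumber's double
// as ECMAScript ToInt32 specifies (GetInt32's job). This version uses the
// mathematically equivalent mod-2^32 wrap rather than V8's bit-level fast path.
#include <cstdint>
#include <cmath>

int32_t SmiToInt32(int32_t tagged) { return tagged >> 1; }  // ASR kSmiTagSize

int32_t DoubleToInt32(double d) {
  if (!std::isfinite(d)) return 0;  // NaN and infinities map to 0
  double wrapped = std::fmod(std::trunc(d), 4294967296.0);  // mod 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}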
+ __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); __ b(ne, &slow); - GetInt32(masm, r1, r3, r5, r4, &slow); - __ jmp(&done_checking_r1); - __ bind(&r1_is_smi); - __ mov(r3, Operand(r1, ASR, 1)); - __ bind(&done_checking_r1); - - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + GetInt32(masm, lhs, r3, r5, r4, &slow); + __ jmp(&done_checking_lhs); + __ bind(&lhs_is_smi); + __ mov(r3, Operand(lhs, ASR, 1)); + __ bind(&done_checking_lhs); + + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. + __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); __ b(ne, &slow); - GetInt32(masm, r0, r2, r5, r4, &slow); - __ jmp(&done_checking_r0); - __ bind(&r0_is_smi); - __ mov(r2, Operand(r0, ASR, 1)); - __ bind(&done_checking_r0); + GetInt32(masm, rhs, r2, r5, r4, &slow); + __ jmp(&done_checking_rhs); + __ bind(&rhs_is_smi); + __ mov(r2, Operand(rhs, ASR, 1)); + __ bind(&done_checking_rhs); + + ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); // r0 and r1: Original operands (Smi or heap numbers). // r2 and r3: Signed int32 operands. @@ -5835,15 +6328,15 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { __ bind(&result_not_a_smi); switch (mode_) { case OVERWRITE_RIGHT: { - __ tst(r0, Operand(kSmiTagMask)); + __ tst(rhs, Operand(kSmiTagMask)); __ b(eq, &have_to_allocate); - __ mov(r5, Operand(r0)); + __ mov(r5, Operand(rhs)); break; } case OVERWRITE_LEFT: { - __ tst(r1, Operand(kSmiTagMask)); + __ tst(lhs, Operand(kSmiTagMask)); __ b(eq, &have_to_allocate); - __ mov(r5, Operand(r1)); + __ mov(r5, Operand(lhs)); break; } case NO_OVERWRITE: { @@ -5874,8 +6367,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { // If all else failed then we go to the runtime system. __ bind(&slow); - __ push(r1); // restore stack - __ push(r0); + __ push(lhs); // restore stack + __ push(rhs); switch (op_) { case Token::BIT_OR: __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); @@ -6005,115 +6498,134 @@ const char* GenericBinaryOpStub::GetName() { void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - // r1 : x - // r0 : y - // result : r0 + // lhs_ : x + // rhs_ : y + // r0 : result + + Register result = r0; + Register lhs = lhs_; + Register rhs = rhs_; - // All ops need to know whether we are dealing with two Smis. Set up r2 to - // tell us that. - __ orr(r2, r1, Operand(r0)); // r2 = x | y; + // This code can't cope with other register allocations yet. + ASSERT(result.is(r0) && + ((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0)))); + + Register smi_test_reg = VirtualFrame::scratch0(); + Register scratch = VirtualFrame::scratch1(); + + // All ops need to know whether we are dealing with two Smis. Set up + // smi_test_reg to tell us that. + if (ShouldGenerateSmiCode()) { + __ orr(smi_test_reg, lhs, Operand(rhs)); + } switch (op_) { case Token::ADD: { Label not_smi; // Fast path. - ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r0, Operand(r1)); // Revert optimistic add. - - HandleBinaryOpSlowCases(masm, - ¬_smi, - Builtins::ADD, - Token::ADD, - mode_); + if (ShouldGenerateSmiCode()) { + ASSERT(kSmiTag == 0); // Adjust code below. 
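// Sketch (not V8 code) of the optimistic smi ADD fast path emitted just below:
// with kSmiTag == 0, the 32-bit sum of two tagged smis is the correctly tagged
// sum, so the stub adds with SetCC, returns while the overflow (V) flag is
// clear, and otherwise reverts the add before falling through to the slow
// cases. __builtin_add_overflow is the GCC/Clang analogue of the V-flag check.
#include <cstdint>

bool TrySmiAdd(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result_tagged) {
  int32_t sum;
  if (__builtin_add_overflow(lhs_tagged, rhs_tagged, &sum)) {
    return false;  // overflow: __ sub(r0, r0, Operand(r1)) reverts, slow path runs
  }
  *result_tagged = sum;  // __ Ret(vc)
  return true;
}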
+ __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r0, Operand(r1)); // Revert optimistic add. + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); break; } case Token::SUB: { Label not_smi; // Fast path. - ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. - - HandleBinaryOpSlowCases(masm, - ¬_smi, - Builtins::SUB, - Token::SUB, - mode_); + if (ShouldGenerateSmiCode()) { + ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + if (lhs.is(r1)) { + __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. + } else { + __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. + } + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); break; } case Token::MUL: { Label not_smi, slow; - ASSERT(kSmiTag == 0); // adjust code below - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - // Remove tag from one operand (but keep sign), so that result is Smi. - __ mov(ip, Operand(r0, ASR, kSmiTagSize)); - // Do multiplication - __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. - // Go slow on overflows (overflow bit is not set). - __ mov(ip, Operand(r3, ASR, 31)); - __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical - __ b(ne, &slow); - // Go slow on zero result to handle -0. - __ tst(r3, Operand(r3)); - __ mov(r0, Operand(r3), LeaveCC, ne); - __ Ret(ne); - // We need -0 if we were multiplying a negative number with 0 to get 0. - // We know one of them was zero. - __ add(r2, r0, Operand(r1), SetCC); - __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); - __ Ret(pl); // Return Smi 0 if the non-zero one was positive. - // Slow case. We fall through here if we multiplied a negative number - // with 0, because that would mean we should produce -0. - __ bind(&slow); - - HandleBinaryOpSlowCases(masm, - ¬_smi, - Builtins::MUL, - Token::MUL, - mode_); + if (ShouldGenerateSmiCode()) { + ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ b(ne, ¬_smi); + // Remove tag from one operand (but keep sign), so that result is Smi. + __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); + // Do multiplication + // scratch = lower 32 bits of ip * lhs. + __ smull(scratch, scratch2, lhs, ip); + // Go slow on overflows (overflow bit is not set). + __ mov(ip, Operand(scratch, ASR, 31)); + // No overflow if higher 33 bits are identical. + __ cmp(ip, Operand(scratch2)); + __ b(ne, &slow); + // Go slow on zero result to handle -0. + __ tst(scratch, Operand(scratch)); + __ mov(result, Operand(scratch), LeaveCC, ne); + __ Ret(ne); + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. + __ add(scratch2, rhs, Operand(lhs), SetCC); + __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ Ret(pl); // Return Smi 0 if the non-zero one was positive. + // Slow case. 
We fall through here if we multiplied a negative number + // with 0, because that would mean we should produce -0. + __ bind(&slow); + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); break; } case Token::DIV: case Token::MOD: { Label not_smi; - if (specialized_on_rhs_) { + if (ShouldGenerateSmiCode() && specialized_on_rhs_) { Label smi_is_unsuitable; - __ BranchOnNotSmi(r1, ¬_smi); + __ BranchOnNotSmi(lhs, ¬_smi); if (IsPowerOf2(constant_rhs_)) { if (op_ == Token::MOD) { - __ and_(r0, - r1, + __ and_(rhs, + lhs, Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), SetCC); // We now have the answer, but if the input was negative we also // have the sign bit. Our work is done if the result is // positive or zero: + if (!rhs.is(r0)) { + __ mov(r0, rhs, LeaveCC, pl); + } __ Ret(pl); // A mod of a negative left hand side must return a negative number. // Unfortunately if the answer is 0 then we must return -0. And we - // already optimistically trashed r0 so we may need to restore it. - __ eor(r0, r0, Operand(0x80000000u), SetCC); + // already optimistically trashed rhs so we may need to restore it. + __ eor(rhs, rhs, Operand(0x80000000u), SetCC); // Next two instructions are conditional on the answer being -0. - __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); + __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); __ b(eq, &smi_is_unsuitable); // We need to subtract the dividend. Eg. -3 % 4 == -3. - __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_))); + __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); } else { ASSERT(op_ == Token::DIV); - __ tst(r1, + __ tst(lhs, Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder. int shift = 0; @@ -6122,12 +6634,12 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { d >>= 1; shift++; } - __ mov(r0, Operand(r1, LSR, shift)); + __ mov(r0, Operand(lhs, LSR, shift)); __ bic(r0, r0, Operand(kSmiTagMask)); } } else { // Not a power of 2. - __ tst(r1, Operand(0x80000000u)); + __ tst(lhs, Operand(0x80000000u)); __ b(ne, &smi_is_unsuitable); // Find a fixed point reciprocal of the divisor so we can divide by // multiplying. @@ -6143,40 +6655,42 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { shift++; } mul++; - __ mov(r2, Operand(mul)); - __ umull(r3, r2, r2, r1); - __ mov(r2, Operand(r2, LSR, shift - 31)); - // r2 is r1 / rhs. r2 is not Smi tagged. - // r0 is still the known rhs. r0 is Smi tagged. - // r1 is still the unkown lhs. r1 is Smi tagged. - int required_r4_shift = 0; // Including the Smi tag shift of 1. - // r4 = r2 * r0. + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ mov(scratch2, Operand(mul)); + __ umull(scratch, scratch2, scratch2, lhs); + __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); + // scratch2 is lhs / rhs. scratch2 is not Smi tagged. + // rhs is still the known rhs. rhs is Smi tagged. + // lhs is still the unkown lhs. lhs is Smi tagged. + int required_scratch_shift = 0; // Including the Smi tag shift of 1. + // scratch = scratch2 * rhs. MultiplyByKnownInt2(masm, - r4, - r2, - r0, + scratch, + scratch2, + rhs, constant_rhs_, - &required_r4_shift); - // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs). + &required_scratch_shift); + // scratch << required_scratch_shift is now the Smi tagged rhs * + // (lhs / rhs) where / indicates integer division. 
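// Sketch (not V8 code) of the divide-by-known-constant trick above: choose a
// fixed-point reciprocal mul ~= 2^shift / d, take the candidate quotient from
// the high half of a 32x32->64 multiply (the umull + LSR), then multiply back
// and compare, so that any remainder -- or an off-by-one from the rounded
// reciprocal -- bails to the generic path, as the cmp/b(ne) pair above does.
#include <cstdint>

bool TryDivideByKnownConstant(uint32_t lhs, uint32_t d, uint32_t* quotient) {
  if (d < 2) return false;  // the stub handles these divisors elsewhere
  // Maximize the precision of the reciprocal (cf. the scale-doubling loop).
  int shift = 32;
  double scale = 4294967296.0;  // 2^32
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * (1.0 / d));
    if (mul >= 0x7fffffffu) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  uint32_t q = static_cast<uint32_t>(
      (static_cast<uint64_t>(mul) * lhs) >> shift);
  if (q * d != lhs) return false;  // remainder or off-by-one: take the slow path
  *quotient = q;
  return true;
}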
if (op_ == Token::DIV) { - __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC); + __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); __ b(ne, &smi_is_unsuitable); // There was a remainder. - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); + __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); } else { ASSERT(op_ == Token::MOD); - __ sub(r0, r1, Operand(r4, LSL, required_r4_shift)); + __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); } } __ Ret(); __ bind(&smi_is_unsuitable); - } else { - __ jmp(¬_smi); } - HandleBinaryOpSlowCases(masm, - ¬_smi, - op_ == Token::MOD ? Builtins::MOD : Builtins::DIV, - op_, - mode_); + HandleBinaryOpSlowCases( + masm, + ¬_smi, + lhs, + rhs, + op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); break; } @@ -6188,47 +6702,49 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { case Token::SHL: { Label slow; ASSERT(kSmiTag == 0); // adjust code below - __ tst(r2, Operand(kSmiTagMask)); + __ tst(smi_test_reg, Operand(kSmiTagMask)); __ b(ne, &slow); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; switch (op_) { - case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; - case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; - case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; + case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; + case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; + case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; case Token::SAR: // Remove tags from right operand. - __ GetLeastBitsFromSmi(r2, r0, 5); - __ mov(r0, Operand(r1, ASR, r2)); + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(result, Operand(lhs, ASR, scratch2)); // Smi tag result. - __ bic(r0, r0, Operand(kSmiTagMask)); + __ bic(result, result, Operand(kSmiTagMask)); break; case Token::SHR: // Remove tags from operands. We can't do this on a 31 bit number // because then the 0s get shifted into bit 30 instead of bit 31. - __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(r2, r0, 5); - __ mov(r3, Operand(r3, LSR, r2)); + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSR, scratch2)); // Unsigned shift is not allowed to produce a negative number, so // check the sign bit and the sign bit after Smi tagging. - __ tst(r3, Operand(0xc0000000)); + __ tst(scratch, Operand(0xc0000000)); __ b(ne, &slow); // Smi tag result. - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); break; case Token::SHL: // Remove tags from operands. - __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(r2, r0, 5); - __ mov(r3, Operand(r3, LSL, r2)); + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSL, scratch2)); // Check that the signed result fits in a Smi. - __ add(r2, r3, Operand(0x40000000), SetCC); + __ add(scratch2, scratch, Operand(0x40000000), SetCC); __ b(mi, &slow); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); break; default: UNREACHABLE(); } __ Ret(); __ bind(&slow); - HandleNonSmiBitwiseOp(masm); + HandleNonSmiBitwiseOp(masm, lhs, rhs); break; } @@ -6236,11 +6752,52 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { } // This code should be unreachable. 
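// Sketch (not V8 code) of the smi shift cases above. SAR can shift the tagged
// value directly and clear the tag bit afterwards (the bic); SHR and SHL untag
// first, shift, and re-tag only if the result still fits the 31-bit signed smi
// range -- the add-0x40000000/branch(mi) test for SHL and the 0xc0000000 mask
// test for SHR. Shift counts take the low five bits (GetLeastBitsFromSmi).
// Assumes arithmetic right shift on signed ints, as on ARM.
#include <cstdint>

enum ShiftOpSketch { kSar, kShr, kShl };

bool TrySmiShift(ShiftOpSketch op, int32_t lhs_tagged, int32_t rhs_tagged,
                 int32_t* result_tagged) {
  int count = (rhs_tagged >> 1) & 0x1f;           // __ GetLeastBitsFromSmi(.., 5)
  if (op == kSar) {
    *result_tagged = (lhs_tagged >> count) & ~1;  // shift tagged value, bic tag bit
    return true;
  }
  int32_t x = lhs_tagged >> 1;                    // untag
  if (op == kShl) {
    uint32_t shifted = static_cast<uint32_t>(x) << count;
    if (shifted + 0x40000000u >= 0x80000000u) return false;  // add/b(mi): overflow
    *result_tagged = static_cast<int32_t>(shifted << 1);     // re-tag
    return true;
  }
  uint32_t u = static_cast<uint32_t>(x) >> count;            // logical shift
  if (u & 0xc0000000u) return false;  // would not fit as a non-negative smi
  *result_tagged = static_cast<int32_t>(u << 1);             // re-tag
  return true;
}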
__ stop("Unreachable"); + + // Generate an unreachable reference to the DEFAULT stub so that it can be + // found at the end of this stub when clearing ICs at GC. + // TODO(kaznacheev): Check performance impact and get rid of this. + if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { + GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); + __ CallStub(&uninit); + } +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + __ push(r1); + __ push(r0); + + // Internal frame is necessary to handle exceptions properly. + __ EnterInternalFrame(); + // Call the stub proper to get the result in r0. + __ Call(&get_result); + __ LeaveInternalFrame(); + + __ push(r0); + + __ mov(r0, Operand(Smi::FromInt(MinorKey()))); + __ push(r0); + __ mov(r0, Operand(Smi::FromInt(op_))); + __ push(r0); + __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); + __ push(r0); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 6, + 1); + + // The entry point for the result calculation is assumed to be immediately + // after this sequence. + __ bind(&get_result); } Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - return Handle::null(); + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); } @@ -6791,26 +7348,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } -void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) { - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); - __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor); - - // Nothing to do: The formal number of parameters has already been - // passed in register r0 by calling function. Just return it. - __ Jump(lr); - - // Arguments adaptor case: Read the arguments length from the - // adaptor frame and return it. - __ bind(&adaptor); - __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ Jump(lr); -} - - void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The displacement is the offset of the last parameter (if any) // relative to the frame pointer. @@ -7380,9 +7917,7 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. Register result = scratch; __ bind(&found_in_symbol_table); - if (!result.is(r0)) { - __ mov(r0, result); - } + __ Move(r0, result); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 0d1a3855975..74aed1d79e3 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -28,6 +28,8 @@ #ifndef V8_ARM_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_ +#include "ic-inl.h" + namespace v8 { namespace internal { @@ -90,10 +92,6 @@ class Reference BASE_EMBEDDED { // If the reference is not consumed, it is left in place under its value. void GetValue(); - // Generate code to pop a reference, push the value of the reference, - // and then spill the stack frame. - inline void GetValueAndSpill(); - // Generate code to store the value on top of the expression stack in the // reference. The reference is expected to be immediately below the value // on the expression stack. 
The value is stored in the location specified @@ -312,6 +310,9 @@ class CodeGenerator: public AstVisitor { void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode, int known_rhs = kUnknownIntValue); + void VirtualFrameBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + int known_rhs = kUnknownIntValue); void Comparison(Condition cc, Expression* left, Expression* right, @@ -322,6 +323,11 @@ class CodeGenerator: public AstVisitor { bool reversed, OverwriteMode mode); + void VirtualFrameSmiOperation(Token::Value op, + Handle value, + bool reversed, + OverwriteMode mode); + void CallWithArguments(ZoneList* arguments, CallFunctionFlags flags, int position); @@ -387,7 +393,7 @@ class CodeGenerator: public AstVisitor { void GenerateLog(ZoneList* args); // Fast support for Math.random(). - void GenerateRandomPositiveSmi(ZoneList* args); + void GenerateRandomHeapNumber(ZoneList* args); // Fast support for StringAdd. void GenerateStringAdd(ZoneList* args); @@ -401,9 +407,14 @@ class CodeGenerator: public AstVisitor { // Support for direct calls from JavaScript to native RegExp code. void GenerateRegExpExec(ZoneList* args); + void GenerateRegExpConstructResult(ZoneList* args); + // Fast support for number to string. void GenerateNumberToString(ZoneList* args); + // Fast call for custom callbacks. + void GenerateCallFunction(ZoneList* args); + // Fast call to math functions. void GenerateMathPow(ZoneList* args); void GenerateMathSin(ZoneList* args); @@ -470,37 +481,68 @@ class GenericBinaryOpStub : public CodeStub { public: GenericBinaryOpStub(Token::Value op, OverwriteMode mode, + Register lhs, + Register rhs, int constant_rhs = CodeGenerator::kUnknownIntValue) : op_(op), mode_(mode), + lhs_(lhs), + rhs_(rhs), constant_rhs_(constant_rhs), specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)), + runtime_operands_type_(BinaryOpIC::DEFAULT), + name_(NULL) { } + + GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + lhs_(LhsRegister(RegisterBits::decode(key))), + rhs_(RhsRegister(RegisterBits::decode(key))), + constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)), + runtime_operands_type_(type_info), name_(NULL) { } private: Token::Value op_; OverwriteMode mode_; + Register lhs_; + Register rhs_; int constant_rhs_; bool specialized_on_rhs_; + BinaryOpIC::TypeInfo runtime_operands_type_; char* name_; static const int kMaxKnownRhs = 0x40000000; + static const int kKnownRhsKeyBits = 6; - // Minor key encoding in 16 bits. + // Minor key encoding in 17 bits. class ModeBits: public BitField {}; class OpBits: public BitField {}; - class KnownIntBits: public BitField {}; + class TypeInfoBits: public BitField {}; + class RegisterBits: public BitField {}; + class KnownIntBits: public BitField {}; Major MajorKey() { return GenericBinaryOp; } int MinorKey() { - // Encode the parameters in a unique 16 bit value. + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + // Encode the parameters in a unique 18 bit value. 
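// A stand-alone sketch of the minor-key packing performed below
// (illustrative only, not part of the patch). The field layout here is an
// assumption matching the "17 bits" comment above: 2-bit mode, 6-bit op,
// 2-bit type info, a 1-bit lhs-is-r0 flag and a 6-bit known-int key.
// Encoding ORs the shifted fields together; decoding masks them back out,
// which is how the second constructor above rebuilds a stub (including its
// lhs/rhs register assignment) from a cached key.
#include <cassert>

struct MinorKeySketch {
  static int Encode(int mode, int op, int type_info, bool lhs_is_r0,
                    int known_int) {
    return mode | (op << 2) | (type_info << 8) |
           (static_cast<int>(lhs_is_r0) << 10) | (known_int << 11);
  }
  static int Mode(int key) { return key & 0x3; }
  static int Op(int key) { return (key >> 2) & 0x3f; }
  static int TypeInfo(int key) { return (key >> 8) & 0x3; }
  static bool LhsIsR0(int key) { return ((key >> 10) & 0x1) != 0; }
  static int KnownInt(int key) { return (key >> 11) & 0x3f; }
};

int main() {
  int key = MinorKeySketch::Encode(1, 33, 2, true, 12);
  assert(MinorKeySketch::Mode(key) == 1);
  assert(MinorKeySketch::Op(key) == 33);
  assert(MinorKeySketch::TypeInfo(key) == 2);
  assert(MinorKeySketch::LhsIsR0(key));
  assert(MinorKeySketch::KnownInt(key) == 12);
  return 0;
}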
return OpBits::encode(op_) | ModeBits::encode(mode_) - | KnownIntBits::encode(MinorKeyForKnownInt()); + | KnownIntBits::encode(MinorKeyForKnownInt()) + | TypeInfoBits::encode(runtime_operands_type_) + | RegisterBits::encode(lhs_.is(r0)); } void Generate(MacroAssembler* masm); - void HandleNonSmiBitwiseOp(MacroAssembler* masm); + void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs); + void HandleBinaryOpSlowCases(MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin); + void GenerateTypeTransition(MacroAssembler* masm); static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { if (constant_rhs == CodeGenerator::kUnknownIntValue) return false; @@ -524,9 +566,45 @@ class GenericBinaryOpStub : public CodeStub { key++; d >>= 1; } + ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); return key; } + int KnownBitsForMinorKey(int key) { + if (!key) return 0; + if (key <= 11) return key - 1; + int d = 1; + while (key != 12) { + key--; + d <<= 1; + } + return d; + } + + Register LhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r0 : r1; + } + + Register RhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r1 : r0; + } + + bool ShouldGenerateSmiCode() { + return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } + const char* GetName(); #ifdef DEBUG diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 36d2fb67b62..5eed13ff9d8 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -151,24 +151,19 @@ enum Opcode { }; -// Some special instructions encoded as a TEQ with S=0 (bit 20). -enum Opcode9Bits { +// The bits for bit 7-4 for some type 0 miscellaneous instructions. +enum MiscInstructionsBits74 { + // With bits 22-21 01. BX = 1, BXJ = 2, BLX = 3, - BKPT = 7 -}; - + BKPT = 7, -// Some special instructions encoded as a CMN with S=0 (bit 20). -enum Opcode11Bits { + // With bits 22-21 11. CLZ = 1 }; -// S - - // Shifter types for Data-processing operands as defined in section A5.1.2. enum Shift { no_shift = -1, @@ -310,6 +305,12 @@ class Instr { // as well as multiplications). inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); } + // Test for miscellaneous instructions encodings of type 0 instructions. + inline bool IsMiscType0() const { return (Bit(24) == 1) + && (Bit(23) == 0) + && (Bit(20) == 0) + && ((Bit(7) == 0)); } + // Special accessors that test for existence of a value. 
inline bool HasS() const { return SField() == 1; } inline bool HasB() const { return BField() == 1; } diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index bc81b19d21a..1aca7cee44c 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -216,8 +216,23 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { } +void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + masm->Abort("LiveEdit frame dropping is not supported on arm"); +} + +void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + masm->Abort("LiveEdit frame dropping is not supported on arm"); +} + #undef __ + +void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame, + Handle code) { + UNREACHABLE(); +} +const int Debug::kFrameDropperFrameSize = -1; + #endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 8e1776d98c4..a89c6b8d56d 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -449,6 +449,14 @@ int Decoder::FormatOption(Instr* instr, const char* format) { out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", instr->ShiftAmountField()); return 8; + } else if (format[3] == '0') { + // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0. + ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19")); + out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, + "%d", + (instr->Bits(19, 8) << 4) + + instr->Bits(3, 0)); + return 15; } // 'off8: 8-bit offset for extra load and store instructions ASSERT(STRING_STARTS_WITH(format, "off8")); @@ -650,6 +658,34 @@ void Decoder::DecodeType01(Instr* instr) { } return; } + } else if ((type == 0) && instr->IsMiscType0()) { + if (instr->Bits(22, 21) == 1) { + switch (instr->Bits(7, 4)) { + case BX: + Format(instr, "bx'cond 'rm"); + break; + case BLX: + Format(instr, "blx'cond 'rm"); + break; + case BKPT: + Format(instr, "bkpt 'off0to3and8to19"); + break; + default: + Unknown(instr); // not used by V8 + break; + } + } else if (instr->Bits(22, 21) == 3) { + switch (instr->Bits(7, 4)) { + case CLZ: + Format(instr, "clz'cond 'rd, 'rm"); + break; + default: + Unknown(instr); // not used by V8 + break; + } + } else { + Unknown(instr); // not used by V8 + } } else { switch (instr->OpcodeField()) { case AND: { @@ -696,17 +732,9 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "teq'cond 'rn, 'shift_op"); } else { - switch (instr->Bits(7, 4)) { - case BX: - Format(instr, "bx'cond 'rm"); - break; - case BLX: - Format(instr, "blx'cond 'rm"); - break; - default: - Unknown(instr); // not used by V8 - break; - } + // Other instructions matching this pattern are handled in the + // miscellaneous instructions part above. + UNREACHABLE(); } break; } @@ -722,14 +750,9 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "cmn'cond 'rn, 'shift_op"); } else { - switch (instr->Bits(7, 4)) { - case CLZ: - Format(instr, "clz'cond 'rd, 'rm"); - break; - default: - Unknown(instr); // not used by V8 - break; - } + // Other instructions matching this pattern are handled in the + // miscellaneous instructions part above. 
+ UNREACHABLE(); } break; } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index cc9e70b8518..72f4128e6c7 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1013,7 +1013,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitBinaryOp(Token::Value op, Expression::Context context) { __ pop(r1); - GenericBinaryOpStub stub(op, NO_OVERWRITE); + GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0); __ CallStub(&stub); Apply(context, r0); } @@ -1609,7 +1609,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ mov(r1, Operand(expr->op() == Token::INC ? Smi::FromInt(1) : Smi::FromInt(-1))); - GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE); + GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0); __ CallStub(&stub); __ bind(&done); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index cc7cab7e206..0ac42173ed4 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -706,6 +706,29 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { // -- sp[4] : receiver // ----------------------------------- + Label miss, index_ok; + + // Get the key and receiver object from the stack. + __ ldm(ia, sp, r0.bit() | r1.bit()); + + // Check that the receiver isn't a smi. + __ BranchOnSmi(r1, &miss); + + // Check that the receiver is a string. + Condition is_string = masm->IsObjectStringType(r1, r2); + __ b(NegateCondition(is_string), &miss); + + // Check if key is a smi or a heap number. + __ BranchOnSmi(r0, &index_ok); + __ CheckMap(r0, r2, Factory::heap_number_map(), &miss, false); + + __ bind(&index_ok); + // Duplicate receiver and key since they are expected on the stack after + // the KeyedLoadIC call. + __ stm(db_w, sp, r0.bit() | r1.bit()); + __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS); + + __ bind(&miss); GenerateGeneric(masm); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index ac1c14fd96f..1131760db3c 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -180,6 +180,19 @@ void MacroAssembler::Drop(int count, Condition cond) { } +void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { + if (scratch.is(no_reg)) { + eor(reg1, reg1, Operand(reg2)); + eor(reg2, reg2, Operand(reg1)); + eor(reg1, reg1, Operand(reg2)); + } else { + mov(scratch, reg1); + mov(reg1, reg2); + mov(reg2, scratch); + } +} + + void MacroAssembler::Call(Label* target) { bl(target); } @@ -190,6 +203,13 @@ void MacroAssembler::Move(Register dst, Handle value) { } +void MacroAssembler::Move(Register dst, Register src) { + if (!dst.is(src)) { + mov(dst, src); + } +} + + void MacroAssembler::SmiJumpTable(Register index, Vector targets) { // Empty the const pool. CheckConstPool(true, true); @@ -1537,6 +1557,45 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, } +void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { + int frameAlignment = OS::ActivationFrameAlignment(); + // Up to four simple arguments are passed in registers r0..r3. + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; + if (frameAlignment > kPointerSize) { + // Make stack end at alignment and make room for num_arguments - 4 words + // and the original value of sp. 
+ mov(scratch, sp); + sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); + ASSERT(IsPowerOf2(frameAlignment)); + and_(sp, sp, Operand(-frameAlignment)); + str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } +} + + +void MacroAssembler::CallCFunction(ExternalReference function, + int num_arguments) { + mov(ip, Operand(function)); + CallCFunction(ip, num_arguments); +} + + +void MacroAssembler::CallCFunction(Register function, int num_arguments) { + // Just call directly. The function called cannot cause a GC, or + // allow preemption, so the return address in the link register + // stays correct. + Call(function); + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; + if (OS::ActivationFrameAlignment() > kPointerSize) { + ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); + } +} + + #ifdef ENABLE_DEBUGGER_SUPPORT CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 1097bd9d14f..fa3a7ee65fc 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -70,8 +70,15 @@ class MacroAssembler: public Assembler { // from the stack, clobbering only the sp register. void Drop(int count, Condition cond = al); + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. + void Swap(Register reg1, Register reg2, Register scratch = no_reg); + void Call(Label* target); void Move(Register dst, Handle value); + // May do nothing if the registers are identical. + void Move(Register dst, Register src); // Jumps to the label at the index given by the Smi in "index". void SmiJumpTable(Register index, Vector targets); // Load an object from the root table. @@ -366,6 +373,24 @@ class MacroAssembler: public Assembler { int num_arguments, int result_size); + // Before calling a C-function from generated code, align arguments on stack. + // After aligning the frame, non-register arguments must be stored in + // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments + // are word sized. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. + void PrepareCallCFunction(int num_arguments, Register scratch); + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + // Jump to a runtime routine. 
void JumpToExternalReference(const ExternalReference& builtin); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 13d464d1766..beb6bf12ded 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -163,7 +163,7 @@ void RegExpMacroAssemblerARM::Backtrack() { CheckPreemption(); // Pop Code* offset from backtrack stack, add Code* and jump to location. Pop(r0); - __ add(pc, r0, Operand(r5)); + __ add(pc, r0, Operand(code_pointer())); } @@ -338,7 +338,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( } else { ASSERT(mode_ == UC16); int argument_count = 3; - FrameAlign(argument_count, r2); + __ PrepareCallCFunction(argument_count, r2); // r0 - offset of start of capture // r1 - length of capture @@ -360,7 +360,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( ExternalReference function = ExternalReference::re_case_insensitive_compare_uc16(); - CallCFunction(function, argument_count); + __ CallCFunction(function, argument_count); // Check if function returned non-zero for success or zero for failure. __ cmp(r0, Operand(0)); @@ -770,12 +770,12 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Call GrowStack(backtrack_stackpointer()) static const int num_arguments = 2; - FrameAlign(num_arguments, r0); + __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); __ add(r1, frame_pointer(), Operand(kStackHighEnd)); ExternalReference grow_stack = ExternalReference::re_grow_stack(); - CallCFunction(grow_stack, num_arguments); + __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. __ cmp(r0, Operand(0)); @@ -800,7 +800,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); - LOG(RegExpCodeCreateEvent(*code, *source)); + PROFILE(RegExpCodeCreateEvent(*code, *source)); return Handle::cast(code); } @@ -971,7 +971,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) { void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { static const int num_arguments = 3; - FrameAlign(num_arguments, scratch); + __ PrepareCallCFunction(num_arguments, scratch); // RegExp code frame pointer. __ mov(r2, frame_pointer()); // Code* of self. @@ -1183,47 +1183,12 @@ int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() { } -void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) { - int frameAlignment = OS::ActivationFrameAlignment(); - // Up to four simple arguments are passed in registers r0..r3. - int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; - if (frameAlignment != 0) { - // Make stack end at alignment and make room for num_arguments - 4 words - // and the original value of sp. - __ mov(scratch, sp); - __ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); - ASSERT(IsPowerOf2(frameAlignment)); - __ and_(sp, sp, Operand(-frameAlignment)); - __ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); - } else { - __ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); - } -} - - -void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function, - int num_arguments) { - __ mov(r5, Operand(function)); - // Just call directly. 
The function called cannot cause a GC, or - // allow preemption, so the return address in the link register - // stays correct. - __ Call(r5); - int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; - if (OS::ActivationFrameAlignment() > kIntSize) { - __ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); - } else { - __ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); - } - __ mov(code_pointer(), Operand(masm_->CodeObject())); -} - - void RegExpMacroAssemblerARM::CallCFunctionUsingStub( ExternalReference function, int num_arguments) { // Must pass all arguments in registers. The stub pushes on the stack. ASSERT(num_arguments <= 4); - __ mov(r5, Operand(function)); + __ mov(code_pointer(), Operand(function)); RegExpCEntryStub stub; __ CallStub(&stub); if (OS::ActivationFrameAlignment() != 0) { diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 7de5f93d732..ef54388029e 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -206,22 +206,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { // and increments it by a word size. inline void Pop(Register target); - // Before calling a C-function from generated code, align arguments on stack. - // After aligning the frame, non-register arguments must be stored in - // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments - // are word sized. - // Some compilers/platforms require the stack to be aligned when calling - // C++ code. - // Needs a scratch register to do some arithmetic. This register will be - // trashed. - inline void FrameAlign(int num_arguments, Register scratch); - - // Calls a C function and cleans up the space for arguments allocated - // by FrameAlign. The called function is not allowed to trigger a garbage - // collection. - inline void CallCFunction(ExternalReference function, - int num_arguments); - // Calls a C function and cleans up the frame alignment done by // by FrameAlign. The called function *is* allowed to trigger a garbage // collection, but may not take more than four arguments (no arguments diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h index 4691f297436..945cdeb3cc1 100644 --- a/deps/v8/src/arm/register-allocator-arm-inl.h +++ b/deps/v8/src/arm/register-allocator-arm-inl.h @@ -92,9 +92,6 @@ Register RegisterAllocator::ToRegister(int num) { void RegisterAllocator::Initialize() { Reset(); - // The non-reserved r1 and lr registers are live on JS function entry. - Use(r1); // JS function. - Use(lr); // Return address. } diff --git a/deps/v8/src/arm/register-allocator-arm.h b/deps/v8/src/arm/register-allocator-arm.h index f953ed9f1d5..fdbc88f5dc8 100644 --- a/deps/v8/src/arm/register-allocator-arm.h +++ b/deps/v8/src/arm/register-allocator-arm.h @@ -33,7 +33,8 @@ namespace internal { class RegisterAllocatorConstants : public AllStatic { public: - static const int kNumRegisters = 12; + // No registers are currently managed by the register allocator on ARM. 
+ static const int kNumRegisters = 0; static const int kInvalidRegister = -1; }; diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 49b4a5b4eef..b18fd79b38b 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -150,7 +150,11 @@ bool Debugger::GetValue(const char* desc, int32_t* value) { *value = GetRegisterValue(regnum); return true; } else { - return SScanF(desc, "%i", value) == 1; + if (strncmp(desc, "0x", 2) == 0) { + return SScanF(desc + 2, "%x", reinterpret_cast(value)) == 1; + } else { + return SScanF(desc, "%u", reinterpret_cast(value)) == 1; + } } return false; } @@ -231,6 +235,7 @@ void Debugger::Debug() { char cmd[COMMAND_SIZE + 1]; char arg1[ARG_SIZE + 1]; char arg2[ARG_SIZE + 1]; + char* argv[3] = { cmd, arg1, arg2 }; // make sure to have a proper terminating character if reaching the limit cmd[COMMAND_SIZE] = 0; @@ -258,7 +263,7 @@ void Debugger::Debug() { } else { // Use sscanf to parse the individual parts of the command line. At the // moment no command expects more than two parameters. - int args = SScanF(line, + int argc = SScanF(line, "%" XSTR(COMMAND_SIZE) "s " "%" XSTR(ARG_SIZE) "s " "%" XSTR(ARG_SIZE) "s", @@ -271,7 +276,7 @@ void Debugger::Debug() { // Leave the debugger shell. done = true; } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { - if (args == 2) { + if (argc == 2) { int32_t value; float svalue; double dvalue; @@ -296,7 +301,7 @@ void Debugger::Debug() { } } else if ((strcmp(cmd, "po") == 0) || (strcmp(cmd, "printobject") == 0)) { - if (args == 2) { + if (argc == 2) { int32_t value; if (GetValue(arg1, &value)) { Object* obj = reinterpret_cast(value); @@ -313,6 +318,37 @@ void Debugger::Debug() { } else { PrintF("printobject \n"); } + } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) { + int32_t* cur = NULL; + int32_t* end = NULL; + int next_arg = 1; + + if (strcmp(cmd, "stack") == 0) { + cur = reinterpret_cast(sim_->get_register(Simulator::sp)); + } else { // "mem" + int32_t value; + if (!GetValue(arg1, &value)) { + PrintF("%s unrecognized\n", arg1); + continue; + } + cur = reinterpret_cast(value); + next_arg++; + } + + int32_t words; + if (argc == next_arg) { + words = 10; + } else if (argc == next_arg + 1) { + if (!GetValue(argv[next_arg], &words)) { + words = 10; + } + } + end = cur + words; + + while (cur < end) { + PrintF(" 0x%08x: 0x%08x %10d\n", cur, *cur, *cur); + cur++; + } } else if (strcmp(cmd, "disasm") == 0) { disasm::NameConverter converter; disasm::Disassembler dasm(converter); @@ -322,10 +358,10 @@ void Debugger::Debug() { byte* cur = NULL; byte* end = NULL; - if (args == 1) { + if (argc == 1) { cur = reinterpret_cast(sim_->get_pc()); end = cur + (10 * Instr::kInstrSize); - } else if (args == 2) { + } else if (argc == 2) { int32_t value; if (GetValue(arg1, &value)) { cur = reinterpret_cast(value); @@ -351,7 +387,7 @@ void Debugger::Debug() { v8::internal::OS::DebugBreak(); PrintF("regaining control from gdb\n"); } else if (strcmp(cmd, "break") == 0) { - if (args == 2) { + if (argc == 2) { int32_t value; if (GetValue(arg1, &value)) { if (!SetBreakpoint(reinterpret_cast(value))) { @@ -401,6 +437,10 @@ void Debugger::Debug() { PrintF(" print an object from a register (alias 'po')\n"); PrintF("flags\n"); PrintF(" print flags\n"); + PrintF("stack []\n"); + PrintF(" dump stack content, default dump 10 words)\n"); + PrintF("mem
[]\n"); + PrintF(" dump memory content, default dump 10 words)\n"); PrintF("disasm []\n"); PrintF("disasm [[
] ]\n"); PrintF(" disassemble code, default is 10 instructions from pc\n"); @@ -414,7 +454,7 @@ void Debugger::Debug() { PrintF(" ignore the stop instruction at the current location"); PrintF(" from now on\n"); PrintF("trace (alias 't')\n"); - PrintF(" toogle the tracing of all executed statements"); + PrintF(" toogle the tracing of all executed statements\n"); } else { PrintF("Unknown command: %s\n", cmd); } @@ -1465,6 +1505,50 @@ void Simulator::DecodeType01(Instr* instr) { } return; } + } else if ((type == 0) && instr->IsMiscType0()) { + if (instr->Bits(22, 21) == 1) { + int rm = instr->RmField(); + switch (instr->Bits(7, 4)) { + case BX: + set_pc(get_register(rm)); + break; + case BLX: { + uint32_t old_pc = get_pc(); + set_pc(get_register(rm)); + set_register(lr, old_pc + Instr::kInstrSize); + break; + } + case BKPT: + v8::internal::OS::DebugBreak(); + break; + default: + UNIMPLEMENTED(); + } + } else if (instr->Bits(22, 21) == 3) { + int rm = instr->RmField(); + int rd = instr->RdField(); + switch (instr->Bits(7, 4)) { + case CLZ: { + uint32_t bits = get_register(rm); + int leading_zeros = 0; + if (bits == 0) { + leading_zeros = 32; + } else { + while ((bits & 0x80000000u) == 0) { + bits <<= 1; + leading_zeros++; + } + } + set_register(rd, leading_zeros); + break; + } + default: + UNIMPLEMENTED(); + } + } else { + PrintF("%08x\n", instr->InstructionBits()); + UNIMPLEMENTED(); + } } else { int rd = instr->RdField(); int rn = instr->RnField(); @@ -1582,21 +1666,9 @@ void Simulator::DecodeType01(Instr* instr) { SetNZFlags(alu_out); SetCFlag(shifter_carry_out); } else { - ASSERT(type == 0); - int rm = instr->RmField(); - switch (instr->Bits(7, 4)) { - case BX: - set_pc(get_register(rm)); - break; - case BLX: { - uint32_t old_pc = get_pc(); - set_pc(get_register(rm)); - set_register(lr, old_pc + Instr::kInstrSize); - break; - } - default: - UNIMPLEMENTED(); - } + // Other instructions matching this pattern are handled in the + // miscellaneous instructions part above. + UNREACHABLE(); } break; } @@ -1624,27 +1696,9 @@ void Simulator::DecodeType01(Instr* instr) { SetCFlag(!CarryFrom(rn_val, shifter_operand)); SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true)); } else { - ASSERT(type == 0); - int rm = instr->RmField(); - int rd = instr->RdField(); - switch (instr->Bits(7, 4)) { - case CLZ: { - uint32_t bits = get_register(rm); - int leading_zeros = 0; - if (bits == 0) { - leading_zeros = 32; - } else { - while ((bits & 0x80000000u) == 0) { - bits <<= 1; - leading_zeros++; - } - } - set_register(rd, leading_zeros); - break; - } - default: - UNIMPLEMENTED(); - } + // Other instructions matching this pattern are handled in the + // miscellaneous instructions part above. + UNREACHABLE(); } break; } @@ -1798,6 +1852,7 @@ void Simulator::DecodeType3(Instr* instr) { break; } case 3: { + // UBFX. if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) { uint32_t widthminus1 = static_cast(instr->Bits(20, 16)); uint32_t lsbit = static_cast(instr->ShiftAmountField()); diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index 1e3a865658b..cf33e369452 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -37,34 +37,126 @@ namespace internal { #define __ ACCESS_MASM(masm()) -void VirtualFrame::SyncElementBelowStackPointer(int index) { - UNREACHABLE(); +void VirtualFrame::PopToR1R0() { + VirtualFrame where_to_go = *this; + // Shuffle things around so the top of stack is in r0 and r1. 
+ where_to_go.top_of_stack_state_ = R0_R1_TOS;
+ MergeTo(&where_to_go);
+ // Pop the two registers off the stack so they are detached from the frame.
+ element_count_ -= 2;
+ top_of_stack_state_ = NO_TOS_REGISTERS;
}
-void VirtualFrame::SyncElementByPushing(int index) {
- UNREACHABLE();
+void VirtualFrame::PopToR1() {
+ VirtualFrame where_to_go = *this;
+ // Shuffle things around so the top of stack is only in r1.
+ where_to_go.top_of_stack_state_ = R1_TOS;
+ MergeTo(&where_to_go);
+ // Pop the register off the stack so it is detached from the frame.
+ element_count_ -= 1;
+ top_of_stack_state_ = NO_TOS_REGISTERS;
}
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- // ARM frames are currently always in memory.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- UNREACHABLE();
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- UNREACHABLE();
+void VirtualFrame::PopToR0() {
+ VirtualFrame where_to_go = *this;
+ // Shuffle things around so the top of stack is only in r0.
+ where_to_go.top_of_stack_state_ = R0_TOS;
+ MergeTo(&where_to_go);
+ // Pop the register off the stack so it is detached from the frame.
+ element_count_ -= 1;
+ top_of_stack_state_ = NO_TOS_REGISTERS;
}
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- UNREACHABLE();
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ if (Equals(expected)) return;
+#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
+ switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
+ case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
+ break;
+ case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
+ __ pop(r0);
+ break;
+ case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
+ __ pop(r1);
+ break;
+ case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
+ __ push(r0);
+ break;
+ case CASE_NUMBER(R0_TOS, R0_TOS):
+ break;
+ case CASE_NUMBER(R0_TOS, R1_TOS):
+ __ mov(r1, r0);
+ break;
+ case CASE_NUMBER(R0_TOS, R0_R1_TOS):
+ __ pop(r1);
+ break;
+ case CASE_NUMBER(R0_TOS, R1_R0_TOS):
+ __ mov(r1, r0);
+ __ pop(r0);
+ break;
+ case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
+ __ push(r1);
+ break;
+ case CASE_NUMBER(R1_TOS, R0_TOS):
+ __ mov(r0, r1);
+ break;
+ case CASE_NUMBER(R1_TOS, R1_TOS):
+ break;
+ case CASE_NUMBER(R1_TOS, R0_R1_TOS):
+ __ mov(r0, r1);
+ __ pop(r1);
+ break;
+ case CASE_NUMBER(R1_TOS, R1_R0_TOS):
+ __ pop(r0);
+ break;
+ case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
+ __ push(r1);
+ __ push(r0);
+ break;
+ case CASE_NUMBER(R0_R1_TOS, R0_TOS):
+ __ push(r1);
+ break;
+ case CASE_NUMBER(R0_R1_TOS, R1_TOS):
+ __ push(r1);
+ __ mov(r1, r0);
+ break;
+ case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
+ break;
+ case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
+ __ Swap(r0, r1, ip);
+ break;
+ case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
+ __ push(r0);
+ __ push(r1);
+ break;
+ case CASE_NUMBER(R1_R0_TOS, R0_TOS):
+ __ push(r0);
+ __ mov(r0, r1);
+ break;
+ case CASE_NUMBER(R1_R0_TOS, R1_TOS):
+ __ push(r0);
+ break;
+ case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
+ __ Swap(r0, r1, ip);
+ break;
+ case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
+ break;
+ default:
+ UNREACHABLE();
+#undef CASE_NUMBER
+ }
+ ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
@@ -92,8 +184,6 @@ void VirtualFrame::Enter() {
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
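// A stand-alone sketch of the top-of-stack state machine that MergeTo
// walks above (illustrative only, not part of the patch). The state order
// and the pop table mirror the TopOfStack enum and kStateAfterPop defined
// further down in this file's diff: popping from a two-register state
// leaves its bottom register as the new top, while popping anything else
// falls back to the in-memory state.
#include <cassert>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
                  TOS_STATES };

const TopOfStack kAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };

int main() {
  // R1_R0_TOS keeps r1 on top and r0 beneath it, so a pop exposes r0.
  assert(kAfterPop[R1_R0_TOS] == R0_TOS);
  // A lone TOS register is simply forgotten; no memory traffic is needed.
  assert(kAfterPop[R0_TOS] == NO_TOS_REGISTERS);
  // Once everything is in memory, a pop must really adjust sp (see Pop()).
  assert(kAfterPop[NO_TOS_REGISTERS] == NO_TOS_REGISTERS);
  return 0;
}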
// Adjust FP to point to saved FP. __ add(fp, sp, Operand(2 * kPointerSize)); - cgen()->allocator()->Unuse(r1); - cgen()->allocator()->Unuse(lr); } @@ -152,37 +242,11 @@ void VirtualFrame::AllocateStackSlots() { -void VirtualFrame::SaveContextRegister() { - UNIMPLEMENTED(); -} - - -void VirtualFrame::RestoreContextRegister() { - UNIMPLEMENTED(); -} - - void VirtualFrame::PushReceiverSlotAddress() { UNIMPLEMENTED(); } -int VirtualFrame::InvalidateFrameSlotAt(int index) { - UNIMPLEMENTED(); - return kIllegalIndex; -} - - -void VirtualFrame::TakeFrameSlotAt(int index) { - UNIMPLEMENTED(); -} - - -void VirtualFrame::StoreToFrameSlotAt(int index) { - UNIMPLEMENTED(); -} - - void VirtualFrame::PushTryHandler(HandlerType type) { // Grow the expression stack by handler size less one (the return // address in lr is already counted by a call instruction). @@ -191,6 +255,20 @@ void VirtualFrame::PushTryHandler(HandlerType type) { } +void VirtualFrame::CallJSFunction(int arg_count) { + // InvokeFunction requires function in r1. + EmitPop(r1); + + // +1 for receiver. + Forget(arg_count + 1); + ASSERT(cgen()->HasValidEntryRegisters()); + ParameterCount count(arg_count); + __ InvokeFunction(r1, count, CALL_FUNCTION); + // Restore the context. + __ ldr(cp, Context()); +} + + void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) { Forget(arg_count); ASSERT(cgen()->HasValidEntryRegisters()); @@ -247,52 +325,192 @@ void VirtualFrame::CallCodeObject(Handle code, } +// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS. +const bool VirtualFrame::kR0InUse[TOS_STATES] = + { false, true, false, true, true }; +const bool VirtualFrame::kR1InUse[TOS_STATES] = + { false, false, true, true, true }; +const int VirtualFrame::kVirtualElements[TOS_STATES] = + { 0, 1, 1, 2, 2 }; +const Register VirtualFrame::kTopRegister[TOS_STATES] = + { r0, r0, r1, r1, r0 }; +const Register VirtualFrame::kBottomRegister[TOS_STATES] = + { r0, r0, r1, r0, r1 }; +const Register VirtualFrame::kAllocatedRegisters[ + VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 }; +// Popping is done by the transition implied by kStateAfterPop. Of course if +// there were no stack slots allocated to registers then the physical SP must +// be adjusted. +const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] = + { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS }; +// Pushing is done by the transition implied by kStateAfterPush. Of course if +// the maximum number of registers was already allocated to the top of stack +// slots then one register must be physically pushed onto the stack. +const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] = + { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS }; + + +bool VirtualFrame::SpilledScope::is_spilled_ = false; + + void VirtualFrame::Drop(int count) { ASSERT(count >= 0); ASSERT(height() >= count); - int num_virtual_elements = (element_count() - 1) - stack_pointer_; - - // Emit code to lower the stack pointer if necessary. - if (num_virtual_elements < count) { - int num_dropped = count - num_virtual_elements; - stack_pointer_ -= num_dropped; - __ add(sp, sp, Operand(num_dropped * kPointerSize)); - } - // Discard elements from the virtual frame and free any registers. 
+ int num_virtual_elements = kVirtualElements[top_of_stack_state_]; + while (num_virtual_elements > 0) { + Pop(); + num_virtual_elements--; + count--; + if (count == 0) return; + } + if (count == 0) return; + __ add(sp, sp, Operand(count * kPointerSize)); element_count_ -= count; } -Result VirtualFrame::Pop() { - UNIMPLEMENTED(); - return Result(); +void VirtualFrame::Pop() { + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ add(sp, sp, Operand(kPointerSize)); + } else { + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + element_count_--; } void VirtualFrame::EmitPop(Register reg) { - ASSERT(stack_pointer_ == element_count() - 1); - stack_pointer_--; + ASSERT(!is_used(reg)); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ pop(reg); + } else { + __ mov(reg, kTopRegister[top_of_stack_state_]); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + element_count_--; +} + + +Register VirtualFrame::Peek() { + AssertIsNotSpilled(); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register answer = kTopRegister[top_of_stack_state_]; + __ pop(answer); + return answer; + } else { + return kTopRegister[top_of_stack_state_]; + } +} + + +Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { + ASSERT(but_not_to_this_one.is(r0) || + but_not_to_this_one.is(r1) || + but_not_to_this_one.is(no_reg)); + AssertIsNotSpilled(); element_count_--; - __ pop(reg); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (but_not_to_this_one.is(r0)) { + __ pop(r1); + return r1; + } else { + __ pop(r0); + return r0; + } + } else { + Register answer = kTopRegister[top_of_stack_state_]; + ASSERT(!answer.is(but_not_to_this_one)); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + return answer; + } +} + + +void VirtualFrame::EnsureOneFreeTOSRegister() { + if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) { + __ push(kBottomRegister[top_of_stack_state_]); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters); } void VirtualFrame::EmitPush(Register reg) { - ASSERT(stack_pointer_ == element_count() - 1); element_count_++; - stack_pointer_++; - __ push(reg); + if (SpilledScope::is_spilled()) { + __ push(reg); + return; + } + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (reg.is(r0)) { + top_of_stack_state_ = R0_TOS; + return; + } + if (reg.is(r1)) { + top_of_stack_state_ = R1_TOS; + return; + } + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register dest = kTopRegister[top_of_stack_state_]; + __ Move(dest, reg); +} + + +Register VirtualFrame::GetTOSRegister() { + if (SpilledScope::is_spilled()) return r0; + + EnsureOneFreeTOSRegister(); + return kTopRegister[kStateAfterPush[top_of_stack_state_]]; +} + + +void VirtualFrame::EmitPush(MemOperand operand) { + element_count_++; + if (SpilledScope::is_spilled()) { + __ ldr(r0, operand); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ ldr(kTopRegister[top_of_stack_state_], operand); } void VirtualFrame::EmitPushMultiple(int count, int src_regs) { - ASSERT(stack_pointer_ == element_count() - 1); + ASSERT(SpilledScope::is_spilled()); Adjust(count); __ stm(db_w, sp, src_regs); } +void VirtualFrame::SpillAll() { + switch (top_of_stack_state_) { + case R1_R0_TOS: + 
masm()->push(r0); + // Fall through. + case R1_TOS: + masm()->push(r1); + top_of_stack_state_ = NO_TOS_REGISTERS; + break; + case R0_R1_TOS: + masm()->push(r1); + // Fall through. + case R0_TOS: + masm()->push(r0); + top_of_stack_state_ = NO_TOS_REGISTERS; + // Fall through. + case NO_TOS_REGISTERS: + break; + } + ASSERT(register_allocation_map_ == 0); // Not yet implemented. +} + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index 6ba1eecc019..1350677b5e7 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -45,14 +45,69 @@ namespace internal { class VirtualFrame : public ZoneObject { public: + class RegisterAllocationScope; // A utility class to introduce a scope where the virtual frame is // expected to remain spilled. The constructor spills the code - // generator's current frame, but no attempt is made to require it - // to stay spilled. It is intended as documentation while the code - // generator is being transformed. + // generator's current frame, and keeps it spilled. class SpilledScope BASE_EMBEDDED { public: - SpilledScope() {} + explicit SpilledScope(VirtualFrame* frame) + : old_is_spilled_(is_spilled_) { + if (frame != NULL) { + if (!is_spilled_) { + frame->SpillAll(); + } else { + frame->AssertIsSpilled(); + } + } + is_spilled_ = true; + } + ~SpilledScope() { + is_spilled_ = old_is_spilled_; + } + static bool is_spilled() { return is_spilled_; } + + private: + static bool is_spilled_; + int old_is_spilled_; + + SpilledScope() { } + + friend class RegisterAllocationScope; + }; + + class RegisterAllocationScope BASE_EMBEDDED { + public: + // A utility class to introduce a scope where the virtual frame + // is not spilled, ie. where register allocation occurs. Eventually + // when RegisterAllocationScope is ubiquitous it can be removed + // along with the (by then unused) SpilledScope class. + explicit RegisterAllocationScope(CodeGenerator* cgen) + : cgen_(cgen), + old_is_spilled_(SpilledScope::is_spilled_) { + SpilledScope::is_spilled_ = false; + if (old_is_spilled_) { + VirtualFrame* frame = cgen->frame(); + if (frame != NULL) { + frame->AssertIsSpilled(); + } + } + } + ~RegisterAllocationScope() { + SpilledScope::is_spilled_ = old_is_spilled_; + if (old_is_spilled_) { + VirtualFrame* frame = cgen_->frame(); + if (frame != NULL) { + frame->SpillAll(); + } + } + } + + private: + CodeGenerator* cgen_; + bool old_is_spilled_; + + RegisterAllocationScope() { } }; // An illegal index into the virtual frame. @@ -75,27 +130,38 @@ class VirtualFrame : public ZoneObject { return element_count() - expression_base_index(); } - int register_location(int num) { - ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); - return register_locations_[num]; - } - - int register_location(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)]; - } - - void set_register_location(Register reg, int index) { - register_locations_[RegisterAllocator::ToNumber(reg)] = index; - } - bool is_used(int num) { - ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); - return register_locations_[num] != kIllegalIndex; + switch (num) { + case 0: { // r0. + return kR0InUse[top_of_stack_state_]; + } + case 1: { // r1. + return kR1InUse[top_of_stack_state_]; + } + case 2: + case 3: + case 4: + case 5: + case 6: { // r2 to r6. 
+ ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters); + ASSERT(num >= kFirstAllocatedRegister); + if ((register_allocation_map_ & + (1 << (num - kFirstAllocatedRegister))) == 0) { + return false; + } else { + return true; + } + } + default: { + ASSERT(num < kFirstAllocatedRegister || + num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters); + return false; + } + } } bool is_used(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)] - != kIllegalIndex; + return is_used(RegisterAllocator::ToNumber(reg)); } // Add extra in-memory elements to the top of the frame to match an actual @@ -104,39 +170,35 @@ class VirtualFrame : public ZoneObject { void Adjust(int count); // Forget elements from the top of the frame to match an actual frame (eg, - // the frame after a runtime call). No code is emitted. + // the frame after a runtime call). No code is emitted except to bring the + // frame to a spilled state. void Forget(int count) { - ASSERT(count >= 0); - ASSERT(stack_pointer_ == element_count() - 1); - stack_pointer_ -= count; - // On ARM, all elements are in memory, so there is no extra bookkeeping - // (registers, copies, etc.) beyond dropping the elements. + SpillAll(); element_count_ -= count; } - // Forget count elements from the top of the frame and adjust the stack - // pointer downward. This is used, for example, before merging frames at - // break, continue, and return targets. - void ForgetElements(int count); - // Spill all values from the frame to memory. - inline void SpillAll(); + void SpillAll(); + + void AssertIsSpilled() { + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + ASSERT(register_allocation_map_ == 0); + } + + void AssertIsNotSpilled() { + ASSERT(!SpilledScope::is_spilled()); + } // Spill all occurrences of a specific register from the frame. void Spill(Register reg) { - if (is_used(reg)) SpillElementAt(register_location(reg)); + UNIMPLEMENTED(); } // Spill all occurrences of an arbitrary register if possible. Return the // register spilled or no_reg if it was not possible to free any register - // (ie, they all have frame-external references). + // (ie, they all have frame-external references). Unimplemented. Register SpillAnyRegister(); - // Prepare this virtual frame for merging to an expected frame by - // performing some state changes that do not require generating - // code. It is guaranteed that no code will be generated. - void PrepareMergeTo(VirtualFrame* expected); - // Make this virtual frame have a state identical to an expected virtual // frame. As a side effect, code may be emitted to make this frame match // the expected one. @@ -147,10 +209,7 @@ class VirtualFrame : public ZoneObject { // registers. Used when the code generator's frame is switched from this // one to NULL by an unconditional jump. void DetachFromCodeGenerator() { - RegisterAllocator* cgen_allocator = cgen()->allocator(); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i)) cgen_allocator->Unuse(i); - } + AssertIsSpilled(); } // (Re)attach a frame to its code generator. This informs the register @@ -158,10 +217,7 @@ class VirtualFrame : public ZoneObject { // Used when a code generator's frame is switched from NULL to this one by // binding a label. 
void AttachToCodeGenerator() { - RegisterAllocator* cgen_allocator = cgen()->allocator(); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i)) cgen_allocator->Unuse(i); - } + AssertIsSpilled(); } // Emit code for the physical JS entry and exit frame sequences. After @@ -184,23 +240,17 @@ class VirtualFrame : public ZoneObject { void AllocateStackSlots(); // The current top of the expression stack as an assembly operand. - MemOperand Top() { return MemOperand(sp, 0); } + MemOperand Top() { + AssertIsSpilled(); + return MemOperand(sp, 0); + } // An element of the expression stack as an assembly operand. MemOperand ElementAt(int index) { + AssertIsSpilled(); return MemOperand(sp, index * kPointerSize); } - // Random-access store to a frame-top relative frame element. The result - // becomes owned by the frame and is invalidated. - void SetElementAt(int index, Result* value); - - // Set a frame element to a constant. The index is frame-top relative. - void SetElementAt(int index, Handle value) { - Result temp(value); - SetElementAt(index, &temp); - } - // A frame-allocated local as an assembly operand. MemOperand LocalAt(int index) { ASSERT(0 <= index); @@ -208,13 +258,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, kLocal0Offset - index * kPointerSize); } - // Push the value of a local frame slot on top of the frame and invalidate - // the local slot. The slot should be written to before trying to read - // from it again. - void TakeLocalAt(int index) { - TakeFrameSlotAt(local0_index() + index); - } - // Push the address of the receiver slot on the frame. void PushReceiverSlotAddress(); @@ -224,13 +267,6 @@ class VirtualFrame : public ZoneObject { // The context frame slot. MemOperand Context() { return MemOperand(fp, kContextOffset); } - // Save the value of the esi register to the context frame slot. - void SaveContextRegister(); - - // Restore the esi register from the value of the context frame - // slot. - void RestoreContextRegister(); - // A parameter as an assembly operand. MemOperand ParameterAt(int index) { // Index -1 corresponds to the receiver. @@ -239,19 +275,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); } - // Push the value of a paramter frame slot on top of the frame and - // invalidate the parameter slot. The slot should be written to before - // trying to read from it again. - void TakeParameterAt(int index) { - TakeFrameSlotAt(param0_index() + index); - } - - // Store the top value on the virtual frame into a parameter frame slot. - // The value is left in place on top of the frame. - void StoreToParameterAt(int index) { - StoreToFrameSlotAt(param0_index() + index); - } - // The receiver frame slot. MemOperand Receiver() { return ParameterAt(-1); } @@ -261,11 +284,15 @@ class VirtualFrame : public ZoneObject { // Call stub given the number of arguments it expects on (and // removes from) the stack. void CallStub(CodeStub* stub, int arg_count) { - Forget(arg_count); + if (arg_count != 0) Forget(arg_count); ASSERT(cgen()->HasValidEntryRegisters()); masm()->CallStub(stub); } + // Call JS function from top of the stack with arguments + // taken from the stack. + void CallJSFunction(int arg_count); + // Call runtime given the number of arguments expected on (and // removed from) the stack. void CallRuntime(Runtime::Function* f, int arg_count); @@ -296,34 +323,49 @@ class VirtualFrame : public ZoneObject { // Drop one element. 
void Drop() { Drop(1); } - // Pop an element from the top of the expression stack. Returns a - // Result, which may be a constant or a register. - Result Pop(); + // Pop an element from the top of the expression stack. Discards + // the result. + void Pop(); + + // Pop an element from the top of the expression stack. The register + // will be one normally used for the top of stack register allocation + // so you can't hold on to it if you push on the stack. + Register PopToRegister(Register but_not_to_this_one = no_reg); + + // Look at the top of the stack. The register returned is aliased and + // must be copied to a scratch register before modification. + Register Peek(); // Pop and save an element from the top of the expression stack and // emit a corresponding pop instruction. void EmitPop(Register reg); + // Takes the top two elements and puts them in r0 (top element) and r1 + // (second element). + void PopToR1R0(); + + // Takes the top element and puts it in r1. + void PopToR1(); + + // Takes the top element and puts it in r0. + void PopToR0(); + // Push an element on top of the expression stack and emit a // corresponding push instruction. void EmitPush(Register reg); + void EmitPush(MemOperand operand); + + // Get a register which is free and which must be immediately used to + // push on the top of the stack. + Register GetTOSRegister(); // Push multiple registers on the stack and the virtual frame // Register are selected by setting bit in src_regs and // are pushed in decreasing order: r15 .. r0. void EmitPushMultiple(int count, int src_regs); - // Push an element on the virtual frame. - inline void Push(Handle value); - inline void Push(Smi* value); - - // Nip removes zero or more elements from immediately below the top - // of the frame, leaving the previous top-of-frame value on top of - // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). - inline void Nip(int num_dropped); - - inline void SetTypeForLocalAt(int index, TypeInfo info); - inline void SetTypeForParamAt(int index, TypeInfo info); + static Register scratch0() { return r7; } + static Register scratch1() { return r9; } private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; @@ -333,16 +375,40 @@ class VirtualFrame : public ZoneObject { static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize; static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots. + // 5 states for the top of stack, which can be in memory or in r0 and r1. + enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS, + TOS_STATES}; + static const int kMaxTOSRegisters = 2; + + static const bool kR0InUse[TOS_STATES]; + static const bool kR1InUse[TOS_STATES]; + static const int kVirtualElements[TOS_STATES]; + static const TopOfStack kStateAfterPop[TOS_STATES]; + static const TopOfStack kStateAfterPush[TOS_STATES]; + static const Register kTopRegister[TOS_STATES]; + static const Register kBottomRegister[TOS_STATES]; + + // We allocate up to 5 locals in registers. + static const int kNumberOfAllocatedRegisters = 5; + // r2 to r6 are allocated to locals. + static const int kFirstAllocatedRegister = 2; + + static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters]; + + static Register AllocatedRegister(int r) { + ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters); + return kAllocatedRegisters[r]; + } + // The number of elements on the stack frame. 
int element_count_; + TopOfStack top_of_stack_state_:3; + int register_allocation_map_:kNumberOfAllocatedRegisters; // The index of the element that is at the processor's stack pointer - // (the sp register). - int stack_pointer_; - - // The index of the register frame element using each register, or - // kIllegalIndex if a register is not on the frame. - int register_locations_[RegisterAllocator::kNumRegisters]; + // (the sp register). For now since everything is in memory it is given + // by the number of elements on the not-very-virtual stack frame. + int stack_pointer() { return element_count_ - 1; } // The number of frame-allocated locals and parameters respectively. int parameter_count() { return cgen()->scope()->num_parameters(); } @@ -380,80 +446,15 @@ class VirtualFrame : public ZoneObject { return (frame_pointer() - index) * kPointerSize; } - // Record an occurrence of a register in the virtual frame. This has the - // effect of incrementing the register's external reference count and - // of updating the index of the register's location in the frame. - void Use(Register reg, int index) { - ASSERT(!is_used(reg)); - set_register_location(reg, index); - cgen()->allocator()->Use(reg); - } - - // Record that a register reference has been dropped from the frame. This - // decrements the register's external reference count and invalidates the - // index of the register's location in the frame. - void Unuse(Register reg) { - ASSERT(is_used(reg)); - set_register_location(reg, kIllegalIndex); - cgen()->allocator()->Unuse(reg); - } - - // Spill the element at a particular index---write it to memory if - // necessary, free any associated register, and forget its value if - // constant. - void SpillElementAt(int index); - - // Sync the element at a particular index. If it is a register or - // constant that disagrees with the value on the stack, write it to memory. - // Keep the element type as register or constant, and clear the dirty bit. - void SyncElementAt(int index); - - // Sync a single unsynced element that lies beneath or at the stack pointer. - void SyncElementBelowStackPointer(int index); - - // Sync a single unsynced element that lies just above the stack pointer. - void SyncElementByPushing(int index); - - // Push a the value of a frame slot (typically a local or parameter) on - // top of the frame and invalidate the slot. - void TakeFrameSlotAt(int index); - - // Store the value on top of the frame to a frame slot (typically a local - // or parameter). - void StoreToFrameSlotAt(int index); - // Spill all elements in registers. Spill the top spilled_args elements // on the frame. Sync all other frame elements. // Then drop dropped_args elements from the virtual frame, to match // the effect of an upcoming call that will drop them from the stack. void PrepareForCall(int spilled_args, int dropped_args); - // Move frame elements currently in registers or constants, that - // should be in memory in the expected frame, to memory. - void MergeMoveRegistersToMemory(VirtualFrame* expected); - - // Make the register-to-register moves necessary to - // merge this frame with the expected frame. - // Register to memory moves must already have been made, - // and memory to register moves must follow this call. - // This is because some new memory-to-register moves are - // created in order to break cycles of register moves. - // Used in the implementation of MergeTo(). 
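Packing the state into bitfields keeps the per-frame bookkeeping to a couple of words: three bits cover the five TOS states, and one bit per allocated register records which of the locals registers (r2..r6 in the patch) currently hold a local. A small sketch with illustrative field names:

#include <cstdio>

struct FrameState {
  unsigned top_of_stack_state : 3;       // Five states fit in 3 bits.
  unsigned register_allocation_map : 5;  // One bit per allocated register.
};

int main() {
  FrameState s = { 0, 0 };
  s.register_allocation_map |= (1u << 2);        // Mark the third register.
  bool used = (s.register_allocation_map >> 2) & 1u;
  std::printf("size=%zu bytes, reg2 used=%d\n", sizeof(FrameState), (int)used);
  return 0;
}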
- void MergeMoveRegistersToRegisters(VirtualFrame* expected); - - // Make the memory-to-register and constant-to-register moves - // needed to make this frame equal the expected frame. - // Called after all register-to-memory and register-to-register - // moves have been made. After this function returns, the frames - // should be equal. - void MergeMoveMemoryToRegisters(VirtualFrame* expected); - - // Invalidates a frame slot (puts an invalid frame element in it). - // Copies on the frame are correctly handled, and if this slot was - // the backing store of copies, the index of the new backing store - // is returned. Otherwise, returns kIllegalIndex. - // Register counts are correctly updated. - int InvalidateFrameSlotAt(int index); + // If all top-of-stack registers are in use then the lowest one is pushed + // onto the physical stack and made free. + void EnsureOneFreeTOSRegister(); inline bool Equals(VirtualFrame* other); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index a29015a5ef8..54d7e57b833 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -644,16 +644,62 @@ function ArraySort(comparefn) { // In-place QuickSort algorithm. // For short (length <= 22) arrays, insertion sort is used for efficiency. - var custom_compare = IS_FUNCTION(comparefn); + var global_receiver; + + function InsertionSortWithFunc(a, from, to) { + for (var i = from + 1; i < to; i++) { + var element = a[i]; + for (var j = i - 1; j >= from; j--) { + var tmp = a[j]; + var order = %_CallFunction(global_receiver, tmp, element, comparefn); + if (order > 0) { + a[j + 1] = tmp; + } else { + break; + } + } + a[j + 1] = element; + } + } + + function QuickSortWithFunc(a, from, to) { + // Insertion sort is faster for short arrays. + if (to - from <= 22) { + InsertionSortWithFunc(a, from, to); + return; + } + var pivot_index = $floor($random() * (to - from)) + from; + var pivot = a[pivot_index]; + // Issue 95: Keep the pivot element out of the comparisons to avoid + // infinite recursion if comparefn(pivot, pivot) != 0. + a[pivot_index] = a[from]; + a[from] = pivot; + var low_end = from; // Upper bound of the elements lower than pivot. + var high_start = to; // Lower bound of the elements greater than pivot. + // From low_end to i are elements equal to pivot. + // From i to high_start are elements that haven't been compared yet. + for (var i = from + 1; i < high_start; ) { + var element = a[i]; + var order = %_CallFunction(global_receiver, element, pivot, comparefn); + if (order < 0) { + a[i] = a[low_end]; + a[low_end] = element; + i++; + low_end++; + } else if (order > 0) { + high_start--; + a[i] = a[high_start]; + a[high_start] = element; + } else { // order == 0 + i++; + } + } + QuickSortWithFunc(a, from, low_end); + QuickSortWithFunc(a, high_start, to); + } function Compare(x,y) { - // Assume the comparefn, if any, is a consistent comparison function. - // If it isn't, we are allowed arbitrary behavior by ECMA 15.4.4.11. if (x === y) return 0; - if (custom_compare) { - // Don't call directly to avoid exposing the builtin's global object. - return comparefn.call(null, x, y); - } if (%_IsSmi(x) && %_IsSmi(y)) { return %SmiLexicographicCompare(x, y); } @@ -666,33 +712,17 @@ function ArraySort(comparefn) { function InsertionSort(a, from, to) { for (var i = from + 1; i < to; i++) { var element = a[i]; - // Pre-convert the element to a string for comparison if we know - // it will happen on each compare anyway. - var key = - (custom_compare || %_IsSmi(element)) ? 
element : ToString(element); - // place element in a[from..i[ - // binary search - var min = from; - var max = i; - // The search interval is a[min..max[ - while (min < max) { - var mid = min + ((max - min) >> 1); - var order = Compare(a[mid], key); - if (order == 0) { - min = max = mid; - break; - } - if (order < 0) { - min = mid + 1; + var key = %_IsSmi(element) ? element : ToString(element); + for (var j = i - 1; j >= from; j--) { + var tmp = a[j]; + var order = Compare(tmp, key); + if (order > 0) { + a[j + 1] = tmp; } else { - max = mid; + break; } } - // place element at position min==max. - for (var j = i; j > min; j--) { - a[j] = a[j - 1]; - } - a[min] = element; + a[j + 1] = element; } } @@ -706,8 +736,7 @@ function ArraySort(comparefn) { var pivot = a[pivot_index]; // Pre-convert the element to a string for comparison if we know // it will happen on each compare anyway. - var pivot_key = - (custom_compare || %_IsSmi(pivot)) ? pivot : ToString(pivot); + var pivot_key = %_IsSmi(pivot) ? pivot : ToString(pivot); // Issue 95: Keep the pivot element out of the comparisons to avoid // infinite recursion if comparefn(pivot, pivot) != 0. a[pivot_index] = a[from]; @@ -736,8 +765,6 @@ function ArraySort(comparefn) { QuickSort(a, high_start, to); } - var length; - // Copies elements in the range 0..length from obj's prototype chain // to obj itself, if obj has holes. Returns one more than the maximal index // of a prototype property. @@ -855,7 +882,7 @@ function ArraySort(comparefn) { return first_undefined; } - length = TO_UINT32(this.length); + var length = TO_UINT32(this.length); if (length < 2) return this; var is_array = IS_ARRAY(this); @@ -880,7 +907,12 @@ function ArraySort(comparefn) { num_non_undefined = SafeRemoveArrayHoles(this); } - QuickSort(this, 0, num_non_undefined); + if(IS_FUNCTION(comparefn)) { + global_receiver = %GetGlobalReceiver(); + QuickSortWithFunc(this, 0, num_non_undefined); + } else { + QuickSort(this, 0, num_non_undefined); + } if (!is_array && (num_non_undefined + 1 < max_prototype_element)) { // For compatibility with JSC, we shadow any elements in the prototype @@ -1150,7 +1182,7 @@ function SetupArray() { "reduce", getFunction("reduce", ArrayReduce, 1), "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1) )); - + %FinishArrayPrototypeSetup($Array.prototype); } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index bb010c829f4..ac27a656ec7 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -574,8 +574,14 @@ ExternalReference ExternalReference::perform_gc_function() { } -ExternalReference ExternalReference::random_positive_smi_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi))); +ExternalReference ExternalReference::fill_heap_number_with_random_function() { + return + ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom))); +} + + +ExternalReference ExternalReference::random_uint32_function() { + return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random))); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index b4834e53f85..31ac44c8411 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -398,7 +398,8 @@ class ExternalReference BASE_EMBEDDED { // ExternalReferenceTable in serialize.cc manually. 
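The switch from RandomPositiveSmi to Random/FillHeapNumberWithRandom is about payload width: a Smi carries only 31 bits, so a full 32-bit random value has to be boxed as a heap number. A sketch of the idea, using a stand-in xorshift generator rather than V8's actual source of bits:

#include <cstdint>
#include <cstdio>

static uint32_t state = 0x2545F491u;

uint32_t RandomUint32() {  // xorshift32, for demonstration only.
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

double FillWithRandom() {
  // Map all 32 bits into [0, 1); dividing by 2^32 keeps every bit.
  return RandomUint32() / 4294967296.0;
}

int main() {
  for (int i = 0; i < 3; i++) std::printf("%.10f\n", FillWithRandom());
  return 0;
}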
static ExternalReference perform_gc_function(); - static ExternalReference random_positive_smi_function(); + static ExternalReference fill_heap_number_with_random_function(); + static ExternalReference random_uint32_function(); static ExternalReference transcendental_cache_array_address(); // Static data in the keyed lookup cache. diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 9204a840c84..75b2945d9cb 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -47,11 +47,8 @@ Call Call::sentinel_(NULL, NULL, 0); // ---------------------------------------------------------------------------- // All the Accept member functions for each syntax tree node type. -#define DECL_ACCEPT(type) \ - void type::Accept(AstVisitor* v) { \ - if (v->CheckStackOverflow()) return; \ - v->Visit##type(this); \ - } +#define DECL_ACCEPT(type) \ + void type::Accept(AstVisitor* v) { v->Visit##type(this); } AST_NODE_LIST(DECL_ACCEPT) #undef DECL_ACCEPT @@ -241,6 +238,13 @@ bool Expression::GuaranteedSmiResult() { // ---------------------------------------------------------------------------- // Implementation of AstVisitor +bool AstVisitor::CheckStackOverflow() { + if (stack_overflow_) return true; + StackLimitCheck check; + if (!check.HasOverflowed()) return false; + return (stack_overflow_ = true); +} + void AstVisitor::VisitDeclarations(ZoneList* declarations) { for (int i = 0; i < declarations->length(); i++) { @@ -749,117 +753,6 @@ bool CompareOperation::IsCritical() { } -static inline void MarkIfNotLive(Expression* expr, List* stack) { - if (!expr->is_live()) { - expr->mark_as_live(); - stack->Add(expr); - } -} - - -// Overloaded functions for marking children of live code as live. -void VariableProxy::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - // A reference to a stack-allocated variable depends on all the - // definitions reaching it. - BitVector* defs = reaching_definitions(); - if (defs != NULL) { - ASSERT(var()->IsStackAllocated()); - // The first variable_count definitions are the initial parameter and - // local declarations. - for (int i = variable_count; i < defs->length(); i++) { - if (defs->Contains(i)) { - MarkIfNotLive(body_definitions->at(i - variable_count), stack); - } - } - } -} - - -void Literal::ProcessNonLiveChildren(List* stack, - ZoneList* body_definitions, - int variable_count) { - // Leaf node, no children. -} - - -void Assignment::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - Property* prop = target()->AsProperty(); - VariableProxy* proxy = target()->AsVariableProxy(); - - if (prop != NULL) { - if (!prop->key()->IsPropertyName()) MarkIfNotLive(prop->key(), stack); - MarkIfNotLive(prop->obj(), stack); - } else if (proxy == NULL) { - // Must be a reference error. - ASSERT(!target()->IsValidLeftHandSide()); - MarkIfNotLive(target(), stack); - } else if (is_compound()) { - // A variable assignment so lhs is an operand to the operation. 
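The CheckStackOverflow call that used to sit in every generated Accept body now runs once, in AstVisitor::Visit, before dispatch. A standalone model of that relocation, with StackLimitCheck simulated by a depth counter:

#include <cstdio>

struct Node;

struct Visitor {
  bool stack_overflow_ = false;
  int depth_ = 0;

  bool CheckStackOverflow() {
    if (stack_overflow_) return true;
    if (depth_ < 1000) return false;   // Stand-in for StackLimitCheck.
    return (stack_overflow_ = true);
  }

  void Visit(Node* node);
};

struct Node {
  Node* child = nullptr;
  void Accept(Visitor* v) {            // No per-node check needed anymore.
    if (child != nullptr) v->Visit(child);
  }
};

void Visitor::Visit(Node* node) {
  if (CheckStackOverflow()) return;    // One guard for all node types.
  ++depth_;
  node->Accept(this);
  --depth_;
}

int main() {
  Node a, b;
  a.child = &b;
  Visitor v;
  v.Visit(&a);
  std::printf("overflow=%d\n", (int)v.stack_overflow_);
  return 0;
}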
- MarkIfNotLive(target(), stack); - } - MarkIfNotLive(value(), stack); -} - - -void Property::ProcessNonLiveChildren(List* stack, - ZoneList* body_definitions, - int variable_count) { - if (!key()->IsPropertyName()) MarkIfNotLive(key(), stack); - MarkIfNotLive(obj(), stack); -} - - -void Call::ProcessNonLiveChildren(List* stack, - ZoneList* body_definitions, - int variable_count) { - ZoneList* args = arguments(); - for (int i = args->length() - 1; i >= 0; i--) { - MarkIfNotLive(args->at(i), stack); - } - MarkIfNotLive(expression(), stack); -} - - -void UnaryOperation::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - MarkIfNotLive(expression(), stack); -} - - -void CountOperation::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - MarkIfNotLive(expression(), stack); -} - - -void BinaryOperation::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - MarkIfNotLive(right(), stack); - MarkIfNotLive(left(), stack); -} - - -void CompareOperation::ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - MarkIfNotLive(right(), stack); - MarkIfNotLive(left(), stack); -} - - // Implementation of a copy visitor. The visitor create a deep copy // of ast nodes. Nodes that do not require a deep copy are copied // with the default copy constructor. @@ -963,13 +856,11 @@ UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression) : Expression(other), op_(other->op_), expression_(expression) {} -BinaryOperation::BinaryOperation(BinaryOperation* other, +BinaryOperation::BinaryOperation(Expression* other, + Token::Value op, Expression* left, Expression* right) - : Expression(other), - op_(other->op_), - left_(left), - right_(right) {} + : Expression(other), op_(op), left_(left), right_(right) {} CountOperation::CountOperation(CountOperation* other, Expression* expression) @@ -1221,6 +1112,7 @@ void CopyAstVisitor::VisitCountOperation(CountOperation* expr) { void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) { expr_ = new BinaryOperation(expr, + expr->op(), DeepCopyExpr(expr->left()), DeepCopyExpr(expr->right())); } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index fa85eee6fdf..dfc08ee0714 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -294,19 +294,6 @@ class Expression: public AstNode { bitfields_ |= NumBitOpsField::encode(num_bit_ops); } - // Functions used for dead-code elimination. Predicate is true if the - // expression is not dead code. - int is_live() const { return LiveField::decode(bitfields_); } - void mark_as_live() { bitfields_ |= LiveField::encode(true); } - - // Mark non-live children as live and push them on a stack for further - // processing. - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count) { - } - private: static const int kMaxNumBitOps = (1 << 5) - 1; @@ -319,7 +306,6 @@ class Expression: public AstNode { class ToInt32Field : public BitField {}; class NumBitOpsField : public BitField {}; class LoopConditionField: public BitField {}; - class LiveField: public BitField {}; }; @@ -907,10 +893,6 @@ class Literal: public Expression { virtual bool IsTrivial() { return true; } virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); // Identity testers. 
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } @@ -1118,10 +1100,6 @@ class VariableProxy: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); void SetIsPrimitive(bool value) { is_primitive_ = value; } @@ -1260,10 +1238,6 @@ class Property: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); Expression* obj() const { return obj_; } Expression* key() const { return key_; } @@ -1299,10 +1273,6 @@ class Call: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); Expression* expression() const { return expression_; } ZoneList* arguments() const { return arguments_; } @@ -1382,10 +1352,6 @@ class UnaryOperation: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); Token::Value op() const { return op_; } Expression* expression() const { return expression_; } @@ -1403,7 +1369,13 @@ class BinaryOperation: public Expression { ASSERT(Token::IsBinaryOp(op)); } - BinaryOperation(BinaryOperation* other, Expression* left, Expression* right); + // Construct a binary operation with a given operator and left and right + // subexpressions. The rest of the expression state is copied from + // another expression. + BinaryOperation(Expression* other, + Token::Value op, + Expression* left, + Expression* right); virtual void Accept(AstVisitor* v); @@ -1412,10 +1384,6 @@ class BinaryOperation: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); // True iff the result can be safely overwritten (to avoid allocation). // False for operations that can return one of their operands. @@ -1473,10 +1441,6 @@ class CountOperation: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); bool is_prefix() const { return is_prefix_; } bool is_postfix() const { return !is_prefix_; } @@ -1510,10 +1474,6 @@ class CompareOperation: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); Token::Value op() const { return op_; } Expression* left() const { return left_; } @@ -1568,10 +1528,6 @@ class Assignment: public Expression { virtual bool IsPrimitive(); virtual bool IsCritical(); - virtual void ProcessNonLiveChildren( - List* stack, - ZoneList* body_definitions, - int variable_count); Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; } @@ -2110,29 +2066,23 @@ class AstVisitor BASE_EMBEDDED { AstVisitor() : stack_overflow_(false) { } virtual ~AstVisitor() { } - // Dispatch - void Visit(AstNode* node) { node->Accept(this); } + // Stack overflow check and dynamic dispatch. + void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); } - // Iteration + // Iteration left-to-right. 
virtual void VisitDeclarations(ZoneList* declarations); virtual void VisitStatements(ZoneList* statements); virtual void VisitExpressions(ZoneList* expressions); // Stack overflow tracking support. bool HasStackOverflow() const { return stack_overflow_; } - bool CheckStackOverflow() { - if (stack_overflow_) return true; - StackLimitCheck check; - if (!check.HasOverflowed()) return false; - return (stack_overflow_ = true); - } + bool CheckStackOverflow(); // If a stack-overflow exception is encountered when visiting a // node, calling SetStackOverflow will make sure that the visitor // bails out without visiting more nodes. void SetStackOverflow() { stack_overflow_ = true; } - // Individual nodes #define DEF_VISIT(type) \ virtual void Visit##type(type* node) = 0; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 8a9fa4bf697..d88c8e7f0c3 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -723,8 +723,68 @@ void Genesis::InitializeGlobal(Handle inner_global, InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize, Top::initial_object_prototype(), Builtins::Illegal, true); - global_context()->set_regexp_function(*regexp_fun); + + ASSERT(regexp_fun->has_initial_map()); + Handle initial_map(regexp_fun->initial_map()); + + ASSERT_EQ(0, initial_map->inobject_properties()); + + Handle descriptors = Factory::NewDescriptorArray(5); + PropertyAttributes final = + static_cast(DONT_ENUM | DONT_DELETE | READ_ONLY); + int enum_index = 0; + { + // ECMA-262, section 15.10.7.1. + FieldDescriptor field(Heap::source_symbol(), + JSRegExp::kSourceFieldIndex, + final, + enum_index++); + descriptors->Set(0, &field); + } + { + // ECMA-262, section 15.10.7.2. + FieldDescriptor field(Heap::global_symbol(), + JSRegExp::kGlobalFieldIndex, + final, + enum_index++); + descriptors->Set(1, &field); + } + { + // ECMA-262, section 15.10.7.3. + FieldDescriptor field(Heap::ignore_case_symbol(), + JSRegExp::kIgnoreCaseFieldIndex, + final, + enum_index++); + descriptors->Set(2, &field); + } + { + // ECMA-262, section 15.10.7.4. + FieldDescriptor field(Heap::multiline_symbol(), + JSRegExp::kMultilineFieldIndex, + final, + enum_index++); + descriptors->Set(3, &field); + } + { + // ECMA-262, section 15.10.7.5. + PropertyAttributes writable = + static_cast(DONT_ENUM | DONT_DELETE); + FieldDescriptor field(Heap::last_index_symbol(), + JSRegExp::kLastIndexFieldIndex, + writable, + enum_index++); + descriptors->Set(4, &field); + } + descriptors->SetNextEnumerationIndex(enum_index); + descriptors->Sort(); + + initial_map->set_inobject_properties(5); + initial_map->set_pre_allocated_property_fields(5); + initial_map->set_unused_property_fields(0); + initial_map->set_instance_size( + initial_map->instance_size() + 5 * kPointerSize); + initial_map->set_instance_descriptors(*descriptors); } { // -- J S O N @@ -1177,6 +1237,62 @@ bool Genesis::InstallNatives() { apply->shared()->set_length(2); } + // Create a constructor for RegExp results (a variant of Array that + // predefines the two properties index and match). + { + // RegExpResult initial map. + + // Find global.Array.prototype to inherit from. + Handle array_constructor(global_context()->array_function()); + Handle array_prototype( + JSObject::cast(array_constructor->instance_prototype())); + + // Add initial map. + Handle initial_map = + Factory::NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize); + initial_map->set_constructor(*array_constructor); + + // Set prototype on map. 
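The descriptor block above gives each spec-mandated RegExp property an in-object slot, with lastIndex left writable while the other four are frozen. A compact model of that attribute split (the constants and layout are illustrative, not V8's actual encodings):

#include <cstdio>

enum PropertyAttributes {
  NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4
};

struct FieldDescriptor {
  const char* name;
  int index;       // In-object slot index.
  int attributes;
};

int main() {
  const int final_attrs = DONT_ENUM | DONT_DELETE | READ_ONLY;
  const int writable = DONT_ENUM | DONT_DELETE;
  FieldDescriptor fields[] = {
    { "source",     0, final_attrs },
    { "global",     1, final_attrs },
    { "ignoreCase", 2, final_attrs },
    { "multiline",  3, final_attrs },
    { "lastIndex",  4, writable },   // Writable so matching can advance it.
  };
  for (const FieldDescriptor& f : fields)
    std::printf("%-10s slot %d attrs 0x%x\n", f.name, f.index, f.attributes);
  return 0;
}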
+ initial_map->set_non_instance_prototype(false); + initial_map->set_prototype(*array_prototype); + + // Update map with length accessor from Array and add "index" and "input". + Handle array_map(global_context()->js_array_map()); + Handle array_descriptors( + array_map->instance_descriptors()); + ASSERT_EQ(1, array_descriptors->number_of_descriptors()); + + Handle reresult_descriptors = + Factory::NewDescriptorArray(3); + + reresult_descriptors->CopyFrom(0, *array_descriptors, 0); + + int enum_index = 0; + { + FieldDescriptor index_field(Heap::index_symbol(), + JSRegExpResult::kIndexIndex, + NONE, + enum_index++); + reresult_descriptors->Set(1, &index_field); + } + + { + FieldDescriptor input_field(Heap::input_symbol(), + JSRegExpResult::kInputIndex, + NONE, + enum_index++); + reresult_descriptors->Set(2, &input_field); + } + reresult_descriptors->Sort(); + + initial_map->set_inobject_properties(2); + initial_map->set_pre_allocated_property_fields(2); + initial_map->set_unused_property_fields(0); + initial_map->set_instance_descriptors(*reresult_descriptors); + + global_context()->set_regexp_result_map(*initial_map); + } + #ifdef DEBUG builtins->Verify(); #endif diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index feb912f4147..767820acb8c 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -268,9 +268,10 @@ static void CopyElements(AssertNoAllocation* no_gc, int src_index, int len) { ASSERT(dst != src); // Use MoveElements instead. - memcpy(dst->data_start() + dst_index, - src->data_start() + src_index, - len * kPointerSize); + ASSERT(len > 0); + CopyWords(dst->data_start() + dst_index, + src->data_start() + src_index, + len); WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc); if (mode == UPDATE_WRITE_BARRIER) { Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); @@ -299,6 +300,73 @@ static void FillWithHoles(FixedArray* dst, int from, int to) { } +static FixedArray* LeftTrimFixedArray(FixedArray* elms) { + // For now this trick is only applied to fixed arrays in new space. + // In large object space the object's start must coincide with chunk + // and thus the trick is just not applicable. + // In old space we do not use this trick to avoid dealing with + // remembered sets. + ASSERT(Heap::new_space()->Contains(elms)); + + STATIC_ASSERT(FixedArray::kMapOffset == 0); + STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize); + + Object** former_start = HeapObject::RawField(elms, 0); + + const int len = elms->length(); + + // Technically in new space this write might be omitted (except for + // debug mode which iterates through the heap), but to play safer + // we still do it. + former_start[0] = Heap::raw_unchecked_one_pointer_filler_map(); + + former_start[1] = Heap::fixed_array_map(); + former_start[2] = reinterpret_cast(len - 1); + + ASSERT_EQ(elms->address() + kPointerSize, (elms + kPointerSize)->address()); + return elms + kPointerSize; +} + + +static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) { + // For now this trick is only applied to fixed arrays in new space. + // In large object space the object's start must coincide with chunk + // and thus the trick is just not applicable. + // In old space we do not use this trick to avoid dealing with + // remembered sets. 
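The left-trim helper retires the first element without copying: it drops a one-pointer filler where the old map word was and rebuilds the map/length header one word later, so the heap stays walkable and the array simply begins at a new address. A standalone model on a plain word array, with fake map tags:

#include <cstdint>
#include <cstdio>

const intptr_t kFixedArrayMap = 0xA1;         // Fake tag values.
const intptr_t kOnePointerFillerMap = 0xF1;

int main() {
  // Layout: [map][length][e0][e1][e2] with length == 3.
  intptr_t words[] = { kFixedArrayMap, 3, 10, 20, 30 };

  intptr_t* former_start = words;
  intptr_t len = former_start[1];

  former_start[0] = kOnePointerFillerMap;  // Heap stays iterable.
  former_start[1] = kFixedArrayMap;        // New map, one word in.
  former_start[2] = len - 1;               // New length drops by one.

  intptr_t* trimmed = words + 1;           // elms + kPointerSize.
  std::printf("new length=%ld, first element=%ld\n",
              (long)trimmed[1], (long)trimmed[2]);
  return 0;
}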
+ ASSERT(Heap::new_space()->Contains(elms)); + + STATIC_ASSERT(FixedArray::kMapOffset == 0); + STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize); + + Object** former_start = HeapObject::RawField(elms, 0); + + const int len = elms->length(); + + // Technically in new space this write might be omitted (except for + // debug mode which iterates through the heap), but to play safer + // we still do it. + if (to_trim == 1) { + former_start[0] = Heap::raw_unchecked_one_pointer_filler_map(); + } else if (to_trim == 2) { + former_start[0] = Heap::raw_unchecked_two_pointer_filler_map(); + } else { + former_start[0] = Heap::raw_unchecked_byte_array_map(); + ByteArray* as_byte_array = reinterpret_cast(elms); + as_byte_array->set_length(ByteArray::LengthFor(to_trim * kPointerSize)); + } + + former_start[to_trim] = Heap::fixed_array_map(); + former_start[to_trim + 1] = reinterpret_cast(len - to_trim); + + ASSERT_EQ(elms->address() + to_trim * kPointerSize, + (elms + to_trim * kPointerSize)->address()); + return elms + to_trim * kPointerSize; +} + + static bool ArrayPrototypeHasNoElements() { // This method depends on non writability of Object and Array prototype // fields. @@ -390,7 +458,9 @@ BUILTIN(ArrayPush) { FixedArray* new_elms = FixedArray::cast(obj); AssertNoAllocation no_gc; - CopyElements(&no_gc, new_elms, 0, elms, 0, len); + if (len > 0) { + CopyElements(&no_gc, new_elms, 0, elms, 0, len); + } FillWithHoles(new_elms, new_length, capacity); elms = new_elms; @@ -443,38 +513,6 @@ BUILTIN(ArrayPop) { } -static FixedArray* LeftTrimFixedArray(FixedArray* elms) { - // For now this trick is only applied to fixed arrays in new space. - // In large object space the object's start must coincide with chunk - // and thus the trick is just not applicable. - // In old space we do not use this trick to avoid dealing with - // remembered sets. - ASSERT(Heap::new_space()->Contains(elms)); - - Object** former_map = - HeapObject::RawField(elms, FixedArray::kMapOffset); - Object** former_length = - HeapObject::RawField(elms, FixedArray::kLengthOffset); - Object** former_first = - HeapObject::RawField(elms, FixedArray::kHeaderSize); - // Check that we don't forget to copy all the bits. - STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize - == FixedArray::kHeaderSize); - - int len = elms->length(); - - *former_first = reinterpret_cast(len - 1); - *former_length = Heap::fixed_array_map(); - // Technically in new space this write might be omitted (except for - // debug mode which iterates through the heap), but to play safer - // we still do it. - *former_map = Heap::raw_unchecked_one_pointer_filler_map(); - - ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address()); - return elms + kPointerSize; -} - - BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; @@ -537,7 +575,9 @@ BUILTIN(ArrayUnshift) { FixedArray* new_elms = FixedArray::cast(obj); AssertNoAllocation no_gc; - CopyElements(&no_gc, new_elms, to_add, elms, 0, len); + if (len > 0) { + CopyElements(&no_gc, new_elms, to_add, elms, 0, len); + } FillWithHoles(new_elms, new_length, capacity); elms = new_elms; @@ -713,12 +753,27 @@ BUILTIN(ArraySplice) { if (item_count < actual_delete_count) { // Shrink the array. 
- AssertNoAllocation no_gc; - MoveElements(&no_gc, - elms, actual_start + item_count, - elms, actual_start + actual_delete_count, - (len - actual_delete_count - actual_start)); - FillWithHoles(elms, new_length, len); + const bool trim_array = Heap::new_space()->Contains(elms) && + ((actual_start + item_count) < + (len - actual_delete_count - actual_start)); + if (trim_array) { + const int delta = actual_delete_count - item_count; + + if (actual_start > 0) { + Object** start = elms->data_start(); + memmove(start + delta, start, actual_start * kPointerSize); + } + + elms = LeftTrimFixedArray(elms, delta); + array->set_elements(elms, SKIP_WRITE_BARRIER); + } else { + AssertNoAllocation no_gc; + MoveElements(&no_gc, + elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(elms, new_length, len); + } } else if (item_count > actual_delete_count) { // Currently fixed arrays cannot grow too big, so // we should never hit this case. @@ -734,11 +789,16 @@ BUILTIN(ArraySplice) { AssertNoAllocation no_gc; // Copy the part before actual_start as is. - CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start); - CopyElements(&no_gc, - new_elms, actual_start + item_count, - elms, actual_start + actual_delete_count, - (len - actual_delete_count - actual_start)); + if (actual_start > 0) { + CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start); + } + const int to_copy = len - actual_delete_count - actual_start; + if (to_copy > 0) { + CopyElements(&no_gc, + new_elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + to_copy); + } FillWithHoles(new_elms, new_length, capacity); elms = new_elms; @@ -812,10 +872,12 @@ BUILTIN(ArrayConcat) { int start_pos = 0; for (int i = 0; i < n_arguments; i++) { JSArray* array = JSArray::cast(args[i]); - FixedArray* elms = FixedArray::cast(array->elements()); int len = Smi::cast(array->length())->value(); - CopyElements(&no_gc, result_elms, start_pos, elms, 0, len); - start_pos += len; + if (len > 0) { + FixedArray* elms = FixedArray::cast(array->elements()); + CopyElements(&no_gc, result_elms, start_pos, elms, 0, len); + start_pos += len; + } } ASSERT(start_pos == result_len); @@ -1330,6 +1392,14 @@ static void Generate_Return_DebugBreak(MacroAssembler* masm) { static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) { Debug::GenerateStubNoRegistersDebugBreak(masm); } + +static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) { + Debug::GeneratePlainReturnLiveEdit(masm); +} + +static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) { + Debug::GenerateFrameDropperLiveEdit(masm); +} #endif Object* Builtins::builtins_[builtin_count] = { NULL, }; @@ -1431,8 +1501,8 @@ void Builtins::Setup(bool create_heap_objects) { } } // Log the event and add the code to the builtins array. 
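The trim_array condition is a cost comparison: shifting the short prefix forward and left-trimming beats sliding the whole tail backward whenever the prefix side is smaller, and the trick is only legal for new-space arrays. A sketch of just that decision:

#include <cstdio>

bool ShouldTrim(int len, int actual_start, int item_count,
                int actual_delete_count, bool in_new_space) {
  int front_cost = actual_start + item_count;                // memmove forward.
  int tail_cost = len - actual_delete_count - actual_start;  // memmove back.
  return in_new_space && front_cost < tail_cost;
}

int main() {
  // splice(1, 4) on a 100-element array: move 1 element, not 95.
  std::printf("trim=%d\n",
              (int)ShouldTrim(100, /*start*/ 1, /*items*/ 0,
                              /*deleted*/ 4, /*new space*/ true));
  return 0;
}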
- LOG(CodeCreateEvent(Logger::BUILTIN_TAG, - Code::cast(code), functions[i].s_name)); + PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG, + Code::cast(code), functions[i].s_name)); builtins_[i] = code; #ifdef ENABLE_DISASSEMBLER if (FLAG_print_builtin_code) { diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 1378c5455c6..ccb6c0c5338 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -126,7 +126,9 @@ enum BuiltinExtraArguments { V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \ V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \ - V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) + V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \ + V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \ + V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK) #else #define BUILTIN_LIST_DEBUG_A(V) #endif diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/circular-queue-inl.h index 962b069fb0f..90ab0f5c9bc 100644 --- a/deps/v8/src/circular-queue-inl.h +++ b/deps/v8/src/circular-queue-inl.h @@ -38,7 +38,8 @@ template CircularQueue::CircularQueue(int desired_buffer_size_in_bytes) : buffer_(NewArray(desired_buffer_size_in_bytes / sizeof(Record))), buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)), - enqueue_semaphore_(OS::CreateSemaphore((buffer_end_ - buffer_) - 1)), + enqueue_semaphore_( + OS::CreateSemaphore(static_cast(buffer_end_ - buffer_) - 1)), enqueue_pos_(buffer_), dequeue_pos_(buffer_) { // To be able to distinguish between a full and an empty queue diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc index a7c25323e82..af650de5e79 100644 --- a/deps/v8/src/circular-queue.cc +++ b/deps/v8/src/circular-queue.cc @@ -58,8 +58,10 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes, // updates of positions by different processor cores. 
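The "- 1" in the semaphore capacity is the classic ring-buffer reservation: with head == tail meaning empty, one slot must stay unused or a full queue would be indistinguishable from an empty one. A single-threaded model of that invariant:

#include <cstdio>

const int kSlots = 4;

struct Ring {
  int data[kSlots];
  int head = 0;  // Next slot to write.
  int tail = 0;  // Next slot to read.

  bool Empty() const { return head == tail; }
  bool Full() const { return (head + 1) % kSlots == tail; }

  bool Enqueue(int v) {
    if (Full()) return false;      // At most kSlots - 1 records stored.
    data[head] = v;
    head = (head + 1) % kSlots;
    return true;
  }
};

int main() {
  Ring q;
  int accepted = 0;
  for (int i = 0; i < kSlots; i++) accepted += q.Enqueue(i);
  std::printf("accepted %d of %d\n", accepted, kSlots);  // Prints 3 of 4.
  return 0;
}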
const int positions_size = RoundUp(1, kProcessorCacheLineSize) + - RoundUp(sizeof(ProducerPosition), kProcessorCacheLineSize) + - RoundUp(sizeof(ConsumerPosition), kProcessorCacheLineSize); + RoundUp(static_cast(sizeof(ProducerPosition)), + kProcessorCacheLineSize) + + RoundUp(static_cast(sizeof(ConsumerPosition)), + kProcessorCacheLineSize); positions_ = NewArray(positions_size); producer_pos_ = reinterpret_cast( diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h index dce7fc2ad94..486f10748fd 100644 --- a/deps/v8/src/circular-queue.h +++ b/deps/v8/src/circular-queue.h @@ -119,6 +119,8 @@ class SamplingCircularQueue { byte* positions_; ProducerPosition* producer_pos_; ConsumerPosition* consumer_pos_; + + DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue); }; diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index ea748980168..9d5969bb46e 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -64,7 +64,7 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { OPROFILE(CreateNativeCodeRegion(GetName(), code->instruction_start(), code->instruction_size())); - LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName())); + PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName())); Counters::total_stubs_code_size.Increment(code->instruction_size()); #ifdef ENABLE_DISASSEMBLER diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 56d8f4bb9b8..5bbf050ccab 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -235,7 +235,7 @@ Handle CodeGenerator::MakeCode(CompilationInfo* info) { bool CodeGenerator::ShouldGenerateLog(Expression* type) { ASSERT(type != NULL); - if (!Logger::is_logging()) return false; + if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false; Handle name = Handle::cast(type->AsLiteral()->handle()); if (FLAG_log_regexp) { static Vector kRegexp = CStrVector("regexp"); @@ -454,7 +454,6 @@ const char* GenericUnaryOpStub::GetName() { void ArgumentsAccessStub::Generate(MacroAssembler* masm) { switch (type_) { - case READ_LENGTH: GenerateReadLength(masm); break; case READ_ELEMENT: GenerateReadElement(masm); break; case NEW_OBJECT: GenerateNewObject(masm); break; } diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 0dfea8da495..d56d4eee20d 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -104,6 +104,7 @@ namespace internal { F(IsNonNegativeSmi, 1, 1) \ F(IsArray, 1, 1) \ F(IsRegExp, 1, 1) \ + F(CallFunction, -1 /* receiver + n args + function */, 1) \ F(IsConstructCall, 0, 1) \ F(ArgumentsLength, 0, 1) \ F(Arguments, 1, 1) \ @@ -114,7 +115,7 @@ namespace internal { F(CharFromCode, 1, 1) \ F(ObjectEquals, 2, 1) \ F(Log, 3, 1) \ - F(RandomPositiveSmi, 0, 1) \ + F(RandomHeapNumber, 0, 1) \ F(IsObject, 1, 1) \ F(IsFunction, 1, 1) \ F(IsUndetectableObject, 1, 1) \ @@ -122,6 +123,7 @@ namespace internal { F(SubString, 3, 1) \ F(StringCompare, 2, 1) \ F(RegExpExec, 4, 1) \ + F(RegExpConstructResult, 3, 1) \ F(NumberToString, 1, 1) \ F(MathPow, 2, 1) \ F(MathSin, 1, 1) \ @@ -229,7 +231,12 @@ class DeferredCode: public ZoneObject { Label entry_label_; Label exit_label_; - int registers_[RegisterAllocator::kNumRegisters]; + // C++ doesn't allow zero length arrays, so we make the array length 1 even + // if we don't need it. + static const int kRegistersArrayLength = + (RegisterAllocator::kNumRegisters == 0) ? 
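The three RoundUp terms give the producer and consumer positions a cache line each, so the two cores updating them never write to the same line (false sharing). The arithmetic, with 64 bytes assumed for the line size and pointer-sized stand-ins for the position structs:

#include <cstddef>
#include <cstdio>

const size_t kProcessorCacheLineSize = 64;  // Assumed line size.

size_t RoundUp(size_t x, size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  size_t producer = sizeof(void*);   // Stand-ins for the position structs.
  size_t consumer = sizeof(void*);
  size_t total = RoundUp(1, kProcessorCacheLineSize) +   // Leading pad.
                 RoundUp(producer, kProcessorCacheLineSize) +
                 RoundUp(consumer, kProcessorCacheLineSize);
  std::printf("positions buffer: %zu bytes for %zu bytes of data\n",
              total, producer + consumer);
  return 0;
}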
+ 1 : RegisterAllocator::kNumRegisters; + int registers_[kRegistersArrayLength]; #ifdef DEBUG const char* comment_; @@ -498,7 +505,6 @@ class JSConstructEntryStub : public JSEntryStub { class ArgumentsAccessStub: public CodeStub { public: enum Type { - READ_LENGTH, READ_ELEMENT, NEW_OBJECT }; @@ -512,7 +518,6 @@ class ArgumentsAccessStub: public CodeStub { int MinorKey() { return type_; } void Generate(MacroAssembler* masm); - void GenerateReadLength(MacroAssembler* masm); void GenerateReadElement(MacroAssembler* masm); void GenerateNewObject(MacroAssembler* masm); diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index c9dd1079658..aa80a029a75 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -90,33 +90,13 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { } if (FLAG_use_flow_graph) { - int variable_count = - function->num_parameters() + function->scope()->num_stack_slots(); - FlowGraphBuilder builder(variable_count); - builder.Build(function); - - if (!builder.HasStackOverflow()) { - if (variable_count > 0) { - ReachingDefinitions rd(builder.postorder(), - builder.body_definitions(), - variable_count); - rd.Compute(); - - TypeAnalyzer ta(builder.postorder(), - builder.body_definitions(), - variable_count, - function->num_parameters()); - ta.Compute(); - - MarkLiveCode(builder.preorder(), - builder.body_definitions(), - variable_count); - } - } + FlowGraphBuilder builder; + FlowGraph* graph = builder.Build(function); + USE(graph); #ifdef DEBUG if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - builder.graph()->PrintText(function, builder.postorder()); + graph->PrintAsText(function->name()); } #endif } @@ -237,14 +217,18 @@ static Handle MakeFunctionInfo(bool is_global, } if (script->name()->IsString()) { - LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG, - *code, String::cast(script->name()))); + PROFILE(CodeCreateEvent( + is_eval ? Logger::EVAL_TAG : + Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), + *code, String::cast(script->name()))); OPROFILE(CreateNativeCodeRegion(String::cast(script->name()), code->instruction_start(), code->instruction_size())); } else { - LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG, - *code, "")); + PROFILE(CodeCreateEvent( + is_eval ? Logger::EVAL_TAG : + Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), + *code, "")); OPROFILE(CreateNativeCodeRegion(is_eval ? 
"Eval" : "Script", code->instruction_start(), code->instruction_size())); @@ -499,33 +483,13 @@ Handle Compiler::BuildFunctionInfo(FunctionLiteral* literal, } if (FLAG_use_flow_graph) { - int variable_count = - literal->num_parameters() + literal->scope()->num_stack_slots(); - FlowGraphBuilder builder(variable_count); - builder.Build(literal); - - if (!builder.HasStackOverflow()) { - if (variable_count > 0) { - ReachingDefinitions rd(builder.postorder(), - builder.body_definitions(), - variable_count); - rd.Compute(); - - TypeAnalyzer ta(builder.postorder(), - builder.body_definitions(), - variable_count, - literal->num_parameters()); - ta.Compute(); - - MarkLiveCode(builder.preorder(), - builder.body_definitions(), - variable_count); - } - } + FlowGraphBuilder builder; + FlowGraph* graph = builder.Build(literal); + USE(graph); #ifdef DEBUG if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - builder.graph()->PrintText(literal, builder.postorder()); + graph->PrintAsText(literal->name()); } #endif } @@ -625,20 +589,24 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, // Log the code generation. If source information is available // include script name and line number. Check explicitly whether // logging is enabled as finding the line number is not free. - if (Logger::is_logging() || OProfileAgent::is_enabled()) { + if (Logger::is_logging() + || OProfileAgent::is_enabled() + || CpuProfiler::is_profiling()) { Handle func_name(name->length() > 0 ? *name : *inferred_name); if (script->name()->IsString()) { int line_num = GetScriptLineNumber(script, start_position) + 1; USE(line_num); - LOG(CodeCreateEvent(tag, *code, *func_name, - String::cast(script->name()), line_num)); + PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), + *code, *func_name, + String::cast(script->name()), line_num)); OPROFILE(CreateNativeCodeRegion(*func_name, String::cast(script->name()), line_num, code->instruction_start(), code->instruction_size())); } else { - LOG(CodeCreateEvent(tag, *code, *func_name)); + PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), + *code, *func_name)); OPROFILE(CreateNativeCodeRegion(*func_name, code->instruction_start(), code->instruction_size())); diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index ecc7b1c7adc..ade21f57425 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -138,10 +138,7 @@ class CompilationInfo BASE_EMBEDDED { // There should always be a function literal, but it may be set after // construction (for lazy compilation). FunctionLiteral* function() { return function_; } - void set_function(FunctionLiteral* literal) { - ASSERT(function_ == NULL); - function_ = literal; - } + void set_function(FunctionLiteral* literal) { function_ = literal; } // Simple accessors. 
bool is_eval() { return is_eval_; } diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 44c90b6428f..ce112f3e800 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -76,6 +76,7 @@ enum ContextLookupFlags { V(FUNCTION_MAP_INDEX, Map, function_map) \ V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \ V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\ + V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\ V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \ V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \ V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \ @@ -175,6 +176,7 @@ class Context: public FixedArray { SECURITY_TOKEN_INDEX, ARGUMENTS_BOILERPLATE_INDEX, JS_ARRAY_MAP_INDEX, + REGEXP_RESULT_MAP_INDEX, FUNCTION_MAP_INDEX, FUNCTION_INSTANCE_MAP_INDEX, INITIAL_OBJECT_PROTOTYPE_INDEX, diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 5c46752e6cb..2929191a2e5 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -48,51 +48,6 @@ int HexValue(uc32 c) { return -1; } - -// Provide a common interface to getting a character at a certain -// index from a char* or a String object. -static inline int GetChar(const char* str, int index) { - ASSERT(index >= 0 && index < StrLength(str)); - return str[index]; -} - - -static inline int GetChar(String* str, int index) { - return str->Get(index); -} - - -static inline int GetLength(const char* str) { - return StrLength(str); -} - - -static inline int GetLength(String* str) { - return str->length(); -} - - -static inline const char* GetCString(const char* str, int index) { - return str + index; -} - - -static inline const char* GetCString(String* str, int index) { - int length = str->length(); - char* result = NewArray(length + 1); - for (int i = index; i < length; i++) { - uc16 c = str->Get(i); - if (c <= 127) { - result[i - index] = static_cast(c); - } else { - result[i - index] = 127; // Force number parsing to fail. - } - } - result[length - index] = '\0'; - return result; -} - - namespace { // C++-style iterator adaptor for StringInputBuffer @@ -134,15 +89,6 @@ void StringInputBufferIterator::operator++() { } -static inline void ReleaseCString(const char* original, const char* str) { -} - - -static inline void ReleaseCString(String* original, const char* str) { - DeleteArray(const_cast(str)); -} - - template static bool SubStringEquals(Iterator* current, EndMark end, @@ -168,179 +114,309 @@ extern "C" double gay_strtod(const char* s00, const char** se); // we don't need to preserve all the digits. const int kMaxSignificantDigits = 772; -// Parse an int from a string starting a given index and in a given -// radix. The string can be either a char* or a String*. -template -static int InternalStringToInt(S* s, int i, int radix, double* value) { - int len = GetLength(s); - // Setup limits for computing the value. - ASSERT(2 <= radix && radix <= 36); - int lim_0 = '0' + (radix < 10 ? radix : 10); - int lim_a = 'a' + (radix - 10); - int lim_A = 'A' + (radix - 10); +static const double JUNK_STRING_VALUE = OS::nan_value(); - // NOTE: The code for computing the value may seem a bit complex at - // first glance. It is structured to use 32-bit multiply-and-add - // loops as long as possible to avoid loosing precision. - double v = 0.0; - int j; - for (j = i; j < len;) { - // Parse the longest part of the string starting at index j - // possible while keeping the multiplier, and thus the part - // itself, within 32 bits. 
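Dropping the GetChar/GetLength/GetCString overload pairs works because the parser is now templated over an iterator and an end mark, so char* input and String input share one body. A trimmed sketch of that shape (the whitespace test here stands in for Scanner's character table):

#include <cstdio>
#include <string>

template <class Iterator, class EndMark>
bool AdvanceToNonspace(Iterator* current, EndMark end) {
  while (*current != end) {
    if (**current != ' ') return true;   // Real code consults kIsWhiteSpace.
    ++*current;
  }
  return false;
}

int main() {
  const char* s = "   42";
  const char* p = s;
  const char* end = s + 5;
  if (AdvanceToNonspace(&p, end)) std::printf("first: '%c'\n", *p);

  std::string str = "  x";                       // Any iterator works too.
  std::string::iterator it = str.begin();
  if (AdvanceToNonspace(&it, str.end())) std::printf("first: '%c'\n", *it);
  return 0;
}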
- uint32_t part = 0, multiplier = 1; - int k; - for (k = j; k < len; k++) { - int c = GetChar(s, k); - if (c >= '0' && c < lim_0) { - c = c - '0'; - } else if (c >= 'a' && c < lim_a) { - c = c - 'a' + 10; - } else if (c >= 'A' && c < lim_A) { - c = c - 'A' + 10; - } else { - break; - } +// Returns true if a nonspace found and false if the end has reached. +template +static inline bool AdvanceToNonspace(Iterator* current, EndMark end) { + while (*current != end) { + if (!Scanner::kIsWhiteSpace.get(**current)) return true; + ++*current; + } + return false; +} - // Update the value of the part as long as the multiplier fits - // in 32 bits. When we can't guarantee that the next iteration - // will not overflow the multiplier, we stop parsing the part - // by leaving the loop. - static const uint32_t kMaximumMultiplier = 0xffffffffU / 36; - uint32_t m = multiplier * radix; - if (m > kMaximumMultiplier) break; - part = part * radix + c; - multiplier = m; - ASSERT(multiplier > part); - } - // Compute the number of part digits. If no digits were parsed; - // we're done parsing the entire string. - int digits = k - j; - if (digits == 0) break; +static bool isDigit(int x, int radix) { + return (x >= '0' && x <= '9' && x < '0' + radix) + || (radix > 10 && x >= 'a' && x < 'a' + radix - 10) + || (radix > 10 && x >= 'A' && x < 'A' + radix - 10); +} - // Update the value and skip the part in the string. - ASSERT(multiplier == - pow(static_cast(radix), static_cast(digits))); - v = v * multiplier + part; - j = k; - } - // If the resulting value is larger than 2^53 the value does not fit - // in the mantissa of the double and there is a loss of precision. - // When the value is larger than 2^53 the rounding depends on the - // code generation. If the code generator spills the double value - // it uses 64 bits and if it does not it uses 80 bits. - // - // If there is a potential for overflow we resort to strtod for - // radix 10 numbers to get higher precision. For numbers in another - // radix we live with the loss of precision. - static const double kPreciseConversionLimit = 9007199254740992.0; - if (radix == 10 && v > kPreciseConversionLimit) { - const char* cstr = GetCString(s, i); - const char* end; - v = gay_strtod(cstr, &end); - ReleaseCString(s, cstr); - } - - *value = v; - return j; +static double SignedZero(bool sign) { + return sign ? -0.0 : 0.0; } -int StringToInt(String* str, int index, int radix, double* value) { - return InternalStringToInt(str, index, radix, value); -} +// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end. +template +static double InternalStringToIntDouble(Iterator current, + EndMark end, + bool sign, + bool allow_trailing_junk) { + ASSERT(current != end); + // Skip leading 0s. 
+ while (*current == '0') { + ++current; + if (current == end) return SignedZero(sign); + } -int StringToInt(const char* str, int index, int radix, double* value) { - return InternalStringToInt(const_cast(str), index, radix, value); -} + int64_t number = 0; + int exponent = 0; + const int radix = (1 << radix_log_2); + do { + int digit; + if (*current >= '0' && *current <= '9' && *current < '0' + radix) { + digit = static_cast(*current) - '0'; + } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) { + digit = static_cast(*current) - 'a' + 10; + } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) { + digit = static_cast(*current) - 'A' + 10; + } else { + if (allow_trailing_junk || !AdvanceToNonspace(¤t, end)) { + break; + } else { + return JUNK_STRING_VALUE; + } + } -static const double JUNK_STRING_VALUE = OS::nan_value(); + number = number * radix + digit; + int overflow = static_cast(number >> 53); + if (overflow != 0) { + // Overflow occurred. Need to determine which direction to round the + // result. + int overflow_bits_count = 1; + while (overflow > 1) { + overflow_bits_count++; + overflow >>= 1; + } + int dropped_bits_mask = ((1 << overflow_bits_count) - 1); + int dropped_bits = static_cast(number) & dropped_bits_mask; + number >>= overflow_bits_count; + exponent = overflow_bits_count; -// Returns true if a nonspace found and false if the end has reached. -template -static inline bool AdvanceToNonspace(Iterator* current, EndMark end) { - while (*current != end) { - if (!Scanner::kIsWhiteSpace.get(**current)) return true; - ++*current; + bool zero_tail = true; + while (true) { + ++current; + if (current == end || !isDigit(*current, radix)) break; + zero_tail = zero_tail && *current == '0'; + exponent += radix_log_2; + } + + if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + return JUNK_STRING_VALUE; + } + + int middle_value = (1 << (overflow_bits_count - 1)); + if (dropped_bits > middle_value) { + number++; // Rounding up. + } else if (dropped_bits == middle_value) { + // Rounding to even to consistency with decimals: half-way case rounds + // up if significant part is odd and down otherwise. + if ((number & 1) != 0 || !zero_tail) { + number++; // Rounding up. + } + } + + // Rounding up may cause overflow. + if ((number & ((int64_t)1 << 53)) != 0) { + exponent++; + number >>= 1; + } + break; + } + ++current; + } while (current != end); + + ASSERT(number < ((int64_t)1 << 53)); + ASSERT(static_cast(static_cast(number)) == number); + + if (exponent == 0) { + if (sign) { + if (number == 0) return -0.0; + number = -number; + } + return static_cast(number); } - return false; + + ASSERT(number != 0); + // The double could be constructed faster from number (mantissa), exponent + // and sign. Assuming it's a rare case more simple code is used. + return static_cast(sign ? -number : number) * pow(2.0, exponent); } template -static double InternalHexadecimalStringToDouble(Iterator current, - EndMark end, - char* buffer, - bool allow_trailing_junk) { - ASSERT(current != end); +static double InternalStringToInt(Iterator current, EndMark end, int radix) { + const bool allow_trailing_junk = true; + const double empty_string_val = JUNK_STRING_VALUE; - const int max_hex_significant_digits = 52 / 4 + 2; - // We reuse the buffer of InternalStringToDouble. Since hexadecimal - // numbers may have much less digits than decimal the buffer won't overflow. 
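The overflow branch above implements round-half-to-even: the dropped low bits are compared against the midpoint, and an exact tie rounds toward whichever result has an even last bit, unless nonzero digits were discarded further out. A toy version that keeps only four significant bits instead of 53:

#include <cstdint>
#include <cstdio>

uint64_t RoundTo4Bits(uint64_t number, bool zero_tail) {
  const int kDrop = 60;                            // Keep only the top 4 bits.
  uint64_t dropped = number & ((1ULL << kDrop) - 1);
  uint64_t kept = number >> kDrop;
  uint64_t middle = 1ULL << (kDrop - 1);
  if (dropped > middle ||
      (dropped == middle && ((kept & 1) != 0 || !zero_tail))) {
    kept++;                                        // Round up.
  }
  return kept;
}

int main() {
  uint64_t half = 1ULL << 59;  // Exactly half of the dropped range.
  // Tie below odd 5 rounds up to 6; tie below even 4 stays 4.
  std::printf("%llu\n", (unsigned long long)RoundTo4Bits((5ULL << 60) | half, true));
  std::printf("%llu\n", (unsigned long long)RoundTo4Bits((4ULL << 60) | half, true));
  return 0;
}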
- ASSERT(max_hex_significant_digits < kMaxSignificantDigits); + if (!AdvanceToNonspace(¤t, end)) return empty_string_val; - int significant_digits = 0; - int insignificant_digits = 0; + bool sign = false; bool leading_zero = false; - // A double has a 53bit significand (once the hidden bit has been added). - // Halfway cases thus have at most 54bits. Therefore 54/4 + 1 digits are - // sufficient to represent halfway cases. By adding another digit we can keep - // track of dropped digits. - int buffer_pos = 0; - bool nonzero_digit_dropped = false; - // Skip leading 0s. - while (*current == '0') { - leading_zero = true; + if (*current == '+') { + // Ignore leading sign; skip following spaces. ++current; - if (current == end) return 0; + if (!AdvanceToNonspace(¤t, end)) return JUNK_STRING_VALUE; + } else if (*current == '-') { + ++current; + if (!AdvanceToNonspace(¤t, end)) return JUNK_STRING_VALUE; + sign = true; } - int begin_pos = buffer_pos; - while ((*current >= '0' && *current <= '9') - || (*current >= 'a' && *current <= 'f') - || (*current >= 'A' && *current <= 'F')) { - if (significant_digits <= max_hex_significant_digits) { - buffer[buffer_pos++] = static_cast(*current); - significant_digits++; + if (radix == 0) { + // Radix detection. + if (*current == '0') { + ++current; + if (current == end) return SignedZero(sign); + if (*current == 'x' || *current == 'X') { + radix = 16; + ++current; + if (current == end) return JUNK_STRING_VALUE; + } else { + radix = 8; + leading_zero = true; + } } else { - insignificant_digits++; - nonzero_digit_dropped = nonzero_digit_dropped || *current != '0'; + radix = 10; + } + } else if (radix == 16) { + if (*current == '0') { + // Allow "0x" prefix. + ++current; + if (current == end) return SignedZero(sign); + if (*current == 'x' || *current == 'X') { + ++current; + if (current == end) return JUNK_STRING_VALUE; + } else { + leading_zero = true; + } } + } + + if (radix < 2 || radix > 36) return JUNK_STRING_VALUE; + + // Skip leading zeros. + while (*current == '0') { + leading_zero = true; ++current; - if (current == end) break; + if (current == end) return SignedZero(sign); } - if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) { + if (!leading_zero && !isDigit(*current, radix)) { return JUNK_STRING_VALUE; } - if (significant_digits == 0) { - return leading_zero ? 0 : JUNK_STRING_VALUE; + if (IsPowerOf2(radix)) { + switch (radix) { + case 2: + return InternalStringToIntDouble<1>( + current, end, sign, allow_trailing_junk); + case 4: + return InternalStringToIntDouble<2>( + current, end, sign, allow_trailing_junk); + case 8: + return InternalStringToIntDouble<3>( + current, end, sign, allow_trailing_junk); + + case 16: + return InternalStringToIntDouble<4>( + current, end, sign, allow_trailing_junk); + + case 32: + return InternalStringToIntDouble<5>( + current, end, sign, allow_trailing_junk); + default: + UNREACHABLE(); + } } - if (nonzero_digit_dropped) { - ASSERT(insignificant_digits > 0); - insignificant_digits--; - buffer[buffer_pos++] = '1'; + if (radix == 10) { + // Parsing with strtod. + const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308. + // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero + // end. + const int kBufferSize = kMaxSignificantDigits + 2; + char buffer[kBufferSize]; + int buffer_pos = 0; + while (*current >= '0' && *current <= '9') { + if (buffer_pos <= kMaxSignificantDigits) { + // If the number has more than kMaxSignificantDigits it will be parsed + // as infinity. 
+
+  // Skip leading zeros.
+  while (*current == '0') {
+    leading_zero = true;
     ++current;
-    if (current == end) break;
+    if (current == end) return SignedZero(sign);
   }
 
-  if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+  if (!leading_zero && !isDigit(*current, radix)) {
     return JUNK_STRING_VALUE;
   }
 
-  if (significant_digits == 0) {
-    return leading_zero ? 0 : JUNK_STRING_VALUE;
+  if (IsPowerOf2(radix)) {
+    switch (radix) {
+      case 2:
+        return InternalStringToIntDouble<1>(
+                   current, end, sign, allow_trailing_junk);
+      case 4:
+        return InternalStringToIntDouble<2>(
+                   current, end, sign, allow_trailing_junk);
+      case 8:
+        return InternalStringToIntDouble<3>(
+                   current, end, sign, allow_trailing_junk);
+
+      case 16:
+        return InternalStringToIntDouble<4>(
+                   current, end, sign, allow_trailing_junk);
+
+      case 32:
+        return InternalStringToIntDouble<5>(
+                   current, end, sign, allow_trailing_junk);
+      default:
+        UNREACHABLE();
+    }
   }
 
-  if (nonzero_digit_dropped) {
-    ASSERT(insignificant_digits > 0);
-    insignificant_digits--;
-    buffer[buffer_pos++] = '1';
+  if (radix == 10) {
+    // Parsing with strtod.
+    const int kMaxSignificantDigits = 309;  // Doubles are less than 1.8e308.
+    // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
+    // end.
+    const int kBufferSize = kMaxSignificantDigits + 2;
+    char buffer[kBufferSize];
+    int buffer_pos = 0;
+    while (*current >= '0' && *current <= '9') {
+      if (buffer_pos <= kMaxSignificantDigits) {
+        // If the number has more than kMaxSignificantDigits it will be parsed
+        // as infinity.
+        ASSERT(buffer_pos < kBufferSize);
+        buffer[buffer_pos++] = static_cast<char>(*current);
+      }
+      ++current;
+      if (current == end) break;
+    }
+
+    if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+      return JUNK_STRING_VALUE;
+    }
+
+    ASSERT(buffer_pos < kBufferSize);
+    buffer[buffer_pos++] = '\0';
+    return sign ? -gay_strtod(buffer, NULL) : gay_strtod(buffer, NULL);
   }
 
-  buffer[buffer_pos] = '\0';
+  // The following code causes accumulating rounding error for numbers greater
+  // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+  // 16, or 32, then mathInt may be an implementation-dependent approximation to
+  // the mathematical integer value" (15.1.2.2).
+
+  int lim_0 = '0' + (radix < 10 ? radix : 10);
+  int lim_a = 'a' + (radix - 10);
+  int lim_A = 'A' + (radix - 10);
+
+  // NOTE: The code for computing the value may seem a bit complex at
+  // first glance. It is structured to use 32-bit multiply-and-add
+  // loops as long as possible to avoid losing precision.
+
+  double v = 0.0;
+  bool done = false;
+  do {
+    // Parse the longest part of the string starting at the current
+    // position possible while keeping the multiplier, and thus the part
+    // itself, within 32 bits.
+    unsigned int part = 0, multiplier = 1;
+    while (true) {
+      int d;
+      if (*current >= '0' && *current < lim_0) {
+        d = *current - '0';
+      } else if (*current >= 'a' && *current < lim_a) {
+        d = *current - 'a' + 10;
+      } else if (*current >= 'A' && *current < lim_A) {
+        d = *current - 'A' + 10;
+      } else {
+        done = true;
+        break;
+      }
+
+      // Update the value of the part as long as the multiplier fits
+      // in 32 bits. When we can't guarantee that the next iteration
+      // will not overflow the multiplier, we stop parsing the part
+      // by leaving the loop.
+      const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
+      uint32_t m = multiplier * radix;
+      if (m > kMaximumMultiplier) break;
+      part = part * radix + d;
+      multiplier = m;
+      ASSERT(multiplier > part);
+
+      ++current;
+      if (current == end) {
+        done = true;
+        break;
+      }
+    }
 
-  double result;
-  StringToInt(buffer, begin_pos, 16, &result);
-  if (insignificant_digits > 0) {
-    // Multiplying by a power of 2 doesn't cause a loss of precision.
-    result *= pow(16.0, insignificant_digits);
+    // Update the value and skip the part in the string.
+    v = v * multiplier + part;
+  } while (!done);
+
+  if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+    return JUNK_STRING_VALUE;
   }
-  return result;
+
+  return sign ? -v : v;
 }
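
The loop above performs only one lossy double operation per batch: digits are folded into a 32-bit "part" while the running multiplier stays under 0xffffffff / 36, and only then is the double accumulator updated. A self-contained sketch of the same accumulation strategy (hypothetical helper; digits are assumed already converted to numeric values):

    #include <stdint.h>

    // Accumulate digit values in base 'radix' using 32-bit arithmetic for
    // as long as possible, touching the double accumulator once per batch.
    static double AccumulateDigits(const uint8_t* digits, int count, int radix) {
      const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
      double v = 0.0;
      int i = 0;
      while (i < count) {
        uint32_t part = 0;
        uint32_t multiplier = 1;
        while (i < count) {
          uint32_t m = multiplier * radix;
          if (m > kMaximumMultiplier) break;
          part = part * radix + digits[i++];
          multiplier = m;
        }
        // One double multiply-add per five to six digits (for radix 10).
        v = v * multiplier + part;
      }
      return v;
    }
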
@@ -377,8 +453,9 @@ static double InternalStringToDouble(Iterator current,
   int significant_digits = 0;
   int insignificant_digits = 0;
   bool nonzero_digit_dropped = false;
+  bool fractional_part = false;
 
-  double signed_zero = 0.0;
+  bool sign = false;
 
   if (*current == '+') {
     // Ignore leading sign; skip following spaces.
@@ -388,7 +465,7 @@ static double InternalStringToDouble(Iterator current,
     buffer[buffer_pos++] = '-';
     ++current;
     if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
-    signed_zero = -0.0;
+    sign = true;
   }
 
   static const char kInfinitySymbol[] = "Infinity";
@@ -408,26 +485,28 @@ static double InternalStringToDouble(Iterator current,
   bool leading_zero = false;
   if (*current == '0') {
     ++current;
-    if (current == end) return signed_zero;
+    if (current == end) return SignedZero(sign);
 
     leading_zero = true;
 
     // It could be hexadecimal value.
     if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
       ++current;
-      if (current == end) return JUNK_STRING_VALUE;  // "0x".
+      if (current == end || !isDigit(*current, 16)) {
+        return JUNK_STRING_VALUE;  // "0x".
+      }
 
-      double result = InternalHexadecimalStringToDouble(current,
-                                                        end,
-                                                        buffer + buffer_pos,
-                                                        allow_trailing_junk);
-      return (buffer_pos > 0 && buffer[0] == '-') ? -result : result;
+      bool sign = (buffer_pos > 0 && buffer[0] == '-');
+      return InternalStringToIntDouble<4>(current,
+                                          end,
+                                          sign,
+                                          allow_trailing_junk);
     }
 
     // Ignore leading zeros in the integer part.
     while (*current == '0') {
       ++current;
-      if (current == end) return signed_zero;
+      if (current == end) return SignedZero(sign);
     }
   }
 
@@ -454,8 +533,6 @@ static double InternalStringToDouble(Iterator current,
   }
 
   if (*current == '.') {
-    ASSERT(buffer_pos < kBufferSize);
-    buffer[buffer_pos++] = '.';
     ++current;
     if (current == end) {
       if (significant_digits == 0 && !leading_zero) {
@@ -471,11 +548,15 @@ static double InternalStringToDouble(Iterator current,
     // leading zeros (if any).
     while (*current == '0') {
       ++current;
-      if (current == end) return signed_zero;
+      if (current == end) return SignedZero(sign);
       exponent--;  // Move this 0 into the exponent.
     }
   }
 
+  ASSERT(buffer_pos < kBufferSize);
+  buffer[buffer_pos++] = '.';
+  fractional_part = true;
+
   // There is the fractional part.
   while (*current >= '0' && *current <= '9') {
     if (significant_digits < kMaxSignificantDigits) {
@@ -557,22 +638,13 @@ static double InternalStringToDouble(Iterator current,
   exponent += insignificant_digits;
 
   if (octal) {
-    buffer[buffer_pos] = '\0';
-    // ALLOW_OCTALS is set and there is no '8' or '9' in insignificant
-    // digits. Check significant digits now.
-    char sign = '+';
-    const char* s = buffer;
-    if (*s == '-' || *s == '+') sign = *s++;
+    bool sign = buffer[0] == '-';
+    int start_pos = (sign ? 1 : 0);
 
-    double result;
-    s += StringToInt(s, 0, 8, &result);
-    if (!allow_trailing_junk && *s != '\0') return JUNK_STRING_VALUE;
-
-    if (sign == '-') result = -result;
-    if (insignificant_digits > 0) {
-      result *= pow(8.0, insignificant_digits);
-    }
-    return result;
+    return InternalStringToIntDouble<3>(buffer + start_pos,
+                                        buffer + buffer_pos,
+                                        sign,
+                                        allow_trailing_junk);
   }
 
   if (nonzero_digit_dropped) {
@@ -580,6 +652,11 @@ static double InternalStringToDouble(Iterator current,
     buffer[buffer_pos++] = '1';
   }
 
+  // If the number has no more than kMaxDigitsInInt digits and doesn't have
+  // a fractional part it could be parsed faster (without checks for
+  // spaces, overflow, etc.).
+  const int kMaxDigitsInInt = 9 * sizeof(int) / 4;  // NOLINT
+
   if (exponent != 0) {
     ASSERT(buffer_pos < kBufferSize);
     buffer[buffer_pos++] = 'e';
@@ -597,6 +674,16 @@ static double InternalStringToDouble(Iterator current,
     }
     ASSERT(exponent == 0);
     buffer_pos += exp_digits;
+  } else if (!fractional_part && significant_digits <= kMaxDigitsInInt) {
+    if (significant_digits == 0) return SignedZero(sign);
+    ASSERT(buffer_pos > 0);
+    int num = 0;
+    int start_pos = (buffer[0] == '-' ? 1 : 0);
+    for (int i = start_pos; i < buffer_pos; i++) {
+      ASSERT(buffer[i] >= '0' && buffer[i] <= '9');
+      num = 10 * num + (buffer[i] - '0');
+    }
+    return static_cast<double>(start_pos == 0 ? num : -num);
   }
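
The kMaxDigitsInInt fast path works because any nine-digit decimal number is at most 999999999, which is below INT_MAX (2147483647) for a 32-bit int, so the digit loop cannot overflow. A quick check of that bound (hypothetical demo, not part of the patch):

    #include <assert.h>
    #include <limits.h>

    int main() {
      // 9 * sizeof(int) / 4 == 9 when sizeof(int) == 4: nine decimal digits
      // always fit, so the fast path needs no overflow test.
      const int kMaxDigitsInInt = 9 * sizeof(int) / 4;
      assert(kMaxDigitsInInt == 9);
      assert(999999999 < INT_MAX);
      return 0;
    }
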
 
   ASSERT(buffer_pos < kBufferSize);
@@ -625,6 +712,25 @@ double StringToDouble(String* str, int flags, double empty_string_val) {
 }
 
 
+double StringToInt(String* str, int radix) {
+  StringShape shape(str);
+  if (shape.IsSequentialAscii()) {
+    const char* begin = SeqAsciiString::cast(str)->GetChars();
+    const char* end = begin + str->length();
+    return InternalStringToInt(begin, end, radix);
+  } else if (shape.IsSequentialTwoByte()) {
+    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+    const uc16* end = begin + str->length();
+    return InternalStringToInt(begin, end, radix);
+  } else {
+    StringInputBuffer buffer(str);
+    return InternalStringToInt(StringInputBufferIterator(&buffer),
+                               StringInputBufferIterator::EndMarker(),
+                               radix);
+  }
+}
+
+
 double StringToDouble(const char* str, int flags, double empty_string_val) {
   const char* end = str + StrLength(str);
 
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 4aaf0c01ba6..c4ceea6b908 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -100,8 +100,7 @@ double StringToDouble(const char* str,
                       int flags,
                       double empty_string_val = 0);
 double StringToDouble(String* str, int flags, double empty_string_val = 0);
 
 // Converts a string into an integer.
-int StringToInt(String* str, int index, int radix, double* value);
-int StringToInt(const char* str, int index, int radix, double* value);
+double StringToInt(String* str, int radix);
 
 // Converts a double to a string value according to ECMA-262 9.8.1.
 // The buffer should be large enough for any floating point number.
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 26ab643c3a2..036e1108e50 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -28,23 +28,71 @@
 #ifndef V8_CPU_PROFILER_INL_H_
 #define V8_CPU_PROFILER_INL_H_
 
+#include "cpu-profiler.h"
+
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
 #include "circular-queue-inl.h"
 #include "profile-generator-inl.h"
 
-#include "cpu-profiler.h"
-
 namespace v8 {
 namespace internal {
 
+void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->AddCode(start, entry, size);
+}
+
+
+void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->MoveCode(from, to);
+}
+
+
+void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->DeleteCode(start);
+}
+
+
+void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->AddAlias(alias, start);
+}
+
+
+TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
+  TickSampleEventRecord* result =
+      reinterpret_cast<TickSampleEventRecord*>(value);
+  result->filler = 1;
+  ASSERT(result->filler != SamplingCircularQueue::kClear);
+  // Init the required fields only.
+  result->sample.pc = NULL;
+  result->sample.frames_count = 0;
+  return result;
+}
+
 
 TickSample* ProfilerEventsProcessor::TickSampleEvent() {
   TickSampleEventRecord* evt =
-      reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.Enqueue());
+      TickSampleEventRecord::init(ticks_buffer_.Enqueue());
   evt->order = enqueue_order_;  // No increment!
   return &evt->sample;
 }
 
+
+bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
+    Logger::LogEventsAndTags tag) {
+  // In browser mode, leave only callbacks and non-native JS entries.
+  // We filter out regular expressions as currently we can't tell
+  // whether they originate from native scripts, so let's not confuse
+  // people by showing them weird regexes they didn't write.
+  return FLAG_prof_browser_mode
+      && (tag != Logger::CALLBACK_TAG
+          && tag != Logger::FUNCTION_TAG
+          && tag != Logger::LAZY_COMPILE_TAG
+          && tag != Logger::SCRIPT_TAG);
+}
 
 } }  // namespace v8::internal
 
+#endif  // ENABLE_CPP_PROFILES_PROCESSOR
+
 #endif  // V8_CPU_PROFILER_INL_H_
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index d16c17f4c05..22937c0fbbe 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -29,10 +29,15 @@
 
 #include "cpu-profiler-inl.h"
 
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
+#include "log-inl.h"
+
+#include "../include/v8-profiler.h"
+
 namespace v8 {
 namespace internal {
 
-
 static const int kEventsBufferSize = 256*KB;
 static const int kTickSamplesBufferChunkSize = 64*KB;
 static const int kTickSamplesBufferChunksCount = 16;
@@ -48,12 +53,29 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
       enqueue_order_(0) {
 }
 
+void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
+                                                  const char* prefix,
+                                                  String* name,
+                                                  Address start) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+  rec->size = 1;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
 void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                               String* name,
                                               String* resource_name,
                                               int line_number,
                                               Address start,
                                               unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec;
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->type = CodeEventRecord::CODE_CREATION;
@@ -69,6 +91,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                               const char* name,
                                               Address start,
                                               unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec;
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->type = CodeEventRecord::CODE_CREATION;
@@ -84,6 +107,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                               int args_count,
                                               Address start,
                                               unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec;
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->type = CodeEventRecord::CODE_CREATION;
@@ -138,6 +162,24 @@ void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
 }
 
 
+void ProfilerEventsProcessor::RegExpCodeCreateEvent(
+    Logger::LogEventsAndTags tag,
+    const char* prefix,
+    String* name,
+    Address start,
+    unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+  rec->size = size;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
   if (!events_buffer_.IsEmpty()) {
     CodeEventsContainer record;
@@ -163,7 +205,7 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
 
 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
   while (true) {
     const TickSampleEventRecord* rec =
-        reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.StartDequeue());
+        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
     if (rec == NULL) return false;
     if (rec->order == dequeue_order) {
       generator_->RecordTickSample(rec->sample);
@@ -196,4 +238,255 @@ void ProfilerEventsProcessor::Run() {
 }
 
 
+CpuProfiler* CpuProfiler::singleton_ = NULL;
+
+void CpuProfiler::StartProfiling(const char* title) {
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
+}
+
+
+void CpuProfiler::StartProfiling(String* title) {
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(const char* title) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->StopCollectingProfile(title);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(String* title) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->StopCollectingProfile(title);
+}
+
+
+int CpuProfiler::GetProfilesCount() {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->profiles()->length();
+}
+
+
+CpuProfile* CpuProfiler::GetProfile(int index) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->profiles()->at(index);
+}
+
+
+CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->GetProfile(uid);
+}
+
+
+TickSample* CpuProfiler::TickSampleEvent() {
+  if (CpuProfiler::is_profiling()) {
+    return singleton_->processor_->TickSampleEvent();
+  } else {
+    return NULL;
+  }
+}
+
+
+void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Code* code, const char* comment) {
+  singleton_->processor_->CodeCreateEvent(
+      tag, comment, code->address(), code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Code* code, String* name) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      name,
+      Heap::empty_string(),
+      v8::CpuProfileNode::kNoLineNumberInfo,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Code* code, String* name,
+                                  String* source, int line) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      name,
+      source,
+      line,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Code* code, int args_count) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      args_count,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeMoveEvent(Address from, Address to) {
+  singleton_->processor_->CodeMoveEvent(from, to);
+}
+
+
+void CpuProfiler::CodeDeleteEvent(Address from) {
+  singleton_->processor_->CodeDeleteEvent(from);
+}
+
+
+void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
+  singleton_->processor_->FunctionCreateEvent(
+      function->address(), function->code()->address());
+}
+
+
+void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
+  singleton_->processor_->FunctionMoveEvent(from, to);
+}
+
+
+void CpuProfiler::FunctionDeleteEvent(Address from) {
+  singleton_->processor_->FunctionDeleteEvent(from);
+}
+
+
+void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, "get ", name, entry_point);
+}
+
+
+void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
+  singleton_->processor_->RegExpCodeCreateEvent(
+      Logger::REG_EXP_TAG,
+      "RegExp: ",
+      source,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, "set ", name, entry_point);
+}
+
+
+CpuProfiler::CpuProfiler()
+    : profiles_(new CpuProfilesCollection()),
+      next_profile_uid_(1),
+      generator_(NULL),
+      processor_(NULL) {
+}
+
+
+CpuProfiler::~CpuProfiler() {
+  delete profiles_;
+}
+
+
+void CpuProfiler::StartCollectingProfile(const char* title) {
+  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+    StartProcessorIfNotStarted();
+  }
+}
+
+
+void CpuProfiler::StartCollectingProfile(String* title) {
+  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+    StartProcessorIfNotStarted();
+  }
+}
+
+
+void CpuProfiler::StartProcessorIfNotStarted() {
+  if (processor_ == NULL) {
+    // Disable logging when using the new implementation.
+    saved_logging_nesting_ = Logger::logging_nesting_;
+    Logger::logging_nesting_ = 0;
+    generator_ = new ProfileGenerator(profiles_);
+    processor_ = new ProfilerEventsProcessor(generator_);
+    processor_->Start();
+    // Enumerate stuff we already have in the heap.
+    if (Heap::HasBeenSetup()) {
+      Logger::LogCodeObjects();
+      Logger::LogCompiledFunctions();
+      Logger::LogFunctionObjects();
+      Logger::LogAccessorCallbacks();
+    }
+    // Enable stack sampling.
+    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
+  }
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
+  StopProcessorIfLastProfile();
+  CpuProfile* result = profiles_->StopProfiling(title);
+  if (result != NULL) {
+    result->Print();
+  }
+  return result;
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
+  StopProcessorIfLastProfile();
+  return profiles_->StopProfiling(title);
+}
+
+
+void CpuProfiler::StopProcessorIfLastProfile() {
+  if (profiles_->is_last_profile()) {
+    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
+    processor_->Stop();
+    processor_->Join();
+    delete processor_;
+    delete generator_;
+    processor_ = NULL;
+    generator_ = NULL;
+    Logger::logging_nesting_ = saved_logging_nesting_;
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_CPP_PROFILES_PROCESSOR
+
+namespace v8 {
+namespace internal {
+
+void CpuProfiler::Setup() {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+  if (singleton_ == NULL) {
+    singleton_ = new CpuProfiler();
+  }
+#endif
+}
+
+
+void CpuProfiler::TearDown() {
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+  if (singleton_ != NULL) {
+    delete singleton_;
+  }
+  singleton_ = NULL;
+#endif
+}
+
 } }  // namespace v8::internal
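
Profiles are keyed by title: the first live profile starts the sampler and processor thread, and stopping the last one shuts them down again. A hypothetical embedder-side call sequence (V8-internal types, illustrative fragment only; assumes Setup() has run and ENABLE_CPP_PROFILES_PROCESSOR is compiled in):

    // Begin sampling; the first live profile starts the processor thread.
    CpuProfiler::StartProfiling("page-load");

    // ... execute JavaScript; tick samples and code events are enqueued ...

    // Stop; the last live profile also stops the sampler and processor.
    CpuProfile* profile = CpuProfiler::StopProfiling("page-load");
    if (profile != NULL) {
      // Inspect or serialize the collected call tree here.
    }
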
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 8a7d2fdd318..594e44ec685 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -28,12 +28,20 @@
 #ifndef V8_CPU_PROFILER_H_
 #define V8_CPU_PROFILER_H_
 
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+
 #include "circular-queue.h"
-#include "profile-generator.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class CodeEntry;
+class CodeMap;
+class CpuProfile;
+class CpuProfilesCollection;
+class ProfileGenerator;
+
 
 #define CODE_EVENTS_TYPE_LIST(V)                   \
   V(CODE_CREATION, CodeCreateEventRecord)          \
@@ -63,9 +71,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
   CodeEntry* entry;
   unsigned size;
 
-  INLINE(void UpdateCodeMap(CodeMap* code_map)) {
-    code_map->AddCode(start, entry, size);
-  }
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
 
 
@@ -74,9 +80,7 @@ class CodeMoveEventRecord : public CodeEventRecord {
   Address from;
   Address to;
 
-  INLINE(void UpdateCodeMap(CodeMap* code_map)) {
-    code_map->MoveCode(from, to);
-  }
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
 
 
@@ -84,9 +88,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
  public:
   Address start;
 
-  INLINE(void UpdateCodeMap(CodeMap* code_map)) {
-    code_map->DeleteCode(start);
-  }
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
 
 
@@ -95,33 +97,29 @@ class CodeAliasEventRecord : public CodeEventRecord {
   Address alias;
   Address start;
 
-  INLINE(void UpdateCodeMap(CodeMap* code_map)) {
-    code_map->AddAlias(alias, start);
-  }
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
 
 
-class TickSampleEventRecord {
+class TickSampleEventRecord BASE_EMBEDDED {
  public:
-  // In memory, the first machine word of a TickSampleEventRecord will be the
-  // first entry of TickSample, that is -- a program counter field.
-  // TickSample is put first, because 'order' can become equal to
-  // SamplingCircularQueue::kClear, while program counter can't.
-  TickSample sample;
+  // The first machine word of a TickSampleEventRecord must not ever
+  // become equal to SamplingCircularQueue::kClear. As both order and
+  // TickSample's first field are not reliable in this sense (order
+  // can overflow, TickSample can have all fields reset), we are
+  // forced to use an artificial filler field.
+  int filler;
   unsigned order;
+  TickSample sample;
 
-#if defined(__GNUC__) && (__GNUC__ < 4)
-  // Added to avoid 'all member functions in class are private' warning.
-  INLINE(unsigned get_order() const) { return order; }
-  // Added to avoid 'class only defines private constructors and
-  // has no friends' warning.
-  friend class TickSampleEventRecordFriend;
-#endif
- private:
-  // Disable instantiation.
-  TickSampleEventRecord();
+  static TickSampleEventRecord* cast(void* value) {
+    return reinterpret_cast<TickSampleEventRecord*>(value);
+  }
 
-  DISALLOW_COPY_AND_ASSIGN(TickSampleEventRecord);
+  INLINE(static TickSampleEventRecord* init(void* value));
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
 };
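
The field order here is load-bearing: the sampling queue marks a free cell by storing SamplingCircularQueue::kClear in its first machine word, so whichever field sits first must never legitimately hold that value; hence the artificial filler that init() pins to 1. A minimal sketch of the invariant (hypothetical names, not the V8 types); the event-adding methods of the processor continue below:

    // A consumer peeks at a cell's first word to decide "free or ready".
    // If a record's first field could equal kClear, a fully written record
    // would be indistinguishable from an empty cell.
    static const int kClear = 0;

    struct Record {
      int filler;      // Always set to 1 on init; never kClear.
      unsigned order;  // Unreliable: may wrap around to 0 == kClear.
    };

    static bool CellIsReady(const Record* cell) {
      return cell->filler != kClear;
    }
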
@@ -138,6 +136,9 @@ class ProfilerEventsProcessor : public Thread {
   INLINE(bool running()) { return running_; }
 
   // Events adding methods. Called by VM threads.
+  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
+                           const char* prefix, String* name,
+                           Address start);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        String* name,
                        String* resource_name, int line_number,
@@ -153,6 +154,9 @@ class ProfilerEventsProcessor : public Thread {
   void FunctionCreateEvent(Address alias, Address start);
   void FunctionMoveEvent(Address from, Address to);
   void FunctionDeleteEvent(Address from);
+  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
+                             const char* prefix, String* name,
+                             Address start, unsigned size);
 
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
@@ -172,6 +176,8 @@ class ProfilerEventsProcessor : public Thread {
   bool ProcessCodeEvent(unsigned* dequeue_order);
   bool ProcessTicks(unsigned dequeue_order);
 
+  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+
   ProfileGenerator* generator_;
   bool running_;
   CircularQueue<CodeEventsContainer> events_buffer_;
@@ -179,7 +185,93 @@ class ProfilerEventsProcessor : public Thread {
   unsigned enqueue_order_;
 };
 
+} }  // namespace v8::internal
+
+
+#define PROFILE(Call)                                \
+  LOG(Call);                                         \
+  do {                                               \
+    if (v8::internal::CpuProfiler::is_profiling()) { \
+      v8::internal::CpuProfiler::Call;               \
+    }                                                \
+  } while (false)
+#else
+#define PROFILE(Call) LOG(Call)
+#endif  // ENABLE_CPP_PROFILES_PROCESSOR
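
The PROFILE macro makes instrumentation nearly free when the profiler is idle: the LOG half always runs, while the profiler half costs a single is_profiling() check. A runnable analogue of the expansion (simplified stand-ins, not the V8 macros); the CpuProfiler class that receives the forwarded calls follows:

    #include <stdio.h>

    static bool profiling = false;

    #define LOG(Call) printf("log: %s\n", #Call)
    #define PROFILE(Call)                   \
      LOG(Call);                            \
      do {                                  \
        if (profiling) {                    \
          printf("profile: %s\n", #Call);   \
        }                                   \
      } while (false)

    int main() {
      PROFILE(CodeCreateEvent("stub"));  // Logged always; not profiled yet.
      profiling = true;
      PROFILE(CodeCreateEvent("stub"));  // Now both halves run.
      return 0;
    }
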
 
 
+namespace v8 {
+namespace internal {
+
+class CpuProfiler {
+ public:
+  static void Setup();
+  static void TearDown();
+
+#ifdef ENABLE_CPP_PROFILES_PROCESSOR
+  static void StartProfiling(const char* title);
+  static void StartProfiling(String* title);
+  static CpuProfile* StopProfiling(const char* title);
+  static CpuProfile* StopProfiling(String* title);
+  static int GetProfilesCount();
+  static CpuProfile* GetProfile(int index);
+  static CpuProfile* FindProfile(unsigned uid);
+
+  // Invoked from stack sampler (thread or signal handler.)
+  static TickSample* TickSampleEvent();
+
+  // Must be called via PROFILE macro, otherwise will crash when
+  // profiling is not enabled.
+  static void CallbackEvent(String* name, Address entry_point);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, const char* comment);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, String* name);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, String* name,
+                              String* source, int line);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, int args_count);
+  static void CodeMoveEvent(Address from, Address to);
+  static void CodeDeleteEvent(Address from);
+  static void FunctionCreateEvent(JSFunction* function);
+  static void FunctionMoveEvent(Address from, Address to);
+  static void FunctionDeleteEvent(Address from);
+  static void GetterCallbackEvent(String* name, Address entry_point);
+  static void RegExpCodeCreateEvent(Code* code, String* source);
+  static void SetterCallbackEvent(String* name, Address entry_point);
+
+  static INLINE(bool is_profiling()) {
+    return singleton_ != NULL && singleton_->processor_ != NULL;
+  }
+
+ private:
+  CpuProfiler();
+  ~CpuProfiler();
+  void StartCollectingProfile(const char* title);
+  void StartCollectingProfile(String* title);
+  void StartProcessorIfNotStarted();
+  CpuProfile* StopCollectingProfile(const char* title);
+  CpuProfile* StopCollectingProfile(String* title);
+  void StopProcessorIfLastProfile();
+
+  CpuProfilesCollection* profiles_;
+  unsigned next_profile_uid_;
+  ProfileGenerator* generator_;
+  ProfilerEventsProcessor* processor_;
+  int saved_logging_nesting_;
+
+  static CpuProfiler* singleton_;
+
+#else
+  static INLINE(bool is_profiling()) { return false; }
+#endif  // ENABLE_CPP_PROFILES_PROCESSOR
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
+};
 
 } }  // namespace v8::internal
 
+
 #endif  // V8_CPU_PROFILER_H_
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 2535ce05e59..335bd2b4bce 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -663,10 +663,28 @@ Handle<Value> Shell::SetEnvironment(const Arguments& args) {
 }
 
 
+Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
+  if (args.Length() != 1) {
+    const char* message = "unsetenv() takes one argument";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value var(args[0]);
+  if (*var == NULL) {
+    const char* message =
+        "os.unsetenv(): String conversion of variable name failed.";
+    return ThrowException(String::New(message));
+  }
+  unsetenv(*var);
+  return v8::Undefined();
+}
+
+
 void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
   os_templ->Set(String::New("system"), FunctionTemplate::New(System));
   os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
   os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
+  os_templ->Set(String::New("unsetenv"),
+                FunctionTemplate::New(UnsetEnvironment));
   os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
   os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
   os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index c93ea461d81..9df291b32a0 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -175,6 +175,7 @@ class Shell: public i::AllStatic {
   static Handle<Value> System(const Arguments& args);
   static Handle<Value> ChangeDirectory(const Arguments& args);
   static Handle<Value> SetEnvironment(const Arguments& args);
+  static Handle<Value> UnsetEnvironment(const Arguments& args);
   static Handle<Value> SetUMask(const Arguments& args);
   static Handle<Value> MakeDirectory(const Arguments& args);
   static Handle<Value>
RemoveDirectory(const Arguments& args); diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index be4a0519c5f..369ab65cf58 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -164,7 +164,7 @@ function DebugEventDetails(response) { Debug.State.currentFrame = 0; details.text = result; break; - + case 'exception': if (body.uncaught) { result += 'Uncaught: '; @@ -212,7 +212,7 @@ function DebugEventDetails(response) { function SourceInfo(body) { var result = ''; - + if (body.script) { if (body.script.name) { result += body.script.name; @@ -224,7 +224,7 @@ function SourceInfo(body) { result += body.sourceLine + 1; result += ' column '; result += body.sourceColumn + 1; - + return result; } @@ -297,20 +297,20 @@ function DebugRequest(cmd_line) { case 'bt': this.request_ = this.backtraceCommandToJSONRequest_(args); break; - + case 'frame': case 'f': this.request_ = this.frameCommandToJSONRequest_(args); break; - + case 'scopes': this.request_ = this.scopesCommandToJSONRequest_(args); break; - + case 'scope': this.request_ = this.scopeCommandToJSONRequest_(args); break; - + case 'print': case 'p': this.request_ = this.printCommandToJSONRequest_(args); @@ -331,16 +331,16 @@ function DebugRequest(cmd_line) { case 'source': this.request_ = this.sourceCommandToJSONRequest_(args); break; - + case 'scripts': this.request_ = this.scriptsCommandToJSONRequest_(args); break; - + case 'break': case 'b': this.request_ = this.breakCommandToJSONRequest_(args); break; - + case 'clear': this.request_ = this.clearCommandToJSONRequest_(args); break; @@ -365,7 +365,7 @@ function DebugRequest(cmd_line) { default: throw new Error('Unknown command "' + cmd + '"'); } - + last_cmd = cmd; } @@ -490,22 +490,22 @@ DebugRequest.prototype.stepCommandToJSONRequest_ = function(args) { case 'i': request.arguments.stepaction = 'in'; break; - + case 'min': case 'm': request.arguments.stepaction = 'min'; break; - + case 'next': case 'n': request.arguments.stepaction = 'next'; break; - + case 'out': case 'o': request.arguments.stepaction = 'out'; break; - + default: throw new Error('Invalid step argument "' + args[0] + '".'); } @@ -523,7 +523,7 @@ DebugRequest.prototype.stepCommandToJSONRequest_ = function(args) { DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) { // Build a backtrace request from the text command. var request = this.createRequest('backtrace'); - + // Default is to show top 10 frames. request.arguments = {}; request.arguments.fromFrame = 0; @@ -626,7 +626,7 @@ DebugRequest.prototype.referencesCommandToJSONRequest_ = function(args) { if (args.length == 0) { throw new Error('Missing object id.'); } - + return this.makeReferencesJSONRequest_(args, 'referencedBy'); }; @@ -637,7 +637,7 @@ DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) { if (args.length == 0) { throw new Error('Missing object id.'); } - + // Build a references request. 
return this.makeReferencesJSONRequest_(args, 'constructedBy'); }; @@ -691,18 +691,18 @@ DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) { case 'natives': request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native); break; - + case 'extensions': request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension); break; - + case 'all': request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Normal) | ScriptTypeFlag(Debug.ScriptType.Native) | ScriptTypeFlag(Debug.ScriptType.Extension); break; - + default: throw new Error('Invalid argument "' + args[0] + '".'); } @@ -740,7 +740,7 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) { type = 'script'; var tmp = target.substring(pos + 1, target.length); target = target.substring(0, pos); - + // Check for both line and column. pos = tmp.indexOf(':'); if (pos > 0) { @@ -755,7 +755,7 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) { } else { type = 'function'; } - + request.arguments = {}; request.arguments.type = type; request.arguments.target = target; @@ -936,13 +936,13 @@ function DebugResponseDetails(response) { result += body.breakpoint; details.text = result; break; - + case 'clearbreakpoint': result = 'cleared breakpoint #'; result += body.breakpoint; details.text = result; break; - + case 'backtrace': if (body.totalFrames == 0) { result = '(empty stack)'; @@ -956,14 +956,14 @@ function DebugResponseDetails(response) { } details.text = result; break; - + case 'frame': details.text = SourceUnderline(body.sourceLineText, body.column); Debug.State.currentSourceLine = body.line; Debug.State.currentFrame = body.index; break; - + case 'scopes': if (body.totalScopes == 0) { result = '(no scopes)'; @@ -987,7 +987,7 @@ function DebugResponseDetails(response) { result += formatObject_(scope_object_value, true); details.text = result; break; - + case 'evaluate': case 'lookup': if (last_cmd == 'p' || last_cmd == 'print') { @@ -1031,7 +1031,7 @@ function DebugResponseDetails(response) { } details.text = result; break; - + case 'source': // Get the source from the response. var source = body.source; @@ -1066,7 +1066,7 @@ function DebugResponseDetails(response) { } details.text = result; break; - + case 'scripts': var result = ''; for (i = 0; i < body.length; i++) { @@ -1128,7 +1128,7 @@ function DebugResponseDetails(response) { case 'continue': details.text = "(running)"; break; - + default: details.text = 'Response for unknown command \'' + response.command + '\'' + @@ -1137,7 +1137,7 @@ function DebugResponseDetails(response) { } catch (e) { details.text = 'Error: "' + e + '" formatting response'; } - + return details; }; @@ -1254,7 +1254,7 @@ ProtocolValue.prototype.type = function() { /** - * Get a metadata field from a protocol value. + * Get a metadata field from a protocol value. 
* @return {Object} the metadata field value */ ProtocolValue.prototype.field = function(name) { @@ -1435,12 +1435,12 @@ function ArrayToJSONArray_(content) { function BooleanToJSON_(value) { - return String(value); + return String(value); } function NumberToJSON_(value) { - return String(value); + return String(value); } diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc index 1bc77c03382..4e7620acf70 100644 --- a/deps/v8/src/data-flow.cc +++ b/deps/v8/src/data-flow.cc @@ -28,7 +28,6 @@ #include "v8.h" #include "data-flow.h" -#include "flow-graph.h" #include "scopes.h" namespace v8 { @@ -621,21 +620,34 @@ void AssignedVariablesAnalyzer::VisitCatchExtensionObject( void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) { ASSERT(av_.IsEmpty()); - if (expr->target()->AsProperty() != NULL) { - // Visit receiver and key of property store and rhs. - Visit(expr->target()->AsProperty()->obj()); - ProcessExpression(expr->target()->AsProperty()->key()); - ProcessExpression(expr->value()); + // There are three kinds of assignments: variable assignments, property + // assignments, and reference errors (invalid left-hand sides). + Variable* var = expr->target()->AsVariableProxy()->AsVariable(); + Property* prop = expr->target()->AsProperty(); + ASSERT(var == NULL || prop == NULL); + + if (var != NULL) { + MarkIfTrivial(expr->value()); + Visit(expr->value()); + if (expr->is_compound()) { + // Left-hand side occurs also as an rvalue. + MarkIfTrivial(expr->target()); + ProcessExpression(expr->target()); + } + RecordAssignedVar(var); + + } else if (prop != NULL) { + MarkIfTrivial(expr->value()); + Visit(expr->value()); + if (!prop->key()->IsPropertyName()) { + MarkIfTrivial(prop->key()); + ProcessExpression(prop->key()); + } + MarkIfTrivial(prop->obj()); + ProcessExpression(prop->obj()); - // If we have a variable as a receiver in a property store, check if - // we can mark it as trivial. - MarkIfTrivial(expr->target()->AsProperty()->obj()); } else { Visit(expr->target()); - ProcessExpression(expr->value()); - - Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - if (var != NULL) RecordAssignedVar(var); } } @@ -648,12 +660,12 @@ void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) { void AssignedVariablesAnalyzer::VisitProperty(Property* expr) { ASSERT(av_.IsEmpty()); - Visit(expr->obj()); - ProcessExpression(expr->key()); - - // In case we have a variable as a receiver, check if we can mark - // it as trivial. + if (!expr->key()->IsPropertyName()) { + MarkIfTrivial(expr->key()); + Visit(expr->key()); + } MarkIfTrivial(expr->obj()); + ProcessExpression(expr->obj()); } @@ -713,25 +725,19 @@ void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) { void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) { ASSERT(av_.IsEmpty()); - Visit(expr->left()); - - ProcessExpression(expr->right()); - - // In case we have a variable on the left side, check if we can mark - // it as trivial. + MarkIfTrivial(expr->right()); + Visit(expr->right()); MarkIfTrivial(expr->left()); + ProcessExpression(expr->left()); } void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) { ASSERT(av_.IsEmpty()); - Visit(expr->left()); - - ProcessExpression(expr->right()); - - // In case we have a variable on the left side, check if we can mark - // it as trivial. 
+ MarkIfTrivial(expr->right()); + Visit(expr->right()); MarkIfTrivial(expr->left()); + ProcessExpression(expr->left()); } @@ -746,802 +752,4 @@ void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) { } -int ReachingDefinitions::IndexFor(Variable* var, int variable_count) { - // Parameters are numbered left-to-right from the beginning of the bit - // set. Stack-allocated locals are allocated right-to-left from the end. - ASSERT(var != NULL && var->IsStackAllocated()); - Slot* slot = var->slot(); - if (slot->type() == Slot::PARAMETER) { - return slot->index(); - } else { - return (variable_count - 1) - slot->index(); - } -} - - -void Node::InitializeReachingDefinitions(int definition_count, - List* variables, - WorkList* worklist, - bool mark) { - ASSERT(!IsMarkedWith(mark)); - rd_.Initialize(definition_count); - MarkWith(mark); - worklist->Insert(this); -} - - -void BlockNode::InitializeReachingDefinitions(int definition_count, - List* variables, - WorkList* worklist, - bool mark) { - ASSERT(!IsMarkedWith(mark)); - int instruction_count = instructions_.length(); - int variable_count = variables->length(); - - rd_.Initialize(definition_count); - // The RD_in set for the entry node has a definition for each parameter - // and local. - if (predecessor_ == NULL) { - for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i); - } - - for (int i = 0; i < instruction_count; i++) { - Expression* expr = instructions_[i]->AsExpression(); - if (expr == NULL) continue; - Variable* var = expr->AssignedVariable(); - if (var == NULL || !var->IsStackAllocated()) continue; - - // All definitions of this variable are killed. - BitVector* def_set = - variables->at(ReachingDefinitions::IndexFor(var, variable_count)); - rd_.kill()->Union(*def_set); - - // All previously generated definitions are not generated. - rd_.gen()->Subtract(*def_set); - - // This one is generated. - rd_.gen()->Add(expr->num()); - } - - // Add all blocks except the entry node to the worklist. - if (predecessor_ != NULL) { - MarkWith(mark); - worklist->Insert(this); - } -} - - -void ExitNode::ComputeRDOut(BitVector* result) { - // Should not be the predecessor of any node. - UNREACHABLE(); -} - - -void BlockNode::ComputeRDOut(BitVector* result) { - // All definitions reaching this block ... - *result = *rd_.rd_in(); - // ... except those killed by the block ... - result->Subtract(*rd_.kill()); - // ... but including those generated by the block. - result->Union(*rd_.gen()); -} - - -void BranchNode::ComputeRDOut(BitVector* result) { - // Branch nodes don't kill or generate definitions. - *result = *rd_.rd_in(); -} - - -void JoinNode::ComputeRDOut(BitVector* result) { - // Join nodes don't kill or generate definitions. - *result = *rd_.rd_in(); -} - - -void ExitNode::UpdateRDIn(WorkList* worklist, bool mark) { - // The exit node has no successors so we can just update in place. New - // RD_in is the union over all predecessors. - int definition_count = rd_.rd_in()->length(); - rd_.rd_in()->Clear(); - - BitVector temp(definition_count); - for (int i = 0, len = predecessors_.length(); i < len; i++) { - // Because ComputeRDOut always overwrites temp and its value is - // always read out before calling ComputeRDOut again, we do not - // have to clear it on each iteration of the loop. - predecessors_[i]->ComputeRDOut(&temp); - rd_.rd_in()->Union(temp); - } -} - - -void BlockNode::UpdateRDIn(WorkList* worklist, bool mark) { - // The entry block has no predecessor. Its RD_in does not change. 
- if (predecessor_ == NULL) return; - - BitVector new_rd_in(rd_.rd_in()->length()); - predecessor_->ComputeRDOut(&new_rd_in); - - if (rd_.rd_in()->Equals(new_rd_in)) return; - - // Update RD_in. - *rd_.rd_in() = new_rd_in; - // Add the successor to the worklist if not already present. - if (!successor_->IsMarkedWith(mark)) { - successor_->MarkWith(mark); - worklist->Insert(successor_); - } -} - - -void BranchNode::UpdateRDIn(WorkList* worklist, bool mark) { - BitVector new_rd_in(rd_.rd_in()->length()); - predecessor_->ComputeRDOut(&new_rd_in); - - if (rd_.rd_in()->Equals(new_rd_in)) return; - - // Update RD_in. - *rd_.rd_in() = new_rd_in; - // Add the successors to the worklist if not already present. - if (!successor0_->IsMarkedWith(mark)) { - successor0_->MarkWith(mark); - worklist->Insert(successor0_); - } - if (!successor1_->IsMarkedWith(mark)) { - successor1_->MarkWith(mark); - worklist->Insert(successor1_); - } -} - - -void JoinNode::UpdateRDIn(WorkList* worklist, bool mark) { - int definition_count = rd_.rd_in()->length(); - BitVector new_rd_in(definition_count); - - // New RD_in is the union over all predecessors. - BitVector temp(definition_count); - for (int i = 0, len = predecessors_.length(); i < len; i++) { - predecessors_[i]->ComputeRDOut(&temp); - new_rd_in.Union(temp); - } - - if (rd_.rd_in()->Equals(new_rd_in)) return; - - // Update RD_in. - *rd_.rd_in() = new_rd_in; - // Add the successor to the worklist if not already present. - if (!successor_->IsMarkedWith(mark)) { - successor_->MarkWith(mark); - worklist->Insert(successor_); - } -} - - -void Node::PropagateReachingDefinitions(List* variables) { - // Nothing to do. -} - - -void BlockNode::PropagateReachingDefinitions(List* variables) { - // Propagate RD_in from the start of the block to all the variable - // references. - int variable_count = variables->length(); - BitVector rd = *rd_.rd_in(); - for (int i = 0, len = instructions_.length(); i < len; i++) { - Expression* expr = instructions_[i]->AsExpression(); - if (expr == NULL) continue; - - // Look for a variable reference to record its reaching definitions. - VariableProxy* proxy = expr->AsVariableProxy(); - if (proxy == NULL) { - // Not a VariableProxy? Maybe it's a count operation. - CountOperation* count_operation = expr->AsCountOperation(); - if (count_operation != NULL) { - proxy = count_operation->expression()->AsVariableProxy(); - } - } - if (proxy == NULL) { - // OK, Maybe it's a compound assignment. - Assignment* assignment = expr->AsAssignment(); - if (assignment != NULL && assignment->is_compound()) { - proxy = assignment->target()->AsVariableProxy(); - } - } - - if (proxy != NULL && - proxy->var()->IsStackAllocated() && - !proxy->var()->is_this()) { - // All definitions for this variable. - BitVector* definitions = - variables->at(ReachingDefinitions::IndexFor(proxy->var(), - variable_count)); - BitVector* reaching_definitions = new BitVector(*definitions); - // Intersected with all definitions (of any variable) reaching this - // instruction. - reaching_definitions->Intersect(rd); - proxy->set_reaching_definitions(reaching_definitions); - } - - // It may instead (or also) be a definition. If so update the running - // value of reaching definitions for the block. - Variable* var = expr->AssignedVariable(); - if (var == NULL || !var->IsStackAllocated()) continue; - - // All definitions of this variable are killed. 
- BitVector* def_set = - variables->at(ReachingDefinitions::IndexFor(var, variable_count)); - rd.Subtract(*def_set); - // This definition is generated. - rd.Add(expr->num()); - } -} - - -void ReachingDefinitions::Compute() { - // The definitions in the body plus an implicit definition for each - // variable at function entry. - int definition_count = body_definitions_->length() + variable_count_; - int node_count = postorder_->length(); - - // Step 1: For each stack-allocated variable, identify the set of all its - // definitions. - List variables; - for (int i = 0; i < variable_count_; i++) { - // Add the initial definition for each variable. - BitVector* initial = new BitVector(definition_count); - initial->Add(i); - variables.Add(initial); - } - for (int i = 0, len = body_definitions_->length(); i < len; i++) { - // Account for each definition in the body as a definition of the - // defined variable. - Variable* var = body_definitions_->at(i)->AssignedVariable(); - variables[IndexFor(var, variable_count_)]->Add(i + variable_count_); - } - - // Step 2: Compute KILL and GEN for each block node, initialize RD_in for - // all nodes, and mark and add all nodes to the worklist in reverse - // postorder. All nodes should currently have the same mark. - bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current. - WorkList worklist(node_count); - for (int i = node_count - 1; i >= 0; i--) { - postorder_->at(i)->InitializeReachingDefinitions(definition_count, - &variables, - &worklist, - mark); - } - - // Step 3: Until the worklist is empty, remove an item compute and update - // its rd_in based on its predecessor's rd_out. If rd_in has changed, add - // all necessary successors to the worklist. - while (!worklist.is_empty()) { - Node* node = worklist.Remove(); - node->MarkWith(!mark); - node->UpdateRDIn(&worklist, mark); - } - - // Step 4: Based on RD_in for block nodes, propagate reaching definitions - // to all variable uses in the block. - for (int i = 0; i < node_count; i++) { - postorder_->at(i)->PropagateReachingDefinitions(&variables); - } -} - - -bool TypeAnalyzer::IsPrimitiveDef(int def_num) { - if (def_num < param_count_) return false; - if (def_num < variable_count_) return true; - return body_definitions_->at(def_num - variable_count_)->IsPrimitive(); -} - - -void TypeAnalyzer::Compute() { - bool changed; - int count = 0; - - do { - changed = false; - - if (FLAG_print_graph_text) { - PrintF("TypeAnalyzer::Compute - iteration %d\n", count++); - } - - for (int i = postorder_->length() - 1; i >= 0; --i) { - Node* node = postorder_->at(i); - if (node->IsBlockNode()) { - BlockNode* block = BlockNode::cast(node); - for (int j = 0; j < block->instructions()->length(); j++) { - Expression* expr = block->instructions()->at(j)->AsExpression(); - if (expr != NULL) { - // For variable uses: Compute new type from reaching definitions. - VariableProxy* proxy = expr->AsVariableProxy(); - if (proxy != NULL && proxy->reaching_definitions() != NULL) { - BitVector* rd = proxy->reaching_definitions(); - bool prim_type = true; - // TODO(fsc): A sparse set representation of reaching - // definitions would speed up iterating here. - for (int k = 0; k < rd->length(); k++) { - if (rd->Contains(k) && !IsPrimitiveDef(k)) { - prim_type = false; - break; - } - } - // Reset changed flag if new type information was computed. 
- if (prim_type != proxy->IsPrimitive()) { - changed = true; - proxy->SetIsPrimitive(prim_type); - } - } - } - } - } - } - } while (changed); -} - - -void Node::MarkCriticalInstructions( - List* stack, - ZoneList* body_definitions, - int variable_count) { -} - - -void BlockNode::MarkCriticalInstructions( - List* stack, - ZoneList* body_definitions, - int variable_count) { - for (int i = instructions_.length() - 1; i >= 0; i--) { - // Only expressions can appear in the flow graph for now. - Expression* expr = instructions_[i]->AsExpression(); - if (expr != NULL && !expr->is_live() && - (expr->is_loop_condition() || expr->IsCritical())) { - expr->mark_as_live(); - expr->ProcessNonLiveChildren(stack, body_definitions, variable_count); - } - } -} - - -void MarkLiveCode(ZoneList* nodes, - ZoneList* body_definitions, - int variable_count) { - List stack(20); - - // Mark the critical AST nodes as live; mark their dependencies and - // add them to the marking stack. - for (int i = nodes->length() - 1; i >= 0; i--) { - nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions, - variable_count); - } - - // Continue marking dependencies until no more. - while (!stack.is_empty()) { - // Only expressions can appear in the flow graph for now. - Expression* expr = stack.RemoveLast()->AsExpression(); - if (expr != NULL) { - expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count); - } - } -} - - -#ifdef DEBUG - -// Print a textual representation of an instruction in a flow graph. Using -// the AstVisitor is overkill because there is no recursion here. It is -// only used for printing in debug mode. -class TextInstructionPrinter: public AstVisitor { - public: - TextInstructionPrinter() : number_(0) {} - - int NextNumber() { return number_; } - void AssignNumber(AstNode* node) { node->set_num(number_++); } - - private: - // AST node visit functions. 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - int number_; - - DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter); -}; - - -void TextInstructionPrinter::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitBlock(Block* stmt) { - PrintF("Block"); -} - - -void TextInstructionPrinter::VisitExpressionStatement( - ExpressionStatement* stmt) { - PrintF("ExpressionStatement"); -} - - -void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) { - PrintF("EmptyStatement"); -} - - -void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) { - PrintF("IfStatement"); -} - - -void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) { - PrintF("return @%d", stmt->expression()->num()); -} - - -void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) { - PrintF("WithEnterStatement"); -} - - -void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) { - PrintF("WithExitStatement"); -} - - -void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) { - PrintF("DoWhileStatement"); -} - - -void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) { - PrintF("WhileStatement"); -} - - -void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) { - PrintF("ForStatement"); -} - - -void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) { - PrintF("ForInStatement"); -} - - -void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitTryFinallyStatement( - TryFinallyStatement* stmt) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) { - PrintF("DebuggerStatement"); -} - - -void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) { - PrintF("FunctionLiteral"); -} - - -void TextInstructionPrinter::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - PrintF("SharedFunctionInfoLiteral"); -} - - -void TextInstructionPrinter::VisitConditional(Conditional* expr) { - PrintF("Conditional"); -} - - -void TextInstructionPrinter::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) { - Variable* var = expr->AsVariable(); - if (var != NULL) { - PrintF("%s", *var->name()->ToCString()); - if (var->IsStackAllocated() && expr->reaching_definitions() != NULL) { - expr->reaching_definitions()->Print(); - } - } else { - ASSERT(expr->AsProperty() != NULL); - VisitProperty(expr->AsProperty()); - } -} - - -void TextInstructionPrinter::VisitLiteral(Literal* expr) { - expr->handle()->ShortPrint(); -} - - -void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) { - PrintF("RegExpLiteral"); -} - - -void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) { - PrintF("ObjectLiteral"); -} - - -void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) { - PrintF("ArrayLiteral"); -} - - -void TextInstructionPrinter::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - PrintF("CatchExtensionObject"); -} - - -void 
TextInstructionPrinter::VisitAssignment(Assignment* expr) { - Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - Property* prop = expr->target()->AsProperty(); - - if (var == NULL && prop == NULL) { - // Throw reference error. - Visit(expr->target()); - return; - } - - // Print the left-hand side. - if (var != NULL) { - PrintF("%s", *var->name()->ToCString()); - } else if (prop != NULL) { - PrintF("@%d", prop->obj()->num()); - if (prop->key()->IsPropertyName()) { - PrintF("."); - ASSERT(prop->key()->AsLiteral() != NULL); - prop->key()->AsLiteral()->handle()->Print(); - } else { - PrintF("[@%d]", prop->key()->num()); - } - } - - // Print the operation. - if (expr->is_compound()) { - PrintF(" = "); - // Print the left-hand side again when compound. - if (var != NULL) { - PrintF("@%d", expr->target()->num()); - } else { - PrintF("@%d", prop->obj()->num()); - if (prop->key()->IsPropertyName()) { - PrintF("."); - ASSERT(prop->key()->AsLiteral() != NULL); - prop->key()->AsLiteral()->handle()->Print(); - } else { - PrintF("[@%d]", prop->key()->num()); - } - } - // Print the corresponding binary operator. - PrintF(" %s ", Token::String(expr->binary_op())); - } else { - PrintF(" %s ", Token::String(expr->op())); - } - - // Print the right-hand side. - PrintF("@%d", expr->value()->num()); - - if (expr->num() != AstNode::kNoNumber) { - PrintF(" ;; D%d", expr->num()); - } -} - - -void TextInstructionPrinter::VisitThrow(Throw* expr) { - PrintF("throw @%d", expr->exception()->num()); -} - - -void TextInstructionPrinter::VisitProperty(Property* expr) { - if (expr->key()->IsPropertyName()) { - PrintF("@%d.", expr->obj()->num()); - ASSERT(expr->key()->AsLiteral() != NULL); - expr->key()->AsLiteral()->handle()->Print(); - } else { - PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num()); - } -} - - -void TextInstructionPrinter::VisitCall(Call* expr) { - PrintF("@%d(", expr->expression()->num()); - ZoneList* arguments = expr->arguments(); - for (int i = 0, len = arguments->length(); i < len; i++) { - if (i != 0) PrintF(", "); - PrintF("@%d", arguments->at(i)->num()); - } - PrintF(")"); -} - - -void TextInstructionPrinter::VisitCallNew(CallNew* expr) { - PrintF("new @%d(", expr->expression()->num()); - ZoneList* arguments = expr->arguments(); - for (int i = 0, len = arguments->length(); i < len; i++) { - if (i != 0) PrintF(", "); - PrintF("@%d", arguments->at(i)->num()); - } - PrintF(")"); -} - - -void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) { - PrintF("%s(", *expr->name()->ToCString()); - ZoneList* arguments = expr->arguments(); - for (int i = 0, len = arguments->length(); i < len; i++) { - if (i != 0) PrintF(", "); - PrintF("@%d", arguments->at(i)->num()); - } - PrintF(")"); -} - - -void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) { - PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num()); -} - - -void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) { - if (expr->is_prefix()) { - PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num()); - } else { - PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op())); - } - - if (expr->num() != AstNode::kNoNumber) { - PrintF(" ;; D%d", expr->num()); - } -} - - -void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) { - ASSERT(expr->op() != Token::COMMA); - ASSERT(expr->op() != Token::OR); - ASSERT(expr->op() != Token::AND); - PrintF("@%d %s @%d", - expr->left()->num(), - Token::String(expr->op()), - expr->right()->num()); -} - - 
-void TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) { - PrintF("@%d %s @%d", - expr->left()->num(), - Token::String(expr->op()), - expr->right()->num()); -} - - -void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) { - PrintF("ThisFunction"); -} - - -static int node_count = 0; -static int instruction_count = 0; - - -void Node::AssignNodeNumber() { - set_number(node_count++); -} - - -void Node::PrintReachingDefinitions() { - if (rd_.rd_in() != NULL) { - ASSERT(rd_.kill() != NULL && rd_.gen() != NULL); - - PrintF("RD_in = "); - rd_.rd_in()->Print(); - PrintF("\n"); - - PrintF("RD_kill = "); - rd_.kill()->Print(); - PrintF("\n"); - - PrintF("RD_gen = "); - rd_.gen()->Print(); - PrintF("\n"); - } -} - - -void ExitNode::PrintText() { - PrintReachingDefinitions(); - PrintF("L%d: Exit\n\n", number()); -} - - -void BlockNode::PrintText() { - PrintReachingDefinitions(); - // Print the instructions in the block. - PrintF("L%d: Block\n", number()); - TextInstructionPrinter printer; - for (int i = 0, len = instructions_.length(); i < len; i++) { - AstNode* instr = instructions_[i]; - // Print a star next to dead instructions. - if (instr->AsExpression() != NULL && instr->AsExpression()->is_live()) { - PrintF(" "); - } else { - PrintF("* "); - } - PrintF("%d ", printer.NextNumber()); - printer.Visit(instr); - printer.AssignNumber(instr); - PrintF("\n"); - } - PrintF("goto L%d\n\n", successor_->number()); -} - - -void BranchNode::PrintText() { - PrintReachingDefinitions(); - PrintF("L%d: Branch\n", number()); - PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number()); -} - - -void JoinNode::PrintText() { - PrintReachingDefinitions(); - PrintF("L%d: Join(", number()); - for (int i = 0, len = predecessors_.length(); i < len; i++) { - if (i != 0) PrintF(", "); - PrintF("L%d", predecessors_[i]->number()); - } - PrintF(")\ngoto L%d\n\n", successor_->number()); -} - - -void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList* postorder) { - PrintF("\n========\n"); - PrintF("name = %s\n", *fun->name()->ToCString()); - - // Number nodes and instructions in reverse postorder. - node_count = 0; - instruction_count = 0; - for (int i = postorder->length() - 1; i >= 0; i--) { - postorder->at(i)->AssignNodeNumber(); - } - - // Print basic blocks in reverse postorder. - for (int i = postorder->length() - 1; i >= 0; i--) { - postorder->at(i)->PrintText(); - } -} - -#endif // DEBUG - - } } // namespace v8::internal diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index 66df63514ec..079da65b4d9 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -272,65 +272,6 @@ class AssignedVariablesAnalyzer : public AstVisitor { }; -class ReachingDefinitions BASE_EMBEDDED { - public: - ReachingDefinitions(ZoneList* postorder, - ZoneList* body_definitions, - int variable_count) - : postorder_(postorder), - body_definitions_(body_definitions), - variable_count_(variable_count) { - } - - static int IndexFor(Variable* var, int variable_count); - - void Compute(); - - private: - // A (postorder) list of flow-graph nodes in the body. - ZoneList* postorder_; - - // A list of all the definitions in the body. 
- ZoneList* body_definitions_; - - int variable_count_; - - DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions); -}; - - -class TypeAnalyzer BASE_EMBEDDED { - public: - TypeAnalyzer(ZoneList* postorder, - ZoneList* body_definitions, - int variable_count, - int param_count) - : postorder_(postorder), - body_definitions_(body_definitions), - variable_count_(variable_count), - param_count_(param_count) {} - - void Compute(); - - private: - // Get the primitity of definition number i. Definitions are numbered - // by the flow graph builder. - bool IsPrimitiveDef(int def_num); - - ZoneList* postorder_; - ZoneList* body_definitions_; - int variable_count_; - int param_count_; - - DISALLOW_COPY_AND_ASSIGN(TypeAnalyzer); -}; - - -void MarkLiveCode(ZoneList* nodes, - ZoneList* body_definitions, - int variable_count); - - } } // namespace v8::internal diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index 46769c6bdfa..216d5df9968 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -588,6 +588,20 @@ function TimeString(time) { function LocalTimezoneString(time) { + var old_timezone = timezone_cache_timezone; + var timezone = LocalTimezone(time); + if (old_timezone && timezone != old_timezone) { + // If the timezone string has changed from the one that we cached, + // the local time offset may now be wrong. So we need to update it + // and try again. + local_time_offset = %DateLocalTimeOffset(); + // We also need to invalidate the DST cache as the new timezone may have + // different DST times. + var dst_cache = DST_offset_cache; + dst_cache.start = 0; + dst_cache.end = -1; + } + var timezoneOffset = (DaylightSavingsOffset(time) + local_time_offset) / msPerMinute; var sign = (timezoneOffset >= 0) ? 1 : -1; @@ -595,7 +609,7 @@ function LocalTimezoneString(time) { var min = FLOOR((sign * timezoneOffset)%60); var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + TwoDigitString(hours) + TwoDigitString(min); - return gmt + ' (' + LocalTimezone(time) + ')'; + return gmt + ' (' + timezone + ')'; } @@ -654,7 +668,8 @@ function DateNow() { function DateToString() { var t = DATE_VALUE(this); if (NUMBER_IS_NAN(t)) return kInvalidDate; - return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t); + var time_zone_string = LocalTimezoneString(t); // May update local offset. + return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string; } @@ -670,8 +685,8 @@ function DateToDateString() { function DateToTimeString() { var t = DATE_VALUE(this); if (NUMBER_IS_NAN(t)) return kInvalidDate; - var lt = LocalTimeNoCheck(t); - return TimeString(lt) + LocalTimezoneString(lt); + var time_zone_string = LocalTimezoneString(t); // May update local offset. + return TimeString(LocalTimeNoCheck(t)) + time_zone_string; } diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index a81530e8438..ccec6af2fce 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -124,6 +124,12 @@ BreakPoint.prototype.source_position = function() { }; +BreakPoint.prototype.updateSourcePosition = function(new_position, script) { + this.source_position_ = new_position; + // TODO(635): also update line and column. 
+}; + + BreakPoint.prototype.hit_count = function() { return this.hit_count_; }; @@ -327,7 +333,7 @@ ScriptBreakPoint.prototype.matchesScript = function(script) { if (this.type_ == Debug.ScriptBreakPointType.ScriptId) { return this.script_id_ == script.id; } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName - return this.script_name_ == script.name && + return this.script_name_ == script.nameOrSourceURL() && script.line_offset <= this.line_ && this.line_ < script.line_offset + script.lineCount(); } @@ -474,6 +480,11 @@ Debug.disassembleConstructor = function(f) { return %DebugDisassembleConstructor(f); }; +Debug.ExecuteInDebugContext = function(f, without_debugger) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %ExecuteInDebugContext(f, !!without_debugger); +}; + Debug.sourcePosition = function(f) { if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); return %FunctionGetScriptSourcePosition(f); @@ -1274,7 +1285,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) // Response controls running state. this.running_ = response.running; } - response.running = this.running_; + response.running = this.running_; return response.toJSONProtocol(); } catch (e) { // Failed to generate response - return generic error. @@ -1870,12 +1881,12 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) { return response.failed('Invalid types "' + request.arguments.types + '"'); } } - + if (!IS_UNDEFINED(request.arguments.includeSource)) { includeSource = %ToBoolean(request.arguments.includeSource); response.setOption('includeSource', includeSource); } - + if (IS_ARRAY(request.arguments.ids)) { idsToInclude = {}; var ids = request.arguments.ids; @@ -1966,13 +1977,6 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) return response.failed('Missing arguments'); } var script_id = request.arguments.script_id; - var change_pos = parseInt(request.arguments.change_pos); - var change_len = parseInt(request.arguments.change_len); - var new_string = request.arguments.new_string; - if (!IS_STRING(new_string)) { - response.failed('Argument "new_string" is not a string value'); - return; - } var scripts = %DebugGetLoadedScripts(); @@ -1986,16 +1990,38 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) response.failed('Script not found'); return; } + + // A function that calls a proper signature of LiveEdit API. + var invocation; var change_log = new Array(); + + if (IS_STRING(request.arguments.new_source)) { + var new_source = request.arguments.new_source; + invocation = function() { + return Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log); + } + } else { + var change_pos = parseInt(request.arguments.change_pos); + var change_len = parseInt(request.arguments.change_len); + var new_string = request.arguments.new_string; + if (!IS_STRING(new_string)) { + response.failed('Argument "new_string" is not a string value'); + return; + } + invocation = function() { + return Debug.LiveEditChangeScript(the_script, change_pos, change_len, + new_string, change_log); + } + } + try { - Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string, - change_log); + invocation(); } catch (e) { if (e instanceof Debug.LiveEditChangeScript.Failure) { // Let's treat it as a "success" so that body with change_log will be // sent back. "change_log" will have "failure" field set. 
- change_log.push( { failure: true } ); + change_log.push( { failure: true, message: e.toString() } ); } else { throw e; } @@ -2076,7 +2102,7 @@ function ObjectToProtocolObject_(object, mirror_serializer) { } } } - + return content; } @@ -2099,7 +2125,7 @@ function ArrayToProtocolArray_(array, mirror_serializer) { /** - * Convert a value to its debugger protocol representation. + * Convert a value to its debugger protocol representation. * @param {*} value The value to format as protocol value. * @param {MirrorSerializer} mirror_serializer The serializer to use if any * mirror objects are encountered. diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 4dce4cff053..bac05a06c44 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -814,6 +814,8 @@ Object* Debug::Break(Arguments args) { HandleScope scope; ASSERT(args.length() == 0); + thread_local_.frames_are_dropped_ = false; + // Get the top-most JavaScript frame. JavaScriptFrameIterator it; JavaScriptFrame* frame = it.frame(); @@ -890,8 +892,13 @@ Object* Debug::Break(Arguments args) { PrepareStep(step_action, step_count); } - // Install jump to the call address which was overwritten. - SetAfterBreakTarget(frame); + if (thread_local_.frames_are_dropped_) { + // We must have been calling IC stub. Do not return there anymore. + Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit); + thread_local_.after_break_target_ = plain_return->entry(); + } else { + SetAfterBreakTarget(frame); + } return Heap::undefined_value(); } @@ -1655,6 +1662,12 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) { } +void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) { + thread_local_.frames_are_dropped_ = true; + thread_local_.break_frame_id_ = new_break_frame_id; +} + + bool Debug::IsDebugGlobal(GlobalObject* global) { return IsLoaded() && global == Debug::debug_context()->global(); } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 546512b1130..a0e8b818e7a 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -377,10 +377,18 @@ class Debug { static void GenerateConstructCallDebugBreak(MacroAssembler* masm); static void GenerateReturnDebugBreak(MacroAssembler* masm); static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm); + static void GeneratePlainReturnLiveEdit(MacroAssembler* masm); + static void GenerateFrameDropperLiveEdit(MacroAssembler* masm); // Called from stub-cache.cc. static void GenerateCallICDebugBreak(MacroAssembler* masm); + static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id); + + static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame, + Handle code); + static const int kFrameDropperFrameSize; + private: static bool CompileDebuggerScript(int index); static void ClearOneShot(); @@ -446,6 +454,9 @@ class Debug { // Storage location for jump when exiting debug break calls. Address after_break_target_; + // Indicates that LiveEdit has patched the stack. + bool frames_are_dropped_; + // Top debugger entry. 
EnterDebugger* debugger_entry_; diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 20684136cec..e8b0d942ad6 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -221,8 +221,8 @@ bool StackGuard::IsStackOverflow() { void StackGuard::EnableInterrupts() { ExecutionAccess access; - if (IsSet(access)) { - set_limits(kInterruptLimit, access); + if (has_pending_interrupts(access)) { + set_interrupt_limits(access); } } @@ -249,11 +249,6 @@ void StackGuard::DisableInterrupts() { } -bool StackGuard::IsSet(const ExecutionAccess& lock) { - return thread_local_.interrupt_flags_ != 0; -} - - bool StackGuard::IsInterrupted() { ExecutionAccess access; return thread_local_.interrupt_flags_ & INTERRUPT; @@ -263,7 +258,7 @@ bool StackGuard::IsInterrupted() { void StackGuard::Interrupt() { ExecutionAccess access; thread_local_.interrupt_flags_ |= INTERRUPT; - set_limits(kInterruptLimit, access); + set_interrupt_limits(access); } @@ -276,7 +271,7 @@ bool StackGuard::IsPreempted() { void StackGuard::Preempt() { ExecutionAccess access; thread_local_.interrupt_flags_ |= PREEMPT; - set_limits(kInterruptLimit, access); + set_interrupt_limits(access); } @@ -289,7 +284,7 @@ bool StackGuard::IsTerminateExecution() { void StackGuard::TerminateExecution() { ExecutionAccess access; thread_local_.interrupt_flags_ |= TERMINATE; - set_limits(kInterruptLimit, access); + set_interrupt_limits(access); } @@ -303,7 +298,7 @@ bool StackGuard::IsDebugBreak() { void StackGuard::DebugBreak() { ExecutionAccess access; thread_local_.interrupt_flags_ |= DEBUGBREAK; - set_limits(kInterruptLimit, access); + set_interrupt_limits(access); } @@ -317,7 +312,7 @@ void StackGuard::DebugCommand() { if (FLAG_debugger_auto_break) { ExecutionAccess access; thread_local_.interrupt_flags_ |= DEBUGCOMMAND; - set_limits(kInterruptLimit, access); + set_interrupt_limits(access); } } #endif @@ -325,7 +320,7 @@ void StackGuard::DebugCommand() { void StackGuard::Continue(InterruptFlag after_what) { ExecutionAccess access; thread_local_.interrupt_flags_ &= ~static_cast(after_what); - if (thread_local_.interrupt_flags_ == 0) { + if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) { reset_limits(access); } } diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index 10683d69e2b..e683e122592 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -199,12 +199,24 @@ class StackGuard : public AllStatic { private: // You should hold the ExecutionAccess lock when calling this method. - static bool IsSet(const ExecutionAccess& lock); + static bool has_pending_interrupts(const ExecutionAccess& lock) { + // Sanity check: We shouldn't be asking about pending interrupts + // unless we're not postponing them anymore. + ASSERT(!should_postpone_interrupts(lock)); + return thread_local_.interrupt_flags_ != 0; + } + + // You should hold the ExecutionAccess lock when calling this method. + static bool should_postpone_interrupts(const ExecutionAccess& lock) { + return thread_local_.postpone_interrupts_nesting_ > 0; + } // You should hold the ExecutionAccess lock when calling this method. - static void set_limits(uintptr_t value, const ExecutionAccess& lock) { - thread_local_.jslimit_ = value; - thread_local_.climit_ = value; + static void set_interrupt_limits(const ExecutionAccess& lock) { + // Ignore attempts to interrupt when interrupts are postponed. 
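+ // The flag bits stay set, though, so EnableInterrupts() re-arms these limits once the postpone scope unwinds.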
+ if (should_postpone_interrupts(lock)) return; + thread_local_.jslimit_ = kInterruptLimit; + thread_local_.climit_ = kInterruptLimit; Heap::SetStackLimits(); } diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index b32ee9fb873..2e2074b7a63 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -391,6 +391,8 @@ DEFINE_bool(prof_auto, true, DEFINE_bool(prof_lazy, false, "Used with --prof, only does sampling and logging" " when profiler is active (implies --noprof_auto).") +DEFINE_bool(prof_browser_mode, true, + "Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false, "Log regular expression execution.") DEFINE_bool(sliding_state_window, false, "Update sliding state window counters.") diff --git a/deps/v8/src/flow-graph.cc b/deps/v8/src/flow-graph.cc index bd9602f8405..02a2cd9cfe5 100644 --- a/deps/v8/src/flow-graph.cc +++ b/deps/v8/src/flow-graph.cc @@ -26,232 +26,87 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "flow-graph.h" +#include "scopes.h" namespace v8 { namespace internal { -void FlowGraph::AppendInstruction(AstNode* instruction) { - // Add a (non-null) AstNode to the end of the graph fragment. - ASSERT(instruction != NULL); - if (exit()->IsExitNode()) return; - if (!exit()->IsBlockNode()) AppendNode(new BlockNode()); - BlockNode::cast(exit())->AddInstruction(instruction); -} - - -void FlowGraph::AppendNode(Node* node) { - // Add a node to the end of the graph. An empty block is added to - // maintain edge-split form (that no join nodes or exit nodes as - // successors to branch nodes). - ASSERT(node != NULL); - if (exit()->IsExitNode()) return; - if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) { - AppendNode(new BlockNode()); - } - exit()->AddSuccessor(node); - node->AddPredecessor(exit()); - exit_ = node; -} - - -void FlowGraph::AppendGraph(FlowGraph* graph) { - // Add a flow graph fragment to the end of this one. An empty block is - // added to maintain edge-split form (that no join nodes or exit nodes as - // successors to branch nodes). - ASSERT(graph != NULL); - if (exit()->IsExitNode()) return; - Node* node = graph->entry(); - if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) { - AppendNode(new BlockNode()); - } - exit()->AddSuccessor(node); - node->AddPredecessor(exit()); - exit_ = graph->exit(); -} - - -void FlowGraph::Split(BranchNode* branch, - FlowGraph* left, - FlowGraph* right, - JoinNode* join) { - // Add the branch node, left flowgraph, join node. - AppendNode(branch); - AppendGraph(left); - AppendNode(join); - - // Splice in the right flowgraph. - right->AppendNode(join); - branch->AddSuccessor(right->entry()); - right->entry()->AddPredecessor(branch); -} - - -void FlowGraph::Loop(JoinNode* join, - FlowGraph* condition, - BranchNode* branch, - FlowGraph* body) { - // Add the join, condition and branch. Add join's predecessors in - // left-to-right order. - AppendNode(join); - body->AppendNode(join); - AppendGraph(condition); - AppendNode(branch); - - // Splice in the body flowgraph. 
- branch->AddSuccessor(body->entry()); - body->entry()->AddPredecessor(branch); -} - - -void ExitNode::Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder) { - preorder->Add(this); - postorder->Add(this); -} - - -void BlockNode::Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder) { - ASSERT(successor_ != NULL); +void BasicBlock::BuildTraversalOrder(ZoneList* preorder, + ZoneList* postorder, + bool mark) { + if (mark_ == mark) return; + mark_ = mark; preorder->Add(this); - if (!successor_->IsMarkedWith(mark)) { - successor_->MarkWith(mark); - successor_->Traverse(mark, preorder, postorder); + if (right_successor_ != NULL) { + right_successor_->BuildTraversalOrder(preorder, postorder, mark); } - postorder->Add(this); -} - - -void BranchNode::Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder) { - ASSERT(successor0_ != NULL && successor1_ != NULL); - preorder->Add(this); - if (!successor1_->IsMarkedWith(mark)) { - successor1_->MarkWith(mark); - successor1_->Traverse(mark, preorder, postorder); - } - if (!successor0_->IsMarkedWith(mark)) { - successor0_->MarkWith(mark); - successor0_->Traverse(mark, preorder, postorder); + if (left_successor_ != NULL) { + left_successor_->BuildTraversalOrder(preorder, postorder, mark); } postorder->Add(this); } -void JoinNode::Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder) { - ASSERT(successor_ != NULL); - preorder->Add(this); - if (!successor_->IsMarkedWith(mark)) { - successor_->MarkWith(mark); - successor_->Traverse(mark, preorder, postorder); - } - postorder->Add(this); -} - +FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) { + // Create new entry and exit nodes. These will not change during + // construction. + entry_ = new BasicBlock(NULL); + exit_ = new BasicBlock(NULL); + // Begin accumulating instructions in the entry block. + current_ = entry_; -void FlowGraphBuilder::Build(FunctionLiteral* lit) { - global_exit_ = new ExitNode(); + VisitDeclarations(lit->scope()->declarations()); VisitStatements(lit->body()); - - if (HasStackOverflow()) return; - - // The graph can end with a branch node (if the function ended with a - // loop). Maintain edge-split form (no join nodes or exit nodes as - // successors to branch nodes). - if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode()); - graph_.AppendNode(global_exit_); - - // Build preorder and postorder traversal orders. All the nodes in - // the graph have the same mark flag. For the traversal, use that - // flag's negation. Traversal will flip all the flags. - bool mark = graph_.entry()->IsMarkedWith(false); - graph_.entry()->MarkWith(mark); - graph_.entry()->Traverse(mark, &preorder_, &postorder_); -} - - -// This function peels off one iteration of a for-loop. The return value -// is either a block statement containing the peeled loop or NULL in case -// there is a stack overflow. -static Statement* PeelForLoop(ForStatement* stmt) { - // Mark this for-statement as processed. - stmt->set_peel_this_loop(false); - - // Create new block containing the init statement of the for-loop and - // an if-statement containing the peeled iteration and the original - // loop without the init-statement. - Block* block = new Block(NULL, 2, false); - if (stmt->init() != NULL) { - Statement* init = stmt->init(); - // The init statement gets the statement position of the for-loop - // to make debugging of peeled loops possible. 
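The helper being removed here (its remainder continues below) peeled one iteration off a for-loop: the init statement is hoisted into a fresh block, a copy of the test guards copies of the body and next statement, and the original loop follows with its init stripped. The same shape, written out by hand on a plain C++ loop rather than on the AST:

// Peeled form of "for (init; cond; next) body".
int SumPeeled(const int* a, int n) {
  int sum = 0;
  int i = 0;              // init, hoisted out of the loop
  if (i < n) {            // peeled copy of the test
    sum += a[i];          // peeled copy of the body
    ++i;                  // peeled copy of the next statement
    for (; i < n; ++i) {  // the original loop, init removed
      sum += a[i];
    }
  }
  return sum;
}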
- init->set_statement_pos(stmt->statement_pos()); - block->AddStatement(init); + // In the event of stack overflow or failure to handle a syntactic + // construct, return an invalid flow graph. + if (HasStackOverflow()) return new FlowGraph(NULL, NULL); + + // If current is not the exit, add a link to the exit. + if (current_ != exit_) { + // If current already has a successor (i.e., will be a branch node) and + // if the exit already has a predecessor, insert an empty block to + // maintain edge split form. + if (current_->HasSuccessor() && exit_->HasPredecessor()) { + current_ = new BasicBlock(current_); + } + Literal* undefined = new Literal(Factory::undefined_value()); + current_->AddInstruction(new ReturnStatement(undefined)); + exit_->AddPredecessor(current_); } - // Copy the condition. - CopyAstVisitor copy_visitor; - Expression* cond_copy = stmt->cond() != NULL - ? copy_visitor.DeepCopyExpr(stmt->cond()) - : new Literal(Factory::true_value()); - if (copy_visitor.HasStackOverflow()) return NULL; - - // Construct a block with the peeled body and the rest of the for-loop. - Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body()); - if (copy_visitor.HasStackOverflow()) return NULL; - - Statement* next_copy = stmt->next() != NULL - ? copy_visitor.DeepCopyStmt(stmt->next()) - : new EmptyStatement(); - if (copy_visitor.HasStackOverflow()) return NULL; + FlowGraph* graph = new FlowGraph(entry_, exit_); + bool mark = !entry_->GetMark(); + entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark); - Block* peeled_body = new Block(NULL, 3, false); - peeled_body->AddStatement(body_copy); - peeled_body->AddStatement(next_copy); - peeled_body->AddStatement(stmt); - - // Remove the duplicated init statement from the for-statement. - stmt->set_init(NULL); - - // Create new test at the top and add it to the newly created block. - IfStatement* test = new IfStatement(cond_copy, - peeled_body, - new EmptyStatement()); - block->AddStatement(test); - return block; -} - - -void FlowGraphBuilder::VisitStatements(ZoneList* stmts) { - for (int i = 0, len = stmts->length(); i < len; i++) { - stmts->at(i) = ProcessStatement(stmts->at(i)); +#ifdef DEBUG + // Number the nodes in reverse postorder. + int n = 0; + for (int i = graph->postorder()->length() - 1; i >= 0; --i) { + graph->postorder()->at(i)->set_number(n++); } -} +#endif - -Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) { - if (FLAG_loop_peeling && - stmt->AsForStatement() != NULL && - stmt->AsForStatement()->peel_this_loop()) { - Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement()); - if (tmp_stmt == NULL) { - SetStackOverflow(); - } else { - stmt = tmp_stmt; - } - } - Visit(stmt); - return stmt; + return graph; } void FlowGraphBuilder::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); + Variable* var = decl->proxy()->AsVariable(); + Slot* slot = var->slot(); + // We allow only declarations that do not require code generation. + // The following all require code generation: global variables and + // functions, variables with slot type LOOKUP, declarations with + // mode CONST, and functions. + + if (var->is_global() || + (slot != NULL && slot->type() == Slot::LOOKUP) || + decl->mode() == Variable::CONST || + decl->fun() != NULL) { + // Here and in the rest of the flow graph builder we indicate an + // unsupported syntactic construct by setting the stack overflow + // flag on the visitor. This causes bailout of the visitor. 
+ SetStackOverflow(); + } } @@ -271,21 +126,24 @@ void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) { + // Build a diamond in the flow graph. First accumulate the instructions + // of the test in the current basic block. Visit(stmt->condition()); - BranchNode* branch = new BranchNode(); - FlowGraph original = graph_; - graph_ = FlowGraph::Empty(); - stmt->set_then_statement(ProcessStatement(stmt->then_statement())); + // Remember the branch node and accumulate the true branch as its left + // successor. This relies on the successors being added left to right. + BasicBlock* branch = current_; + current_ = new BasicBlock(branch); + Visit(stmt->then_statement()); - FlowGraph left = graph_; - graph_ = FlowGraph::Empty(); - stmt->set_else_statement(ProcessStatement(stmt->else_statement())); + // Construct a join node and then accumulate the false branch in a fresh + // successor of the branch node. + BasicBlock* join = new BasicBlock(current_); + current_ = new BasicBlock(branch); + Visit(stmt->else_statement()); + join->AddPredecessor(current_); - if (HasStackOverflow()) return; - JoinNode* join = new JoinNode(); - original.Split(branch, &left, &graph_, join); - graph_ = original; + current_ = join; } @@ -330,23 +188,26 @@ void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) { - if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init())); + // Build a loop in the flow graph. First accumulate the instructions of + // the initializer in the current basic block. + if (stmt->init() != NULL) Visit(stmt->init()); - JoinNode* join = new JoinNode(); - FlowGraph original = graph_; - graph_ = FlowGraph::Empty(); + // Create a new basic block for the test. This will be the join node. + BasicBlock* join = new BasicBlock(current_); + current_ = join; if (stmt->cond() != NULL) Visit(stmt->cond()); - BranchNode* branch = new BranchNode(); - FlowGraph condition = graph_; - graph_ = FlowGraph::Empty(); - stmt->set_body(ProcessStatement(stmt->body())); + // The current node is the branch node. Create a new basic block to begin + // the body. + BasicBlock* branch = current_; + current_ = new BasicBlock(branch); + Visit(stmt->body()); + if (stmt->next() != NULL) Visit(stmt->next()); - if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next())); - - if (HasStackOverflow()) return; - original.Loop(join, &condition, branch, &graph_); - graph_ = original; + // Add the backward edge from the end of the body and continue with the + // false arm of the branch. + join->AddPredecessor(current_); + current_ = new BasicBlock(branch); } @@ -387,17 +248,18 @@ void FlowGraphBuilder::VisitConditional(Conditional* expr) { void FlowGraphBuilder::VisitSlot(Slot* expr) { + // Slots do not appear in the AST. UNREACHABLE(); } void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) { - graph_.AppendInstruction(expr); + current_->AddInstruction(expr); } void FlowGraphBuilder::VisitLiteral(Literal* expr) { - graph_.AppendInstruction(expr); + current_->AddInstruction(expr); } @@ -422,29 +284,30 @@ void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) { void FlowGraphBuilder::VisitAssignment(Assignment* expr) { + // There are three basic kinds of assignment: variable assignments, + // property assignments, and invalid left-hand sides (which are translated + // to "throw ReferenceError" by the parser). 
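+ // Only the first two kinds emit an assignment instruction below; for an invalid target the builder just visits the synthesized throw.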
Variable* var = expr->target()->AsVariableProxy()->AsVariable(); Property* prop = expr->target()->AsProperty(); - // Left-hand side can be a variable or property (or reference error) but - // not both. ASSERT(var == NULL || prop == NULL); if (var != NULL) { - if (expr->is_compound()) Visit(expr->target()); - Visit(expr->value()); - if (var->IsStackAllocated()) { - // The first definition in the body is numbered n, where n is the - // number of parameters and stack-allocated locals. - expr->set_num(body_definitions_.length() + variable_count_); - body_definitions_.Add(expr); + if (expr->is_compound() && !expr->target()->IsTrivial()) { + Visit(expr->target()); } + if (!expr->value()->IsTrivial()) Visit(expr->value()); + current_->AddInstruction(expr); } else if (prop != NULL) { - Visit(prop->obj()); - if (!prop->key()->IsPropertyName()) Visit(prop->key()); - Visit(expr->value()); - } + if (!prop->obj()->IsTrivial()) Visit(prop->obj()); + if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) { + Visit(prop->key()); + } + if (!expr->value()->IsTrivial()) Visit(expr->value()); + current_->AddInstruction(expr); - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + } else { + Visit(expr->target()); + } } @@ -454,23 +317,18 @@ void FlowGraphBuilder::VisitThrow(Throw* expr) { void FlowGraphBuilder::VisitProperty(Property* expr) { - Visit(expr->obj()); - if (!expr->key()->IsPropertyName()) Visit(expr->key()); - - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + if (!expr->obj()->IsTrivial()) Visit(expr->obj()); + if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) { + Visit(expr->key()); + } + current_->AddInstruction(expr); } void FlowGraphBuilder::VisitCall(Call* expr) { Visit(expr->expression()); - ZoneList* arguments = expr->arguments(); - for (int i = 0, len = arguments->length(); i < len; i++) { - Visit(arguments->at(i)); - } - - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + VisitExpressions(expr->arguments()); + current_->AddInstruction(expr); } @@ -497,8 +355,7 @@ void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { case Token::ADD: case Token::SUB: Visit(expr->expression()); - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + current_->AddInstruction(expr); break; default: @@ -509,16 +366,7 @@ void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) { Visit(expr->expression()); - Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - if (var != NULL && var->IsStackAllocated()) { - // The first definition in the body is numbered n, where n is the number - // of parameters and stack-allocated locals. 
- expr->set_num(body_definitions_.length() + variable_count_); - body_definitions_.Add(expr); - } - - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + current_->AddInstruction(expr); } @@ -534,17 +382,16 @@ void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { case Token::BIT_XOR: case Token::BIT_AND: case Token::SHL: + case Token::SAR: case Token::SHR: case Token::ADD: case Token::SUB: case Token::MUL: case Token::DIV: case Token::MOD: - case Token::SAR: - Visit(expr->left()); - Visit(expr->right()); - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + if (!expr->left()->IsTrivial()) Visit(expr->left()); + if (!expr->right()->IsTrivial()) Visit(expr->right()); + current_->AddInstruction(expr); break; default: @@ -568,10 +415,9 @@ void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) { case Token::GT: case Token::LTE: case Token::GTE: - Visit(expr->left()); - Visit(expr->right()); - if (HasStackOverflow()) return; - graph_.AppendInstruction(expr); + if (!expr->left()->IsTrivial()) Visit(expr->left()); + if (!expr->right()->IsTrivial()) Visit(expr->right()); + current_->AddInstruction(expr); break; default: @@ -585,4 +431,333 @@ void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) { } +#ifdef DEBUG + +// Print a textual representation of an instruction in a flow graph. +class InstructionPrinter: public AstVisitor { + public: + InstructionPrinter() {} + + private: + // Overridden from the base class. + virtual void VisitExpressions(ZoneList* exprs); + + // AST node visit functions. +#define DECLARE_VISIT(type) virtual void Visit##type(type* node); + AST_NODE_LIST(DECLARE_VISIT) +#undef DECLARE_VISIT + + DISALLOW_COPY_AND_ASSIGN(InstructionPrinter); +}; + + +static void PrintSubexpression(Expression* expr) { + if (!expr->IsTrivial()) { + PrintF("@%d", expr->num()); + } else if (expr->AsLiteral() != NULL) { + expr->AsLiteral()->handle()->Print(); + } else if (expr->AsVariableProxy() != NULL) { + PrintF("%s", *expr->AsVariableProxy()->name()->ToCString()); + } else { + UNREACHABLE(); + } +} + + +void InstructionPrinter::VisitExpressions(ZoneList* exprs) { + for (int i = 0; i < exprs->length(); ++i) { + if (i != 0) PrintF(", "); + PrintF("@%d", exprs->at(i)->num()); + } +} + + +// We only define printing functions for the node types that can occur as +// instructions in a flow graph. The rest are unreachable. 
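PrintSubexpression above is what keeps the new printout compact: trivial operands (literals and variable names) print inline, and only non-trivial operands fall back to @n references to earlier instructions. The same decision logic over a stand-in operand type (invented names, not the AST classes):

#include <cstdio>

struct Operand {
  bool trivial;      // Literals and variable proxies are trivial.
  const char* text;  // Inline text when trivial.
  int num;           // Instruction number otherwise.
};

void PrintOperand(const Operand& op) {
  if (op.trivial) {
    std::printf("%s", op.text);
  } else {
    std::printf("@%d", op.num);
  }
}

int main() {
  Operand y = {true, "y", -1};
  Operand sum = {false, nullptr, 2};
  PrintOperand(y);    // prints "y" inline
  std::printf(" + ");
  PrintOperand(sum);  // prints "@2"
  std::printf("\n");  // whole line: "y + @2"
  return 0;
}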
+void InstructionPrinter::VisitDeclaration(Declaration* decl) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitBlock(Block* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitIfStatement(IfStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) { + PrintF("return "); + PrintSubexpression(stmt->expression()); +} + + +void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitForStatement(ForStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitConditional(Conditional* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitSlot(Slot* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) { + Variable* var = expr->AsVariable(); + if (var != NULL) { + PrintF("%s", *var->name()->ToCString()); + } else { + ASSERT(expr->AsProperty() != NULL); + Visit(expr->AsProperty()); + } +} + + +void InstructionPrinter::VisitLiteral(Literal* expr) { + expr->handle()->Print(); +} + + +void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitCatchExtensionObject( + CatchExtensionObject* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitAssignment(Assignment* expr) { + Variable* var = expr->target()->AsVariableProxy()->AsVariable(); + Property* prop = expr->target()->AsProperty(); + + // Print the left-hand side. + Visit(expr->target()); + if (var == NULL && prop == NULL) return; // Throw reference error. + PrintF(" = "); + // For compound assignments, print the left-hand side again and the + // corresponding binary operator. + if (expr->is_compound()) { + PrintSubexpression(expr->target()); + PrintF(" %s ", Token::String(expr->binary_op())); + } + + // Print the right-hand side. 
+ PrintSubexpression(expr->value()); +} + + +void InstructionPrinter::VisitThrow(Throw* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitProperty(Property* expr) { + PrintSubexpression(expr->obj()); + if (expr->key()->IsPropertyName()) { + PrintF("."); + ASSERT(expr->key()->AsLiteral() != NULL); + expr->key()->AsLiteral()->handle()->Print(); + } else { + PrintF("["); + PrintSubexpression(expr->key()); + PrintF("]"); + } +} + + +void InstructionPrinter::VisitCall(Call* expr) { + PrintF("@%d(", expr->expression()->num()); + VisitExpressions(expr->arguments()); + PrintF(")"); +} + + +void InstructionPrinter::VisitCallNew(CallNew* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) { + UNREACHABLE(); +} + + +void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) { + PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num()); +} + + +void InstructionPrinter::VisitCountOperation(CountOperation* expr) { + if (expr->is_prefix()) { + PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num()); + } else { + PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op())); + } +} + + +void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) { + PrintSubexpression(expr->left()); + PrintF(" %s ", Token::String(expr->op())); + PrintSubexpression(expr->right()); +} + + +void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) { + PrintSubexpression(expr->left()); + PrintF(" %s ", Token::String(expr->op())); + PrintSubexpression(expr->right()); +} + + +void InstructionPrinter::VisitThisFunction(ThisFunction* expr) { + UNREACHABLE(); +} + + +int BasicBlock::PrintAsText(int instruction_number) { + // Print a label for all blocks except the entry. + if (HasPredecessor()) { + PrintF("L%d:", number()); + } + + // Number and print the instructions. Since AST child nodes are visited + // before their parents, the parent nodes can refer to them by number. + InstructionPrinter printer; + for (int i = 0; i < instructions_.length(); ++i) { + PrintF("\n%d ", instruction_number); + instructions_[i]->set_num(instruction_number++); + instructions_[i]->Accept(&printer); + } + + // If this is the exit, print "exit". If there is a single successor, + // print "goto" successor on a separate line. If there are two + // successors, print "goto" successor on the same line as the last + // instruction in the block. There is a blank line between blocks (and + // after the last one). + if (left_successor_ == NULL) { + PrintF("\nexit\n\n"); + } else if (right_successor_ == NULL) { + PrintF("\ngoto L%d\n\n", left_successor_->number()); + } else { + PrintF(", goto (L%d, L%d)\n\n", + left_successor_->number(), + right_successor_->number()); + } + + return instruction_number; +} + + +void FlowGraph::PrintAsText(Handle name) { + PrintF("\n==== name = \"%s\" ====\n", *name->ToCString()); + // Print nodes in reverse postorder. Note that AST node numbers are used + // during printing of instructions and thus their current values are + // destroyed. + int number = 0; + for (int i = postorder_.length() - 1; i >= 0; --i) { + number = postorder_[i]->PrintAsText(number); + } +} + +#endif // DEBUG + + } } // namespace v8::internal diff --git a/deps/v8/src/flow-graph.h b/deps/v8/src/flow-graph.h index 183b71d5b08..f6af8410aec 100644 --- a/deps/v8/src/flow-graph.h +++ b/deps/v8/src/flow-graph.h @@ -36,339 +36,140 @@ namespace v8 { namespace internal { -// Flow-graph nodes. 
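BasicBlock::BuildTraversalOrder in the .cc hunk above replaces the per-class Traverse methods removed below, and its marking scheme deserves a note: all blocks carry the same mark bit between passes, and a traversal flips each visited block to the opposite value, so no reset sweep is ever needed. A self-contained sketch with a stand-in Block type:

#include <vector>

struct Block {
  Block* left = nullptr;   // First (true) successor, if any.
  Block* right = nullptr;  // Second (false) successor, if any.
  bool mark = false;
};

// Flip-mark depth-first search. Right is visited before left so that the
// reverse postorder lists the left (true) arm first.
void BuildOrder(Block* b, bool mark,
                std::vector<Block*>* preorder,
                std::vector<Block*>* postorder) {
  if (b == nullptr || b->mark == mark) return;  // Absent or already seen.
  b->mark = mark;                               // Flip to this pass's mark.
  preorder->push_back(b);
  BuildOrder(b->right, mark, preorder, postorder);
  BuildOrder(b->left, mark, preorder, postorder);
  postorder->push_back(b);
}

A pass starts with BuildOrder(entry, !entry->mark, &pre, &post), mirroring the !entry_->GetMark() call in FlowGraphBuilder::Build.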
-class Node: public ZoneObject { - public: - Node() : number_(-1), mark_(false) {} - - virtual ~Node() {} - - virtual bool IsExitNode() { return false; } - virtual bool IsBlockNode() { return false; } - virtual bool IsBranchNode() { return false; } - virtual bool IsJoinNode() { return false; } - - virtual void AddPredecessor(Node* predecessor) = 0; - virtual void AddSuccessor(Node* successor) = 0; - - bool IsMarkedWith(bool mark) { return mark_ == mark; } - void MarkWith(bool mark) { mark_ = mark; } - - // Perform a depth first search and record preorder and postorder - // traversal orders. - virtual void Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder) = 0; - - int number() { return number_; } - void set_number(int number) { number_ = number; } - - // Functions used by data-flow analyses. - virtual void InitializeReachingDefinitions(int definition_count, - List* variables, - WorkList* worklist, - bool mark); - virtual void ComputeRDOut(BitVector* result) = 0; - virtual void UpdateRDIn(WorkList* worklist, bool mark) = 0; - virtual void PropagateReachingDefinitions(List* variables); - - // Functions used by dead-code elimination. - virtual void MarkCriticalInstructions( - List* stack, - ZoneList* body_definitions, - int variable_count); - -#ifdef DEBUG - void AssignNodeNumber(); - void PrintReachingDefinitions(); - virtual void PrintText() = 0; -#endif - - protected: - ReachingDefinitionsData rd_; - - private: - int number_; - bool mark_; - - DISALLOW_COPY_AND_ASSIGN(Node); -}; - - -// An exit node has a arbitrarily many predecessors and no successors. -class ExitNode: public Node { +// The nodes of a flow graph are basic blocks. Basic blocks consist of +// instructions represented as pointers to AST nodes in the order that they +// would be visited by the code generator. A block can have arbitrarily many +// (even zero) predecessors and up to two successors. Blocks with multiple +// predecessors are "join nodes" and blocks with multiple successors are +// "branch nodes". A block can be both a branch and a join node. +// +// Flow graphs are in edge split form: a branch node is never the +// predecessor of a merge node. Empty basic blocks are inserted to maintain +// edge split form. +class BasicBlock: public ZoneObject { public: - ExitNode() : predecessors_(4) {} + // Construct a basic block with a given predecessor. NULL indicates no + // predecessor or that the predecessor will be set later. + explicit BasicBlock(BasicBlock* predecessor) + : predecessors_(2), + instructions_(8), + left_successor_(NULL), + right_successor_(NULL), + mark_(false) { + if (predecessor != NULL) AddPredecessor(predecessor); + } - virtual bool IsExitNode() { return true; } + bool HasPredecessor() { return !predecessors_.is_empty(); } + bool HasSuccessor() { return left_successor_ != NULL; } - virtual void AddPredecessor(Node* predecessor) { + // Add a given basic block as a predecessor of this block. This function + // also adds this block as a successor of the given block. 
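+ // (This is the only public way to wire two blocks together; AddSuccessor is private, which keeps the predecessor and successor lists consistent.)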
+ void AddPredecessor(BasicBlock* predecessor) { ASSERT(predecessor != NULL); predecessors_.Add(predecessor); + predecessor->AddSuccessor(this); } - virtual void AddSuccessor(Node* successor) { UNREACHABLE(); } - - virtual void Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder); - - virtual void ComputeRDOut(BitVector* result); - virtual void UpdateRDIn(WorkList* worklist, bool mark); - -#ifdef DEBUG - virtual void PrintText(); -#endif - - private: - ZoneList predecessors_; - - DISALLOW_COPY_AND_ASSIGN(ExitNode); -}; - - -// Block nodes have a single successor and predecessor and a list of -// instructions. -class BlockNode: public Node { - public: - BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {} - - static BlockNode* cast(Node* node) { - ASSERT(node->IsBlockNode()); - return reinterpret_cast(node); - } - - virtual bool IsBlockNode() { return true; } - - bool is_empty() { return instructions_.is_empty(); } - - ZoneList* instructions() { return &instructions_; } - - virtual void AddPredecessor(Node* predecessor) { - ASSERT(predecessor_ == NULL && predecessor != NULL); - predecessor_ = predecessor; - } - - virtual void AddSuccessor(Node* successor) { - ASSERT(successor_ == NULL && successor != NULL); - successor_ = successor; - } - + // Add an instruction to the end of this block. The block must be "open" + // by not having a successor yet. void AddInstruction(AstNode* instruction) { + ASSERT(!HasSuccessor() && instruction != NULL); instructions_.Add(instruction); } - virtual void Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder); - - virtual void InitializeReachingDefinitions(int definition_count, - List* variables, - WorkList* worklist, - bool mark); - virtual void ComputeRDOut(BitVector* result); - virtual void UpdateRDIn(WorkList* worklist, bool mark); - virtual void PropagateReachingDefinitions(List* variables); - - virtual void MarkCriticalInstructions( - List* stack, - ZoneList* body_definitions, - int variable_count); + // Perform a depth-first traversal of graph rooted at this node, + // accumulating pre- and postorder traversal orders. Visited nodes are + // marked with mark. + void BuildTraversalOrder(ZoneList* preorder, + ZoneList* postorder, + bool mark); + bool GetMark() { return mark_; } #ifdef DEBUG - virtual void PrintText(); + // In debug mode, blocks are numbered in reverse postorder to help with + // printing. + int number() { return number_; } + void set_number(int n) { number_ = n; } + + // Print a basic block, given the number of the first instruction. + // Returns the next number after the number of the last instruction. + int PrintAsText(int instruction_number); #endif private: - Node* predecessor_; - Node* successor_; - ZoneList instructions_; - - DISALLOW_COPY_AND_ASSIGN(BlockNode); -}; - - -// Branch nodes have a single predecessor and a pair of successors. -class BranchNode: public Node { - public: - BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {} - - virtual bool IsBranchNode() { return true; } - - virtual void AddPredecessor(Node* predecessor) { - ASSERT(predecessor_ == NULL && predecessor != NULL); - predecessor_ = predecessor; - } - - virtual void AddSuccessor(Node* successor) { - ASSERT(successor1_ == NULL && successor != NULL); - if (successor0_ == NULL) { - successor0_ = successor; + // Add a given basic block as successor to this block. This function does + // not add this block as a predecessor of the given block so as to avoid + // circularity. 
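+ // The first successor added becomes left_successor_ (the true arm of a branch); a second becomes right_successor_. VisitIfStatement relies on this left-to-right order.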
+ void AddSuccessor(BasicBlock* successor) { + ASSERT(right_successor_ == NULL && successor != NULL); + if (HasSuccessor()) { + right_successor_ = successor; } else { - successor1_ = successor; + left_successor_ = successor; } } - virtual void Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder); - - virtual void ComputeRDOut(BitVector* result); - virtual void UpdateRDIn(WorkList* worklist, bool mark); - -#ifdef DEBUG - virtual void PrintText(); -#endif - - private: - Node* predecessor_; - Node* successor0_; - Node* successor1_; - - DISALLOW_COPY_AND_ASSIGN(BranchNode); -}; - - -// Join nodes have arbitrarily many predecessors and a single successor. -class JoinNode: public Node { - public: - JoinNode() : predecessors_(2), successor_(NULL) {} - - static JoinNode* cast(Node* node) { - ASSERT(node->IsJoinNode()); - return reinterpret_cast(node); - } - - virtual bool IsJoinNode() { return true; } - - virtual void AddPredecessor(Node* predecessor) { - ASSERT(predecessor != NULL); - predecessors_.Add(predecessor); - } - - virtual void AddSuccessor(Node* successor) { - ASSERT(successor_ == NULL && successor != NULL); - successor_ = successor; - } - - virtual void Traverse(bool mark, - ZoneList* preorder, - ZoneList* postorder); + ZoneList predecessors_; + ZoneList instructions_; + BasicBlock* left_successor_; + BasicBlock* right_successor_; - virtual void ComputeRDOut(BitVector* result); - virtual void UpdateRDIn(WorkList* worklist, bool mark); + // Support for graph traversal. Before traversal, all nodes in the graph + // have the same mark (true or false). Traversal marks already-visited + // nodes with the opposite mark. After traversal, all nodes again have + // the same mark. Traversal of the same graph is not reentrant. + bool mark_; #ifdef DEBUG - virtual void PrintText(); + int number_; #endif - private: - ZoneList predecessors_; - Node* successor_; - - DISALLOW_COPY_AND_ASSIGN(JoinNode); + DISALLOW_COPY_AND_ASSIGN(BasicBlock); }; -// Flow graphs have a single entry and single exit. The empty flowgraph is -// represented by both entry and exit being NULL. -class FlowGraph BASE_EMBEDDED { +// A flow graph has distinguished entry and exit blocks. The entry block is +// the only one with no predecessors and the exit block is the only one with +// no successors. +class FlowGraph: public ZoneObject { public: - static FlowGraph Empty() { - FlowGraph graph; - graph.entry_ = new BlockNode(); - graph.exit_ = graph.entry_; - return graph; + FlowGraph(BasicBlock* entry, BasicBlock* exit) + : entry_(entry), exit_(exit), preorder_(8), postorder_(8) { } - bool is_empty() const { - return entry_ == exit_ && BlockNode::cast(entry_)->is_empty(); - } - Node* entry() const { return entry_; } - Node* exit() const { return exit_; } - - // Add a single instruction to the end of this flowgraph. - void AppendInstruction(AstNode* instruction); - - // Add a single node to the end of this flow graph. - void AppendNode(Node* node); - - // Add a flow graph fragment to the end of this one. - void AppendGraph(FlowGraph* graph); - - // Concatenate an if-then-else flow-graph to this one. Control is split - // and merged, so the graph remains single-entry, single-exit. - void Split(BranchNode* branch, - FlowGraph* left, - FlowGraph* right, - JoinNode* merge); - - // Concatenate a forward loop (e.g., while or for loop) flow-graph to this - // one. Control is split by the condition and merged back from the back - // edge at end of the body to the beginning of the condition. 
The single - // (free) exit of the result graph is the right (false) arm of the branch - // node. - void Loop(JoinNode* merge, - FlowGraph* condition, - BranchNode* branch, - FlowGraph* body); + ZoneList* preorder() { return &preorder_; } + ZoneList* postorder() { return &postorder_; } #ifdef DEBUG - void PrintText(FunctionLiteral* fun, ZoneList* postorder); + void PrintAsText(Handle name); #endif private: - FlowGraph() : entry_(NULL), exit_(NULL) {} - - Node* entry_; - Node* exit_; + BasicBlock* entry_; + BasicBlock* exit_; + ZoneList preorder_; + ZoneList postorder_; }; -// Construct a flow graph from a function literal. Build pre- and postorder -// traversal orders as a byproduct. +// The flow graph builder walks the AST adding reachable AST nodes to the +// flow graph as instructions. It remembers the entry and exit nodes of the +// graph, and keeps a pointer to the current block being constructed. class FlowGraphBuilder: public AstVisitor { public: - explicit FlowGraphBuilder(int variable_count) - : graph_(FlowGraph::Empty()), - global_exit_(NULL), - preorder_(4), - postorder_(4), - variable_count_(variable_count), - body_definitions_(4) { - } - - void Build(FunctionLiteral* lit); + FlowGraphBuilder() {} - FlowGraph* graph() { return &graph_; } - ZoneList* preorder() { return &preorder_; } - ZoneList* postorder() { return &postorder_; } - ZoneList* body_definitions() { return &body_definitions_; } + FlowGraph* Build(FunctionLiteral* lit); private: - ExitNode* global_exit() { return global_exit_; } - - // Helpers to allow tranforming the ast during flow graph construction. - void VisitStatements(ZoneList* stmts); - Statement* ProcessStatement(Statement* stmt); - // AST node visit functions. #define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) #undef DECLARE_VISIT - FlowGraph graph_; - ExitNode* global_exit_; - ZoneList preorder_; - ZoneList postorder_; - - // The flow graph builder collects a list of explicit definitions - // (assignments and count operations) to stack-allocated variables to use - // for reaching definitions analysis. It does not count the implicit - // definition at function entry. AST node numbers in the AST are used to - // refer into this list. 
- int variable_count_; - ZoneList body_definitions_; + BasicBlock* entry_; + BasicBlock* exit_; + BasicBlock* current_; DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder); }; diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 5e81a54d8da..9cf83c91cfc 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -382,6 +382,12 @@ void EntryFrame::ComputeCallerState(State* state) const { } +void EntryFrame::SetCallerFp(Address caller_fp) { + const int offset = EntryFrameConstants::kCallerFPOffset; + Memory::Address_at(this->fp() + offset) = caller_fp; +} + + StackFrame::Type EntryFrame::GetCallerState(State* state) const { const int offset = EntryFrameConstants::kCallerFPOffset; Address fp = Memory::Address_at(this->fp() + offset); @@ -414,6 +420,11 @@ void ExitFrame::ComputeCallerState(State* state) const { } +void ExitFrame::SetCallerFp(Address caller_fp) { + Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp; +} + + Address ExitFrame::GetCallerStackPointer() const { return fp() + ExitFrameConstants::kCallerSPDisplacement; } @@ -443,6 +454,12 @@ void StandardFrame::ComputeCallerState(State* state) const { } +void StandardFrame::SetCallerFp(Address caller_fp) { + Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset) = + caller_fp; +} + + bool StandardFrame::IsExpressionInsideHandler(int n) const { Address address = GetExpressionAddress(n); for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) { @@ -767,4 +784,40 @@ int JSCallerSavedCode(int n) { } +#define DEFINE_WRAPPER(type, field) \ +class field##_Wrapper : public ZoneObject { \ + public: /* NOLINT */ \ + field##_Wrapper(const field& original) : frame_(original) { \ + } \ + field frame_; \ +}; +STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER) +#undef DEFINE_WRAPPER + +static StackFrame* AllocateFrameCopy(StackFrame* frame) { +#define FRAME_TYPE_CASE(type, field) \ + case StackFrame::type: { \ + field##_Wrapper* wrapper = \ + new field##_Wrapper(*(reinterpret_cast(frame))); \ + return &wrapper->frame_; \ + } + + switch (frame->type()) { + STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE) + default: UNREACHABLE(); + } +#undef FRAME_TYPE_CASE + return NULL; +} + +Vector CreateStackMap() { + ZoneList list(10); + for (StackFrameIterator it; !it.done(); it.Advance()) { + StackFrame* frame = AllocateFrameCopy(it.frame()); + list.Add(frame); + } + return list.ToVector(); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 8cbbc626797..98aaead28bd 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -114,6 +114,12 @@ class StackFrame BASE_EMBEDDED { // by the debugger. enum Id { NO_ID = 0 }; + // Copy constructor; it breaks the connection to host iterator. + StackFrame(const StackFrame& original) { + this->state_ = original.state_; + this->iterator_ = NULL; + } + // Type testers. bool is_entry() const { return type() == ENTRY; } bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; } @@ -132,6 +138,8 @@ class StackFrame BASE_EMBEDDED { Address pc() const { return *pc_address(); } void set_pc(Address pc) { *pc_address() = pc; } + virtual void SetCallerFp(Address caller_fp) = 0; + Address* pc_address() const { return state_.pc_address; } // Get the id of this stack frame. 
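The copy constructor added above, together with the wrapper classes in frames.cc, is what makes CreateStackMap possible: each stack-embedded frame value is copied into a zone-owned wrapper so the copy outlives the iterator that produced it. A sketch of the idea with stand-in types (the real code generates the wrappers with DEFINE_WRAPPER over STACK_FRAME_TYPE_LIST):

#include <vector>

struct StackIterator;  // Opaque stand-in for the frame iterator.

struct Frame {
  const StackIterator* iterator;  // Nulled in copies, as in StackFrame.
  void* fp;                       // Example of state the copy carries.

  Frame(const StackIterator* it, void* frame_pointer)
      : iterator(it), fp(frame_pointer) {}
  Frame(const Frame& original)    // Copy breaks the iterator connection.
      : iterator(nullptr), fp(original.fp) {}
};

// Each element copy runs the copy constructor, so the snapshot no longer
// depends on the iterator that walked the stack.
std::vector<Frame> SnapshotFrames(const std::vector<Frame>& walk) {
  return std::vector<Frame>(walk.begin(), walk.end());
}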
@@ -200,7 +208,8 @@ class StackFrame BASE_EMBEDDED { friend class StackHandlerIterator; friend class SafeStackFrameIterator; - DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrame); + private: + void operator=(const StackFrame& original); }; @@ -218,6 +227,7 @@ class EntryFrame: public StackFrame { ASSERT(frame->is_entry()); return static_cast(frame); } + virtual void SetCallerFp(Address caller_fp); protected: explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { } @@ -268,6 +278,8 @@ class ExitFrame: public StackFrame { // Garbage collection support. virtual void Iterate(ObjectVisitor* v) const; + virtual void SetCallerFp(Address caller_fp); + static ExitFrame* cast(StackFrame* frame) { ASSERT(frame->is_exit()); return static_cast(frame); @@ -303,6 +315,8 @@ class StandardFrame: public StackFrame { inline void SetExpression(int index, Object* value); int ComputeExpressionsCount() const; + virtual void SetCallerFp(Address caller_fp); + static StandardFrame* cast(StackFrame* frame) { ASSERT(frame->is_standard()); return static_cast(frame); @@ -658,6 +672,10 @@ class StackFrameLocator BASE_EMBEDDED { }; +// Reads all frames on the current stack and copies them into the current +// zone memory. +Vector CreateStackMap(); + } } // namespace v8::internal #endif // V8_FRAMES_H_ diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index b85e19d5174..3d48e2d78df 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -112,8 +112,9 @@ typedef byte* Address; #define V8PRIxPTR "lx" #endif -#if defined(__APPLE__) && defined(__MACH__) -#define USING_MAC_ABI +#if (defined(__APPLE__) && defined(__MACH__)) || \ + defined(__FreeBSD__) || defined(__OpenBSD__) +#define USING_BSD_ABI #endif // Code-point values in Unicode 4.0 are 21 bits wide. @@ -457,7 +458,7 @@ struct AccessorDescriptor { // Logging and profiling. // A StateTag represents a possible state of the VM. When compiled with -// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these. +// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these. // Creating a VMState object enters a state by pushing on the stack, and // destroying a VMState object leaves a state by popping the current state // from the stack. diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index d4c593f9aa2..05cb3f2b383 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -737,7 +737,7 @@ bool CompileLazy(Handle function, ClearExceptionFlag flag) { CompilationInfo info(function, 0, receiver); bool result = CompileLazyHelper(&info, flag); - LOG(FunctionCreateEvent(*function)); + PROFILE(FunctionCreateEvent(*function)); return result; } @@ -747,7 +747,7 @@ bool CompileLazyInLoop(Handle function, ClearExceptionFlag flag) { CompilationInfo info(function, 1, receiver); bool result = CompileLazyHelper(&info, flag); - LOG(FunctionCreateEvent(*function)); + PROFILE(FunctionCreateEvent(*function)); return result; } diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 892c2892ba3..bf9c535d4f7 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -236,19 +236,27 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) { void Heap::CopyBlock(Object** dst, Object** src, int byte_size) { ASSERT(IsAligned(byte_size, kPointerSize)); + CopyWords(dst, src, byte_size / kPointerSize); +} - // Use block copying memcpy if the segment we're copying is - // enough to justify the extra call/setup overhead. 
- static const int kBlockCopyLimit = 16 * kPointerSize; - if (byte_size >= kBlockCopyLimit) { - memcpy(dst, src, byte_size); - } else { - int remaining = byte_size / kPointerSize; - do { - remaining--; +void Heap::MoveBlock(Object** dst, Object** src, size_t byte_size) { + ASSERT(IsAligned(byte_size, kPointerSize)); + + int size_in_words = byte_size / kPointerSize; + + if ((dst < src) || (dst >= (src + size_in_words))) { + ASSERT((dst >= (src + size_in_words)) || + ((OffsetFrom(reinterpret_cast
<Address>(src)) - + OffsetFrom(reinterpret_cast<Address>
(dst))) >= kPointerSize)); + + Object** end = src + size_in_words; + + while (src != end) { *dst++ = *src++; - } while (remaining > 0); + } + } else { + memmove(dst, src, byte_size); } } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 5421dcc1956..7a5188fd153 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -562,23 +562,18 @@ void Heap::PerformGarbageCollection(AllocationSpace space, EnsureFromSpaceIsCommitted(); - // Perform mark-sweep with optional compaction. if (collector == MARK_COMPACTOR) { + // Perform mark-sweep with optional compaction. MarkCompact(tracer); - } - - // Always perform a scavenge to make room in new space. - Scavenge(); - // Update the old space promotion limits after the scavenge due to - // promotions during scavenge. - if (collector == MARK_COMPACTOR) { int old_gen_size = PromotedSpaceSize(); old_gen_promotion_limit_ = old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); old_gen_allocation_limit_ = old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); old_gen_exhausted_ = false; + } else { + Scavenge(); } Counters::objs_since_last_young.Set(0); @@ -764,6 +759,17 @@ static void VerifyNonPointerSpacePointers() { #endif +void Heap::CheckNewSpaceExpansionCriteria() { + if (new_space_.Capacity() < new_space_.MaximumCapacity() && + survived_since_last_expansion_ > new_space_.Capacity()) { + // Grow the size of new space if there is room to grow and enough + // data has survived scavenge since the last expansion. + new_space_.Grow(); + survived_since_last_expansion_ = 0; + } +} + + void Heap::Scavenge() { #ifdef DEBUG if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); @@ -780,13 +786,7 @@ void Heap::Scavenge() { // Used for updating survived_since_last_expansion_ at function end. int survived_watermark = PromotedSpaceSize(); - if (new_space_.Capacity() < new_space_.MaximumCapacity() && - survived_since_last_expansion_ > new_space_.Capacity()) { - // Grow the size of new space if there is room to grow and enough - // data has survived scavenge since the last expansion. - new_space_.Grow(); - survived_since_last_expansion_ = 0; - } + CheckNewSpaceExpansionCriteria(); // Flip the semispaces. After flipping, to space is empty, from space has // live objects. @@ -837,15 +837,17 @@ void Heap::Scavenge() { new_space_front = DoScavenge(&scavenge_visitor, new_space_front); - ScavengeExternalStringTable(); + UpdateNewSpaceReferencesInExternalStringTable( + &UpdateNewSpaceReferenceInExternalStringTableEntry); + ASSERT(new_space_front == new_space_.top()); // Set age mark. new_space_.set_age_mark(new_space_.top()); // Update how much has survived scavenge. - survived_since_last_expansion_ += - (PromotedSpaceSize() - survived_watermark) + new_space_.Size(); + IncrementYoungSurvivorsCounter( + (PromotedSpaceSize() - survived_watermark) + new_space_.Size()); LOG(ResourceEvent("scavenge", "end")); @@ -853,7 +855,22 @@ void Heap::Scavenge() { } -void Heap::ScavengeExternalStringTable() { +String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) { + MapWord first_word = HeapObject::cast(*p)->map_word(); + + if (!first_word.IsForwardingAddress()) { + // Unreachable external string can be finalized. + FinalizeExternalString(String::cast(*p)); + return NULL; + } + + // String is still reachable. 
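+ // (Its map word was overwritten with a forwarding pointer when the
+ // scavenger copied it, so the new location is read from there.)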
+ return String::cast(first_word.ToForwardingAddress()); +} + + +void Heap::UpdateNewSpaceReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func) { ExternalStringTable::Verify(); if (ExternalStringTable::new_space_strings_.is_empty()) return; @@ -864,16 +881,10 @@ void Heap::ScavengeExternalStringTable() { for (Object** p = start; p < end; ++p) { ASSERT(Heap::InFromSpace(*p)); - MapWord first_word = HeapObject::cast(*p)->map_word(); + String* target = updater_func(p); - if (!first_word.IsForwardingAddress()) { - // Unreachable external string can be finalized. - FinalizeExternalString(String::cast(*p)); - continue; - } + if (target == NULL) continue; - // String is still reachable. - String* target = String::cast(first_word.ToForwardingAddress()); ASSERT(target->IsExternalString()); if (Heap::InNewSpace(target)) { @@ -1487,10 +1498,9 @@ Object* Heap::AllocateJSGlobalPropertyCell(Object* value) { } -Object* Heap::CreateOddball(Map* map, - const char* to_string, +Object* Heap::CreateOddball(const char* to_string, Object* to_number) { - Object* result = Allocate(map, OLD_DATA_SPACE); + Object* result = Allocate(oddball_map(), OLD_DATA_SPACE); if (result->IsFailure()) return result; return Oddball::cast(result)->Initialize(to_string, to_number); } @@ -1594,34 +1604,27 @@ bool Heap::CreateInitialObjects() { Oddball::cast(undefined_value())->set_to_string(String::cast(symbol)); Oddball::cast(undefined_value())->set_to_number(nan_value()); - // Assign the print strings for oddballs after creating symboltable. - symbol = LookupAsciiSymbol("null"); - if (symbol->IsFailure()) return false; - Oddball::cast(null_value())->set_to_string(String::cast(symbol)); - Oddball::cast(null_value())->set_to_number(Smi::FromInt(0)); - // Allocate the null_value obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0)); if (obj->IsFailure()) return false; - obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1)); + obj = CreateOddball("true", Smi::FromInt(1)); if (obj->IsFailure()) return false; set_true_value(obj); - obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0)); + obj = CreateOddball("false", Smi::FromInt(0)); if (obj->IsFailure()) return false; set_false_value(obj); - obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1)); + obj = CreateOddball("hole", Smi::FromInt(-1)); if (obj->IsFailure()) return false; set_the_hole_value(obj); - obj = CreateOddball( - oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2)); + obj = CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2)); if (obj->IsFailure()) return false; set_no_interceptor_result_sentinel(obj); - obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3)); + obj = CreateOddball("termination_exception", Smi::FromInt(-3)); if (obj->IsFailure()) return false; set_termination_exception(obj); @@ -1797,11 +1800,13 @@ Object* Heap::SmiOrNumberFromDouble(double value, } -Object* Heap::NumberToString(Object* number) { +Object* Heap::NumberToString(Object* number, bool check_number_string_cache) { Counters::number_to_string_runtime.Increment(); - Object* cached = GetNumberStringCache(number); - if (cached != undefined_value()) { - return cached; + if (check_number_string_cache) { + Object* cached = GetNumberStringCache(number); + if (cached != undefined_value()) { + return cached; + } } char arr[100]; @@ -2313,7 +2318,8 @@ Object* Heap::CopyCode(Code* code, Vector reloc_info) { Address old_addr = code->address(); - int relocation_offset = code->relocation_start() 
- old_addr; + size_t relocation_offset = + static_cast(code->relocation_start() - old_addr); Object* result; if (new_obj_size > MaxObjectSizeInPagedSpace()) { diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 2a0de236ee7..fbd77ff4a9c 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -149,6 +149,13 @@ class ZoneScopeInfo; V(number_symbol, "number") \ V(Number_symbol, "Number") \ V(RegExp_symbol, "RegExp") \ + V(source_symbol, "source") \ + V(global_symbol, "global") \ + V(ignore_case_symbol, "ignoreCase") \ + V(multiline_symbol, "multiline") \ + V(input_symbol, "input") \ + V(index_symbol, "index") \ + V(last_index_symbol, "lastIndex") \ V(object_symbol, "object") \ V(prototype_symbol, "prototype") \ V(string_symbol, "string") \ @@ -195,6 +202,9 @@ class GCTracer; class HeapStats; +typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer); + + // The all static Heap captures the interface to the global object heap. // All JavaScript contexts by this process share the same object heap. @@ -930,7 +940,8 @@ class Heap : public AllStatic { kRootListLength }; - static Object* NumberToString(Object* number); + static Object* NumberToString(Object* number, + bool check_number_string_cache = true); static Map* MapForExternalArrayType(ExternalArrayType array_type); static RootListIndex RootIndexForExternalArrayType( @@ -938,6 +949,30 @@ class Heap : public AllStatic { static void RecordStats(HeapStats* stats); + // Copy block of memory from src to dst. Size of block should be aligned + // by pointer size. + static inline void CopyBlock(Object** dst, Object** src, int byte_size); + + // Optimized version of memmove for blocks with pointer size aligned sizes and + // pointer size aligned addresses. + static inline void MoveBlock(Object** dst, Object** src, size_t byte_size); + + // Check new space expansion criteria and expand semispaces if it was hit. + static void CheckNewSpaceExpansionCriteria(); + + static inline void IncrementYoungSurvivorsCounter(int survived) { + survived_since_last_expansion_ += survived; + } + + static void UpdateNewSpaceReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func); + + // Helper function that governs the promotion policy from new space to + // old. If the object's old address lies below the new space's age + // mark or if we've already filled the bottom 1/16th of the to space, + // we try to promote this object. + static inline bool ShouldBePromoted(Address old_address, int object_size); + static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; } private: @@ -1125,16 +1160,17 @@ class Heap : public AllStatic { static void CreateFixedStubs(); - static Object* CreateOddball(Map* map, - const char* to_string, - Object* to_number); + static Object* CreateOddball(const char* to_string, Object* to_number); // Allocate empty fixed array. static Object* AllocateEmptyFixedArray(); // Performs a minor collection in new generation. static void Scavenge(); - static void ScavengeExternalStringTable(); + + static String* UpdateNewSpaceReferenceInExternalStringTableEntry( + Object** pointer); + static Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); @@ -1152,11 +1188,6 @@ class Heap : public AllStatic { HeapObject* target, int size); - // Helper function that governs the promotion policy from new space to - // old. 
If the object's old address lies below the new space's age - // mark or if we've already filled the bottom 1/16th of the to space, - // we try to promote this object. - static inline bool ShouldBePromoted(Address old_address, int object_size); #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Record the copy of an object in the NewSpace's statistics. static void RecordCopiedObject(HeapObject* obj); @@ -1175,9 +1206,6 @@ class Heap : public AllStatic { // Slow part of scavenge object. static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); - // Copy memory from src to dst. - static inline void CopyBlock(Object** dst, Object** src, int byte_size); - // Initializes a function with a shared part and prototype. // Returns the function. // Note: this code was factored out of AllocateFunction such that diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 7f0d5d4385d..26e40b15bc3 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -123,8 +123,8 @@ void CpuFeatures::Probe() { Code::ComputeFlags(Code::STUB), Handle::null()); if (!code->IsCode()) return; - LOG(CodeCreateEvent(Logger::BUILTIN_TAG, - Code::cast(code), "CpuFeatures::Probe")); + PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG, + Code::cast(code), "CpuFeatures::Probe")); typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST(Code::cast(code)->entry()); supported_ = probe(); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 200e3ef4e54..bac4ee59978 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -1132,9 +1132,9 @@ void DeferredInlineBinaryOperation::Generate() { static TypeInfo CalculateTypeInfo(TypeInfo operands_type, - Token::Value op, - const Result& right, - const Result& left) { + Token::Value op, + const Result& right, + const Result& left) { // Set TypeInfo of result according to the operation performed. // Rely on the fact that smis have a 31 bit payload on ia32. ASSERT(kSmiValueSize == 31); @@ -1193,11 +1193,12 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type, if (operands_type.IsSmi()) { // The Integer32 range is big enough to take the sum of any two Smis. return TypeInfo::Integer32(); + } else if (operands_type.IsNumber()) { + return TypeInfo::Number(); + } else if (left.type_info().IsString() || right.type_info().IsString()) { + return TypeInfo::String(); } else { - // Result could be a string or a number. Check types of inputs. - return operands_type.IsNumber() - ? 
TypeInfo::Number() - : TypeInfo::Unknown(); + return TypeInfo::Unknown(); } case Token::SHL: return TypeInfo::Integer32(); @@ -1220,11 +1221,10 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type, } -void CodeGenerator::GenericBinaryOperation(Token::Value op, - StaticType* type, - OverwriteMode overwrite_mode, - bool no_negative_zero) { +void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, + OverwriteMode overwrite_mode) { Comment cmnt(masm_, "[ BinaryOperation"); + Token::Value op = expr->op(); Comment cmnt_token(masm_, Token::String(op)); if (op == Token::COMMA) { @@ -1237,8 +1237,13 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, Result left = frame_->Pop(); if (op == Token::ADD) { - bool left_is_string = left.is_constant() && left.handle()->IsString(); - bool right_is_string = right.is_constant() && right.handle()->IsString(); + const bool left_is_string = left.type_info().IsString(); + const bool right_is_string = right.type_info().IsString(); + // Make sure constant strings have string type info. + ASSERT(!(left.is_constant() && left.handle()->IsString()) || + left_is_string); + ASSERT(!(right.is_constant() && right.handle()->IsString()) || + right_is_string); if (left_is_string || right_is_string) { frame_->Push(&left); frame_->Push(&right); @@ -1247,7 +1252,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, if (right_is_string) { // TODO(lrn): if both are constant strings // -- do a compile time cons, if allocation during codegen is allowed. - answer = frame_->CallRuntime(Runtime::kStringAdd, 2); + StringAddStub stub(NO_STRING_CHECK_IN_STUB); + answer = frame_->CallStub(&stub, 2); } else { answer = frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); @@ -1256,6 +1262,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, answer = frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); } + answer.set_type_info(TypeInfo::String()); frame_->Push(&answer); return; } @@ -1290,13 +1297,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, operands_type); answer = stub.GenerateCall(masm_, frame_, &left, &right); } else if (right_is_smi_constant) { - answer = ConstantSmiBinaryOperation(op, &left, right.handle(), - type, false, overwrite_mode, - no_negative_zero); + answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), + false, overwrite_mode); } else if (left_is_smi_constant) { - answer = ConstantSmiBinaryOperation(op, &right, left.handle(), - type, true, overwrite_mode, - no_negative_zero); + answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), + true, overwrite_mode); } else { // Set the flags based on the operation, type and loop nesting level. // Bit operations always assume they likely operate on Smis. Still only @@ -1306,9 +1311,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, if (loop_nesting() > 0 && (Token::IsBitOp(op) || operands_type.IsInteger32() || - type->IsLikelySmi())) { - answer = LikelySmiBinaryOperation(op, &left, &right, - overwrite_mode, no_negative_zero); + expr->type()->IsLikelySmi())) { + answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); } else { GenericBinaryOpStub stub(op, overwrite_mode, @@ -1412,11 +1416,11 @@ static void CheckTwoForSminess(MacroAssembler* masm, // Implements a binary operation using a deferred code object and some // inline code to operate on smis quickly. 
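// The inline path speculates that both operands are smis; when the smi
// checks fail it bails out to a DeferredInlineBinaryOperation object that
// performs the generic operation.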
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, +Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, Result* left, Result* right, - OverwriteMode overwrite_mode, - bool no_negative_zero) { + OverwriteMode overwrite_mode) { + Token::Value op = expr->op(); Result answer; // Special handling of div and mod because they use fixed registers. if (op == Token::DIV || op == Token::MOD) { @@ -1522,7 +1526,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // virtual frame is unchanged in this block, so local control flow // can use a Label rather than a JumpTarget. If the context of this // expression will treat -0 like 0, do not do this test. - if (!no_negative_zero) { + if (!expr->no_negative_zero()) { Label non_zero_result; __ test(left->reg(), Operand(left->reg())); __ j(not_zero, &non_zero_result); @@ -1551,7 +1555,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // the dividend is negative, return a floating point negative // zero. The frame is unchanged in this block, so local control // flow can use a Label rather than a JumpTarget. - if (!no_negative_zero) { + if (!expr->no_negative_zero()) { Label non_zero_result; __ test(edx, Operand(edx)); __ j(not_zero, &non_zero_result, taken); @@ -1735,7 +1739,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // argument is negative, go to slow case. The frame is unchanged // in this block, so local control flow can use a Label rather // than a JumpTarget. - if (!no_negative_zero) { + if (!expr->no_negative_zero()) { Label non_zero_result; __ test(answer.reg(), Operand(answer.reg())); __ j(not_zero, &non_zero_result, taken); @@ -1978,13 +1982,12 @@ void DeferredInlineSmiSub::Generate() { } -Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, - Result* operand, - Handle value, - StaticType* type, - bool reversed, - OverwriteMode overwrite_mode, - bool no_negative_zero) { +Result CodeGenerator::ConstantSmiBinaryOperation( + BinaryOperation* expr, + Result* operand, + Handle value, + bool reversed, + OverwriteMode overwrite_mode) { // NOTE: This is an attempt to inline (a bit) more of the code for // some possible smi operations (like + and -) when (at least) one // of the operands is a constant smi. @@ -1994,11 +1997,11 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, if (IsUnsafeSmi(value)) { Result unsafe_operand(value); if (reversed) { - return LikelySmiBinaryOperation(op, &unsafe_operand, operand, - overwrite_mode, no_negative_zero); + return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, + overwrite_mode); } else { - return LikelySmiBinaryOperation(op, operand, &unsafe_operand, - overwrite_mode, no_negative_zero); + return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, + overwrite_mode); } } @@ -2006,6 +2009,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, Smi* smi_value = Smi::cast(*value); int int_value = smi_value->value(); + Token::Value op = expr->op(); Result answer; switch (op) { case Token::ADD: { @@ -2081,8 +2085,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, case Token::SAR: if (reversed) { Result constant_operand(value); - answer = LikelySmiBinaryOperation(op, &constant_operand, operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); } else { // Only the least significant 5 bits of the shift value are used. 
// In the slow case, this masking is done inside the runtime call. @@ -2118,8 +2122,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, case Token::SHR: if (reversed) { Result constant_operand(value); - answer = LikelySmiBinaryOperation(op, &constant_operand, operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); } else { // Only the least significant 5 bits of the shift value are used. // In the slow case, this masking is done inside the runtime call. @@ -2319,11 +2323,11 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, // default case here. Result constant_operand(value); if (reversed) { - answer = LikelySmiBinaryOperation(op, &constant_operand, operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); } else { - answer = LikelySmiBinaryOperation(op, operand, &constant_operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, + overwrite_mode); } } break; @@ -2359,11 +2363,11 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, default: { Result constant_operand(value); if (reversed) { - answer = LikelySmiBinaryOperation(op, &constant_operand, operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, + overwrite_mode); } else { - answer = LikelySmiBinaryOperation(op, operand, &constant_operand, - overwrite_mode, no_negative_zero); + answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, + overwrite_mode); } break; } @@ -2428,7 +2432,8 @@ void CodeGenerator::Comparison(AstNode* node, left_side_constant_null = left_side.handle()->IsNull(); left_side_constant_1_char_string = (left_side.handle()->IsString() && - (String::cast(*left_side.handle())->length() == 1)); + String::cast(*left_side.handle())->length() == 1 && + String::cast(*left_side.handle())->IsAsciiRepresentation()); } bool right_side_constant_smi = false; bool right_side_constant_null = false; @@ -2438,7 +2443,8 @@ void CodeGenerator::Comparison(AstNode* node, right_side_constant_null = right_side.handle()->IsNull(); right_side_constant_1_char_string = (right_side.handle()->IsString() && - (String::cast(*right_side.handle())->length() == 1)); + String::cast(*right_side.handle())->length() == 1 && + String::cast(*right_side.handle())->IsAsciiRepresentation()); } if (left_side_constant_smi || right_side_constant_smi) { @@ -2627,6 +2633,7 @@ void CodeGenerator::Comparison(AstNode* node, JumpTarget is_not_string, is_string; Register left_reg = left_side.reg(); Handle right_val = right_side.handle(); + ASSERT(StringShape(String::cast(*right_val)).IsSymbol()); __ test(left_side.reg(), Immediate(kSmiTagMask)); is_not_string.Branch(zero, &left_side); Result temp = allocator_->Allocate(); @@ -2651,7 +2658,7 @@ void CodeGenerator::Comparison(AstNode* node, dest->false_target()->Branch(not_equal); __ bind(¬_a_symbol); } - // If the receiver is not a string of the type we handle call the stub. + // Call the compare stub if the left side is not a flat ascii string. __ and_(temp.reg(), kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag); @@ -2669,7 +2676,7 @@ void CodeGenerator::Comparison(AstNode* node, dest->false_target()->Jump(); is_string.Bind(&left_side); - // Here we know we have a sequential ASCII string. 
+ // left_side is a sequential ASCII string. left_side = Result(left_reg); right_side = Result(right_val); Result temp2 = allocator_->Allocate(); @@ -2681,7 +2688,7 @@ void CodeGenerator::Comparison(AstNode* node, Immediate(1)); __ j(not_equal, &comparison_done); uint8_t char_value = - static_cast(String::cast(*right_side.handle())->Get(0)); + static_cast(String::cast(*right_val)->Get(0)); __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), char_value); __ bind(&comparison_done); @@ -2690,17 +2697,17 @@ void CodeGenerator::Comparison(AstNode* node, FieldOperand(left_side.reg(), String::kLengthOffset)); __ sub(Operand(temp2.reg()), Immediate(1)); Label comparison; - // If the length is 0 then our subtraction gave -1 which compares less + // If the length is 0 then the subtraction gave -1 which compares less // than any character. __ j(negative, &comparison); // Otherwise load the first character. __ movzx_b(temp2.reg(), FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); __ bind(&comparison); - // Compare the first character of the string with out constant - // 1-character string. + // Compare the first character of the string with the + // constant 1-character string. uint8_t char_value = - static_cast(String::cast(*right_side.handle())->Get(0)); + static_cast(String::cast(*right_val)->Get(0)); __ cmp(Operand(temp2.reg()), Immediate(char_value)); Label characters_were_different; __ j(not_equal, &characters_were_different); @@ -5363,10 +5370,11 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) { bool overwrite_value = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - GenericBinaryOperation(node->binary_op(), - node->type(), - overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, - node->no_negative_zero()); + // Construct the implicit binary operation. + BinaryOperation expr(node, node->binary_op(), node->target(), + node->value()); + GenericBinaryOperation(&expr, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { Load(node->value()); } @@ -5441,10 +5449,11 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { bool overwrite_value = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - GenericBinaryOperation(node->binary_op(), - node->type(), - overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, - node->no_negative_zero()); + // Construct the implicit binary operation. + BinaryOperation expr(node, node->binary_op(), node->target(), + node->value()); + GenericBinaryOperation(&expr, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { Load(node->value()); } @@ -5521,10 +5530,10 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { bool overwrite_value = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - GenericBinaryOperation(node->binary_op(), - node->type(), - overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, - node->no_negative_zero()); + BinaryOperation expr(node, node->binary_op(), node->target(), + node->value()); + GenericBinaryOperation(&expr, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { Load(node->value()); } @@ -6222,12 +6231,30 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList* args) { void CodeGenerator::GenerateArgumentsLength(ZoneList* args) { ASSERT(args->length() == 0); - // ArgumentsAccessStub takes the parameter count as an input argument - // in register eax. Create a constant result for it. 
- Result count(Handle(Smi::FromInt(scope()->num_parameters()))); - // Call the shared stub to get to the arguments.length. - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH); - Result result = frame_->CallStub(&stub, &count); + + Result fp = allocator_->Allocate(); + Result result = allocator_->Allocate(); + ASSERT(fp.is_valid() && result.is_valid()); + + Label exit; + + // Get the number of formal parameters. + __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters()))); + + // Check if the calling frame is an arguments adaptor frame. + __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &exit); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ mov(result.reg(), + Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); + result.set_type_info(TypeInfo::Smi()); + if (FLAG_debug_code) __ AbortIfNotSmi(result.reg()); frame_->Push(&result); } @@ -6406,16 +6433,55 @@ void CodeGenerator::GenerateGetFramePointer(ZoneList* args) { } -void CodeGenerator::GenerateRandomPositiveSmi(ZoneList* args) { +void CodeGenerator::GenerateRandomHeapNumber( + ZoneList* args) { ASSERT(args->length() == 0); frame_->SpillAll(); - static const int num_arguments = 0; - __ PrepareCallCFunction(num_arguments, eax); + Label slow_allocate_heapnumber; + Label heapnumber_allocated; + + __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber); + __ jmp(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + // To allocate a heap number, and ensure that it is not a smi, we + // call the runtime function FUnaryMinus on 0, returning the double + // -0.0. A new, distinct heap number is returned each time. + __ push(Immediate(Smi::FromInt(0))); + __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + __ mov(edi, eax); + + __ bind(&heapnumber_allocated); + + __ PrepareCallCFunction(0, ebx); + __ CallCFunction(ExternalReference::random_uint32_function(), 0); - // Call V8::RandomPositiveSmi(). - __ CallCFunction(ExternalReference::random_positive_smi_function(), - num_arguments); + // Convert 32 random bits in eax to 0.(32 random bits) in a double + // by computing: + // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). + // This is implemented on both SSE2 and FPU. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope fscope(SSE2); + __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. + __ movd(xmm1, Operand(ebx)); + __ movd(xmm0, Operand(eax)); + __ cvtss2sd(xmm1, xmm1); + __ pxor(xmm0, xmm1); + __ subsd(xmm0, xmm1); + __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); + } else { + // 0x4130000000000000 is 1.0 x 2^20 as a double. 
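+ // Writing 0x41300000 into the exponent word and the 32 random bits into
+ // the mantissa word encodes 2^20 + bits * 2^-32; the fsubp below removes
+ // the 2^20 term, leaving a uniform double in [0, 1).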
+ __ mov(FieldOperand(edi, HeapNumber::kExponentOffset), + Immediate(0x41300000)); + __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax); + __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset)); + __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0)); + __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset)); + __ fsubp(1); + __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset)); + } + __ mov(eax, edi); Result result = allocator_->Allocate(eax); frame_->Push(&result); @@ -6460,7 +6526,7 @@ void CodeGenerator::GenerateStringCompare(ZoneList* args) { void CodeGenerator::GenerateRegExpExec(ZoneList* args) { - ASSERT_EQ(args->length(), 4); + ASSERT_EQ(4, args->length()); // Load the arguments on the stack and call the stub. Load(args->at(0)); @@ -6473,6 +6539,95 @@ void CodeGenerator::GenerateRegExpExec(ZoneList* args) { } +void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { + // No stub. This code only occurs a few times in regexp.js. + const int kMaxInlineLength = 100; + ASSERT_EQ(3, args->length()); + Load(args->at(0)); // Size of array, smi. + Load(args->at(1)); // "index" property value. + Load(args->at(2)); // "input" property value. + { + VirtualFrame::SpilledScope spilled_scope; + + Label slowcase; + Label done; + __ mov(ebx, Operand(esp, kPointerSize * 2)); + __ test(ebx, Immediate(kSmiTagMask)); + __ j(not_zero, &slowcase); + __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength))); + __ j(above, &slowcase); + // Smi-tagging is equivalent to multiplying by 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + // Allocate RegExpResult followed by FixedArray with size in ebx. + // JSArray: [Map][empty properties][Elements][Length-smi][index][input] + // Elements: [Map][Length][..elements..] + __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, + times_half_pointer_size, + ebx, // In: Number of elements (times 2, being a smi) + eax, // Out: Start of allocation (tagged). + ecx, // Out: End of allocation. + edx, // Scratch register + &slowcase, + TAG_OBJECT); + // eax: Start of allocated area, object-tagged. + + // Set JSArray map to global.regexp_result_map(). + // Set empty properties FixedArray. + // Set elements to point to FixedArray allocated right after the JSArray. + // Interleave operations for better latency. + __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(ecx, Immediate(Factory::empty_fixed_array())); + __ lea(ebx, Operand(eax, JSRegExpResult::kSize)); + __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset)); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx); + __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX)); + __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx); + + // Set input, index and length fields from arguments. + __ pop(FieldOperand(eax, JSRegExpResult::kInputOffset)); + __ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset)); + __ pop(ecx); + __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx); + + // Fill out the elements FixedArray. + // eax: JSArray. + // ebx: FixedArray. + // ecx: Number of elements in array, as smi. + + // Set map. + __ mov(FieldOperand(ebx, HeapObject::kMapOffset), + Immediate(Factory::fixed_array_map())); + // Set length. + __ SmiUntag(ecx); + __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx); + // Fill contents of fixed-array with the-hole. 
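+ // (The elements must be GC-safe values until the caller stores the real
+ // match data into them, hence the holes.)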
+ __ mov(edx, Immediate(Factory::the_hole_value())); + __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize)); + // Fill fixed array elements with hole. + // eax: JSArray. + // ecx: Number of elements to fill. + // ebx: Start of elements in FixedArray. + // edx: the hole. + Label loop; + __ test(ecx, Operand(ecx)); + __ bind(&loop); + __ j(less_equal, &done); // Jump if ecx is negative or zero. + __ sub(Operand(ecx), Immediate(1)); + __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx); + __ jmp(&loop); + + __ bind(&slowcase); + __ CallRuntime(Runtime::kRegExpConstructResult, 3); + + __ bind(&done); + } + frame_->Forget(3); + frame_->Push(eax); +} + + void CodeGenerator::GenerateNumberToString(ZoneList* args) { ASSERT_EQ(args->length(), 1); @@ -6484,6 +6639,22 @@ void CodeGenerator::GenerateNumberToString(ZoneList* args) { } +void CodeGenerator::GenerateCallFunction(ZoneList* args) { + Comment cmnt(masm_, "[ GenerateCallFunction"); + + ASSERT(args->length() >= 2); + + int n_args = args->length() - 2; // for receiver and function. + Load(args->at(0)); // receiver + for (int i = 0; i < n_args; i++) { + Load(args->at(i + 1)); + } + Load(args->at(n_args + 1)); // function + Result result = frame_->CallJSFunction(n_args); + frame_->Push(&result); +} + + // Generates the Math.pow method - only handles special cases and branches to // the runtime system if not.Please note - this function assumes that // the callsite has executed ToNumber on both arguments and that the @@ -7003,8 +7174,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { // specialized add or subtract stub. The result is left in dst. class DeferredPrefixCountOperation: public DeferredCode { public: - DeferredPrefixCountOperation(Register dst, bool is_increment) - : dst_(dst), is_increment_(is_increment) { + DeferredPrefixCountOperation(Register dst, + bool is_increment, + TypeInfo input_type) + : dst_(dst), is_increment_(is_increment), input_type_(input_type) { set_comment("[ DeferredCountOperation"); } @@ -7013,6 +7186,7 @@ class DeferredPrefixCountOperation: public DeferredCode { private: Register dst_; bool is_increment_; + TypeInfo input_type_; }; @@ -7023,15 +7197,21 @@ void DeferredPrefixCountOperation::Generate() { } else { __ add(Operand(dst_), Immediate(Smi::FromInt(1))); } - __ push(dst_); - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); - __ push(eax); - __ push(Immediate(Smi::FromInt(1))); - if (is_increment_) { - __ CallRuntime(Runtime::kNumberAdd, 2); + Register left; + if (input_type_.IsNumber()) { + left = dst_; } else { - __ CallRuntime(Runtime::kNumberSub, 2); + __ push(dst_); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + left = eax; } + + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, + NO_OVERWRITE, + NO_GENERIC_BINARY_FLAGS, + TypeInfo::Number()); + stub.GenerateCall(masm_, left, Smi::FromInt(1)); + if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -7043,8 +7223,14 @@ void DeferredPrefixCountOperation::Generate() { // The result is left in dst. 
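// Unlike the prefix case, the value from before the operation must survive,
// so Generate() saves it on the stack and pops it into old_ on exit.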
class DeferredPostfixCountOperation: public DeferredCode { public: - DeferredPostfixCountOperation(Register dst, Register old, bool is_increment) - : dst_(dst), old_(old), is_increment_(is_increment) { + DeferredPostfixCountOperation(Register dst, + Register old, + bool is_increment, + TypeInfo input_type) + : dst_(dst), + old_(old), + is_increment_(is_increment), + input_type_(input_type) { set_comment("[ DeferredCountOperation"); } @@ -7054,6 +7240,7 @@ class DeferredPostfixCountOperation: public DeferredCode { Register dst_; Register old_; bool is_increment_; + TypeInfo input_type_; }; @@ -7064,20 +7251,23 @@ void DeferredPostfixCountOperation::Generate() { } else { __ add(Operand(dst_), Immediate(Smi::FromInt(1))); } - __ push(dst_); - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); - - // Save the result of ToNumber to use as the old value. - __ push(eax); - - // Call the runtime for the addition or subtraction. - __ push(eax); - __ push(Immediate(Smi::FromInt(1))); - if (is_increment_) { - __ CallRuntime(Runtime::kNumberAdd, 2); + Register left; + if (input_type_.IsNumber()) { + __ push(dst_); // Save the input to use as the old value. + left = dst_; } else { - __ CallRuntime(Runtime::kNumberSub, 2); + __ push(dst_); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + __ push(eax); // Save the result of ToNumber to use as the old value. + left = eax; } + + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, + NO_OVERWRITE, + NO_GENERIC_BINARY_FLAGS, + TypeInfo::Number()); + stub.GenerateCall(masm_, left, Smi::FromInt(1)); + if (!dst_.is(eax)) __ mov(dst_, eax); __ pop(old_); } @@ -7120,9 +7310,13 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { ASSERT(old_value.is_valid()); __ mov(old_value.reg(), new_value.reg()); - // The return value for postfix operations is the - // same as the input, and has the same number info. - old_value.set_type_info(new_value.type_info()); + // The return value for postfix operations is ToNumber(input). + // Keep more precise type info if the input is some kind of + // number already. If the input is not a number we have to wait + // for the deferred code to convert it. + if (new_value.type_info().IsNumber()) { + old_value.set_type_info(new_value.type_info()); + } } // Ensure the new value is writable. @@ -7156,10 +7350,12 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { if (is_postfix) { deferred = new DeferredPostfixCountOperation(new_value.reg(), old_value.reg(), - is_increment); + is_increment, + new_value.type_info()); } else { deferred = new DeferredPrefixCountOperation(new_value.reg(), - is_increment); + is_increment, + new_value.type_info()); } if (new_value.is_smi()) { @@ -7186,6 +7382,13 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { } deferred->BindExit(); + // Postfix count operations return their input converted to + // number. The case when the input is already a number is covered + // above in the allocation code for old_value. + if (is_postfix && !new_value.type_info().IsNumber()) { + old_value.set_type_info(TypeInfo::Number()); + } + // The result of ++ or -- is an Integer32 if the // input is a smi. Otherwise it is a number. 
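// (A smi payload is only 31 bits on ia32, so smi +/- 1 always fits in an
// Integer32 even when it overflows the smi range.)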
if (new_value.is_smi()) { @@ -7596,8 +7799,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { Load(node->left()); Load(node->right()); } - GenericBinaryOperation(node->op(), node->type(), - overwrite_mode, node->no_negative_zero()); + GenericBinaryOperation(node, overwrite_mode); } } @@ -9449,13 +9651,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { default: UNREACHABLE(); } - - // Generate an unreachable reference to the DEFAULT stub so that it can be - // found at the end of this stub when clearing ICs at GC. - if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { - GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); - __ TailCallStub(&uninit); - } } @@ -10374,30 +10569,6 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { } -void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) { - // Check if the calling frame is an arguments adaptor frame. - __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - - // Arguments adaptor case: Read the arguments length from the - // adaptor frame and return it. - // Otherwise nothing to do: The number of formal parameters has already been - // passed in register eax by calling function. Just return it. - if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope use_cmov(CMOV); - __ cmov(equal, eax, - Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - } else { - Label exit; - __ j(not_equal, &exit); - __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ bind(&exit); - } - __ ret(0); -} - - void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The key is in edx and the parameter count is in eax. @@ -10893,14 +11064,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, Register scratch2, bool object_is_smi, Label* not_found) { - // Currently only lookup for smis. Check for smi if object is not known to be - // a smi. - if (!object_is_smi) { - ASSERT(kSmiTag == 0); - __ test(object, Immediate(kSmiTagMask)); - __ j(not_zero, not_found); - } - // Use of registers. Register result is used as a temporary. Register number_string_cache = result; Register mask = scratch1; @@ -10916,23 +11079,74 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); __ shr(mask, 1); // Divide length by two (length is not a smi). __ sub(Operand(mask), Immediate(1)); // Make mask. + // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value. - __ mov(scratch, object); - __ SmiUntag(scratch); + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. 
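+ // For example, 1.5 has the bit pattern 0x3FF8000000000000, so its hash
+ // is 0x00000000 ^ 0x3FF80000 = 0x3FF80000 before masking.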
+ Label smi_hash_calculated; + Label load_result_from_cache; + if (object_is_smi) { + __ mov(scratch, object); + __ SmiUntag(scratch); + } else { + Label not_smi, hash_calculated; + ASSERT(kSmiTag == 0); + __ test(object, Immediate(kSmiTagMask)); + __ j(not_zero, ¬_smi); + __ mov(scratch, object); + __ SmiUntag(scratch); + __ jmp(&smi_hash_calculated); + __ bind(¬_smi); + __ cmp(FieldOperand(object, HeapObject::kMapOffset), + Factory::heap_number_map()); + __ j(not_equal, not_found); + ASSERT_EQ(8, kDoubleSize); + __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); + __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); + // Object is heap number and hash is now in scratch. Calculate cache index. + __ and_(scratch, Operand(mask)); + Register index = scratch; + Register probe = mask; + __ mov(probe, + FieldOperand(number_string_cache, + index, + times_twice_pointer_size, + FixedArray::kHeaderSize)); + __ test(probe, Immediate(kSmiTagMask)); + __ j(zero, not_found); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope fscope(SSE2); + __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); + __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); + __ comisd(xmm0, xmm1); + } else { + __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); + __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); + __ FCmp(); + } + __ j(parity_even, not_found); // Bail out if NaN is involved. + __ j(not_equal, not_found); // The cache did not contain this value. + __ jmp(&load_result_from_cache); + } + + __ bind(&smi_hash_calculated); + // Object is smi and hash is now in scratch. Calculate cache index. __ and_(scratch, Operand(mask)); + Register index = scratch; // Check if the entry is the smi we are looking for. __ cmp(object, FieldOperand(number_string_cache, - scratch, + index, times_twice_pointer_size, FixedArray::kHeaderSize)); __ j(not_equal, not_found); // Get the result from the cache. + __ bind(&load_result_from_cache); __ mov(result, FieldOperand(number_string_cache, - scratch, + index, times_twice_pointer_size, FixedArray::kHeaderSize + kPointerSize)); __ IncrementCounter(&Counters::number_to_string_native, 1); @@ -10950,7 +11164,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { __ bind(&runtime); // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToString, 1, 1); + __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); } @@ -10960,62 +11174,94 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { } +static int NegativeComparisonResult(Condition cc) { + ASSERT(cc != equal); + ASSERT((cc == less) || (cc == less_equal) + || (cc == greater) || (cc == greater_equal)); + return (cc == greater || cc == greater_equal) ? LESS : GREATER; +} + + void CompareStub::Generate(MacroAssembler* masm) { Label call_builtin, done; // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. - if (cc_ == equal) { // Both strict and non-strict. - Label slow; // Fallthrough label. - // Equality is almost reflexive (everything but NaN), so start by testing - // for "identity and not NaN". - { - Label not_identical; - __ cmp(eax, Operand(edx)); - __ j(not_equal, ¬_identical); - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. 
- - if (never_nan_nan_) { - __ Set(eax, Immediate(0)); + // Identical objects can be compared fast, but there are some tricky cases + // for NaN and undefined. + { + Label not_identical; + __ cmp(eax, Operand(edx)); + __ j(not_equal, ¬_identical); + + if (cc_ != equal) { + // Check for undefined. undefined OP undefined is false even though + // undefined == undefined. + Label check_for_nan; + __ cmp(edx, Factory::undefined_value()); + __ j(not_equal, &check_for_nan); + __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); + __ ret(0); + __ bind(&check_for_nan); + } + + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), + // so we do the second best thing - test it ourselves. + // Note: if cc_ != equal, never_nan_nan_ is not used. + if (never_nan_nan_ && (cc_ == equal)) { + __ Set(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + } else { + Label return_equal; + Label heap_number; + // If it's not a heap number, then return equal. + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + Immediate(Factory::heap_number_map())); + __ j(equal, &heap_number); + __ bind(&return_equal); + __ Set(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if + // it's not NaN. + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // We only accept QNaNs, which have bit 51 set. + // Read top bits of double representation (second word of value). + + // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., + // all bits in the mask are set. We only need to check the word + // that contains the exponent and high bit of the mantissa. + ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u); + __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); + __ xor_(eax, Operand(eax)); + // Shift value and mask so kQuietNaNHighBitsMask applies to topmost + // bits. + __ add(edx, Operand(edx)); + __ cmp(edx, kQuietNaNHighBitsMask << 1); + if (cc_ == equal) { + ASSERT_NE(1, EQUAL); + __ setcc(above_equal, eax); __ ret(0); } else { - Label return_equal; - Label heap_number; - // If it's not a heap number, then return equal. - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - __ j(equal, &heap_number); - __ bind(&return_equal); - __ Set(eax, Immediate(0)); + Label nan; + __ j(above_equal, &nan); + __ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ ret(0); - - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if - // it's not NaN. - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // We only accept QNaNs, which have bit 51 set. - // Read top bits of double representation (second word of value). - - // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., - // all bits in the mask are set. We only need to check the word - // that contains the exponent and high bit of the mantissa. - ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u); - __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ xor_(eax, Operand(eax)); - // Shift value and mask so kQuietNaNHighBitsMask applies to topmost - // bits. 
- __ add(edx, Operand(edx)); - __ cmp(edx, kQuietNaNHighBitsMask << 1); - __ setcc(above_equal, eax); + __ bind(&nan); + __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); __ ret(0); } - - __ bind(¬_identical); } + __ bind(¬_identical); + } + + if (cc_ == equal) { // Both strict and non-strict. + Label slow; // Fallthrough label. + // If we're doing a strict equality comparison, we don't have to do // type conversion, so we generate code to do fast comparison for objects // and oddballs. Non-smi numbers and strings still go through the usual @@ -11205,14 +11451,7 @@ void CompareStub::Generate(MacroAssembler* masm) { builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { builtin = Builtins::COMPARE; - int ncr; // NaN compare result - if (cc_ == less || cc_ == less_equal) { - ncr = GREATER; - } else { - ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases - ncr = LESS; - } - __ push(Immediate(Smi::FromInt(ncr))); + __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); } // Restore return address on the stack. @@ -11345,7 +11584,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { // If true, a Handle passed by value is passed and returned by // using the location_ field directly. If false, it is passed and // returned as a pointer to a handle. -#ifdef USING_MAC_ABI +#ifdef USING_BSD_ABI static const bool kPassHandlesDirectly = true; #else static const bool kPassHandlesDirectly = false; diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 9fcc466e36c..a8568f0a68d 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -492,11 +492,8 @@ class CodeGenerator: public AstVisitor { // Generate code that computes a shortcutting logical operation. void GenerateLogicalBooleanOperation(BinaryOperation* node); - void GenericBinaryOperation( - Token::Value op, - StaticType* type, - OverwriteMode overwrite_mode, - bool no_negative_zero); + void GenericBinaryOperation(BinaryOperation* expr, + OverwriteMode overwrite_mode); // If possible, combine two constant smi values using op to produce // a smi result, and push it on the virtual frame, all at compile time. @@ -505,22 +502,19 @@ class CodeGenerator: public AstVisitor { // Emit code to perform a binary operation on a constant // smi and a likely smi. Consumes the Result operand. - Result ConstantSmiBinaryOperation(Token::Value op, + Result ConstantSmiBinaryOperation(BinaryOperation* expr, Result* operand, Handle constant_operand, - StaticType* type, bool reversed, - OverwriteMode overwrite_mode, - bool no_negative_zero); + OverwriteMode overwrite_mode); // Emit code to perform a binary operation on two likely smis. // The code to handle smi arguments is produced inline. // Consumes the Results left and right. - Result LikelySmiBinaryOperation(Token::Value op, + Result LikelySmiBinaryOperation(BinaryOperation* expr, Result* left, Result* right, - OverwriteMode overwrite_mode, - bool no_negative_zero); + OverwriteMode overwrite_mode); // Emit code to perform a binary operation on two untagged int32 values. @@ -620,7 +614,7 @@ class CodeGenerator: public AstVisitor { void GenerateGetFramePointer(ZoneList* args); // Fast support for Math.random(). - void GenerateRandomPositiveSmi(ZoneList* args); + void GenerateRandomHeapNumber(ZoneList* args); // Fast support for StringAdd. 
void GenerateStringAdd(ZoneList* args); @@ -634,9 +628,14 @@ class CodeGenerator: public AstVisitor { // Support for direct calls from JavaScript to native RegExp code. void GenerateRegExpExec(ZoneList* args); + void GenerateRegExpConstructResult(ZoneList* args); + // Fast support for number to string. void GenerateNumberToString(ZoneList* args); + // Fast call for custom callbacks. + void GenerateCallFunction(ZoneList* args); + // Fast call to math functions. void GenerateMathPow(ZoneList* args); void GenerateMathSin(ZoneList* args); diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index 5d18a0354e8..d142b11cf7b 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -206,8 +206,58 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { } +void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) { + masm->ret(0); +} + +// FrameDropper is a code replacement for a JavaScript frame with possibly +// several frames above. +// There is no calling conventions here, because it never actually gets called, +// it only gets returned to. +// Frame structure (conforms InternalFrame structure): +// -- JSFunction +// -- code +// -- SMI maker +// -- context +// -- frame base +void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { + // We do not know our frame height, but set esp based on ebp. + __ lea(esp, Operand(ebp, -4 * kPointerSize)); + + __ pop(edi); // function + + // Skip code self-reference and marker. + __ add(Operand(esp), Immediate(2 * kPointerSize)); + + __ pop(esi); // Context. + __ pop(ebp); + + // Get function code. + __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); + + // Re-run JSFunction, edi is function, esi is context. + __ jmp(Operand(edx)); +} + #undef __ + +void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame, + Handle code) { + ASSERT(bottom_js_frame->is_java_script()); + + Address fp = bottom_js_frame->fp(); + Memory::Object_at(fp - 4 * kPointerSize) = + Memory::Object_at(fp - 2 * kPointerSize); // Move edi (function). + + Memory::Object_at(fp - 3 * kPointerSize) = *code; + Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL); +} +const int Debug::kFrameDropperFrameSize = 5; + + #endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index c6accbda99b..7e82528c9d9 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -832,7 +832,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); - LOG(RegExpCodeCreateEvent(*code, *source)); + PROFILE(RegExpCodeCreateEvent(*code, *source)); return Handle::cast(code); } diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc index bc27e1de7a5..10aaa52b833 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.cc +++ b/deps/v8/src/ia32/virtual-frame-ia32.cc @@ -909,6 +909,25 @@ Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) { } +Result VirtualFrame::CallJSFunction(int arg_count) { + Result function = Pop(); + + // InvokeFunction requires function in edi. Move it in there. + function.ToRegister(edi); + function.Unuse(); + + // +1 for receiver. 
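+ // (The receiver sits below the arguments on the stack and is consumed
+ // by the call together with them.)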
+ PrepareForCall(arg_count + 1, arg_count + 1); + ASSERT(cgen()->HasValidEntryRegisters()); + ParameterCount count(arg_count); + __ InvokeFunction(edi, count, CALL_FUNCTION); + RestoreContextRegister(); + Result result = cgen()->allocator()->Allocate(eax); + ASSERT(result.is_valid()); + return result; +} + + Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) { PrepareForCall(arg_count, arg_count); ASSERT(cgen()->HasValidEntryRegisters()); diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h index 9b6892a51bf..14fe4662dc2 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.h +++ b/deps/v8/src/ia32/virtual-frame-ia32.h @@ -331,6 +331,10 @@ class VirtualFrame: public ZoneObject { // arguments are consumed by the call. Result CallStub(CodeStub* stub, Result* arg0, Result* arg1); + // Call JS function from top of the stack with arguments + // taken from the stack. + Result CallJSFunction(int arg_count); + // Call runtime given the number of arguments expected on (and // removed from) the stack. Result CallRuntime(Runtime::Function* f, int arg_count); diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index a6d2020b440..b9ca00f8293 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -224,7 +224,8 @@ void IC::Clear(Address address) { case Code::STORE_IC: return StoreIC::Clear(address, target); case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target); case Code::CALL_IC: return CallIC::Clear(address, target); - case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target); + case Code::BINARY_OP_IC: return; // Clearing these is tricky and does not + // make any performance difference. default: UNREACHABLE(); } } @@ -1404,25 +1405,6 @@ void BinaryOpIC::patch(Code* code) { } -void BinaryOpIC::Clear(Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; - - // At the end of a fast case stub there should be a reference to - // a corresponding UNINITIALIZED stub, so look for the last reloc info item. - RelocInfo* rinfo = NULL; - for (RelocIterator it(target, RelocInfo::kCodeTargetMask); - !it.done(); it.next()) { - rinfo = it.rinfo(); - } - - ASSERT(rinfo != NULL); - Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address()); - ASSERT(uninit_stub->ic_state() == UNINITIALIZED && - uninit_stub->kind() == Code::BINARY_OP_IC); - SetTargetAtAddress(address, uninit_stub); -} - - const char* BinaryOpIC::GetName(TypeInfo type_info) { switch (type_info) { case DEFAULT: return "Default"; @@ -1451,8 +1433,9 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) { BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left, Object* right) { - // Patching is never requested for the two smis. - ASSERT(!left->IsSmi() || !right->IsSmi()); + if (left->IsSmi() && right->IsSmi()) { + return GENERIC; + } if (left->IsNumber() && right->IsNumber()) { return HEAP_NUMBERS; diff --git a/deps/v8/src/jump-target-light.cc b/deps/v8/src/jump-target-light.cc index 098d97deef3..befb4307367 100644 --- a/deps/v8/src/jump-target-light.cc +++ b/deps/v8/src/jump-target-light.cc @@ -77,23 +77,10 @@ DeferredCode::DeferredCode() ASSERT(position_ != RelocInfo::kNoPosition); CodeGeneratorScope::Current()->AddDeferred(this); + #ifdef DEBUG - comment_ = ""; + CodeGeneratorScope::Current()->frame()->AssertIsSpilled(); #endif - - // Copy the register locations from the code generator's frame. - // These are the registers that will be spilled on entry to the - // deferred code and restored on exit. 
-  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int loc = frame->register_location(i);
-    if (loc == VirtualFrame::kIllegalIndex) {
-      registers_[i] = kIgnore;
-    } else {
-      // Needs to be restored on exit but not saved on entry.
-      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
-    }
-  }
 }
 
 } }  // namespace v8::internal
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index e336db71b95..6ef7a2daed6 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -32,14 +32,14 @@
 // Changes script text and recompiles all relevant functions if possible.
 // The change is always a substring (change_pos, change_pos + change_len)
 // being replaced with a completely different string new_str.
-// 
+//
 // Only one function will have its Code changed as a result of this function.
 // All nested functions (should they have any instances at the moment) are left
 // unchanged and re-linked to a newly created script instance representing the
 // old version of the source. (Generally speaking,
 // during the change all nested functions are erased and a completely different
 // set of nested functions is introduced.) All other functions just have
-// their positions updated. 
+// their positions updated.
 //
 // @param {Script} script that is being changed
 // @param {Array} change_log a list that collects engineer-readable description
@@ -56,9 +56,9 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
   // Elements of array are ordered by start positions of functions (from top
   // to bottom) in the source. Fields outer_index and next_sibling_index help
   // to navigate the nesting structure of functions.
-  // 
-  // The script is used for compilation, because it produces code that 
-  // needs to be linked with some particular script (for nested functions). 
+  //
+  // The script is used for compilation, because it produces code that
+  // needs to be linked with some particular script (for nested functions).
   function DebugGatherCompileInfo(source) {
     // Get function info; elements are partially sorted (it is a tree of
    // nested functions serialized as parent followed by serialized children).
@@ -71,7 +71,7 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
       compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
       old_index_map.push(i);
     }
-    
+
     for (var i = 0; i < compile_info.length; i++) {
       var k = i;
       for (var j = i + 1; j < compile_info.length; j++) {
@@ -112,12 +112,12 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
         compile_info[previous_sibling].next_sibling_index = -1;
       }
     }
-    
+
     ResetIndexes(-1, -1);
     Assert(current_index == compile_info.length);
-    
+
     return compile_info;
-  } 
+  }
 
   // Given a position, finds a function that fully includes the entire change.
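  // The innermost such function is the only one that gets recompiled;
  // functions that merely enclose the change only have their positions patched.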
  function FindChangedFunction(compile_info, offset, len) {
@@ -148,7 +148,7 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
    var old_info = old_compile_info[index];
    for (var i = 0; i < shared_infos.length; i++) {
      var info = shared_infos[i];
-      if (info.start_position == old_info.start_position && 
+      if (info.start_position == old_info.start_position &&
          info.end_position == old_info.end_position) {
        return info;
      }
@@ -161,7 +161,7 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
    change_log.push( {function_patched: new_info.function_name} );
  }
-  
+
  var change_len_old;
  var change_len_new;
  // Translate position in old version of script into position in new
@@ -175,19 +175,26 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
    }
    return -1;
  }
-  
+
  var position_change_array;
  var position_patch_report;
  function PatchPositions(new_info, shared_info) {
    if (!shared_info) {
-      // TODO: explain what is happening.
+      // TODO(LiveEdit): explain what is happening.
      return;
    }
-    %LiveEditPatchFunctionPositions(shared_info.raw_array,
-        position_change_array);
+    var breakpoint_position_update = %LiveEditPatchFunctionPositions(
+        shared_info.raw_array, position_change_array);
+    for (var i = 0; i < breakpoint_position_update.length; i += 2) {
+      var new_pos = breakpoint_position_update[i];
+      var break_point_object = breakpoint_position_update[i + 1];
+      change_log.push( { breakpoint_position_update:
+          { from: break_point_object.source_position(), to: new_pos } } );
+      break_point_object.updateSourcePosition(new_pos, script);
+    }
    position_patch_report.push( { name: new_info.function_name } );
  }
-  
+
  var link_to_old_script_report;
  var old_script;
  // Makes a function associated with another instance of a script (the
  // one representing its old version). This way the function still
  // may access its own text.
  function LinkToOldScript(shared_info) {
    %LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
-    
+
    link_to_old_script_report.push( { name: shared_info.function_name } );
  }
-  
+
  var old_source = script.source;
  var change_len_old = change_len;
  var change_len_new = new_str.length;
-  
+
  // Prepare new source string.
  var new_source = old_source.substring(0, change_pos) +
      new_str +
      old_source.substring(change_pos + change_len);
@@ -217,10 +224,10 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
  for (var i = 0; i < shared_raw_list.length; i++) {
    shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
  }
-  
+
  // Gather compile information about old version of script.
  var old_compile_info = DebugGatherCompileInfo(old_source);
-  
+
  // Gather compile information about new version of script.
  var new_compile_info;
  try {
@@ -247,20 +254,20 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
          old_compile_info[function_being_patched],
          new_compile_info[function_being_patched])) {
-    Assert(old_compile_info[function_being_patched].outer_index == 
+    Assert(old_compile_info[function_being_patched].outer_index ==
        new_compile_info[function_being_patched].outer_index);
    function_being_patched =
        old_compile_info[function_being_patched].outer_index;
    Assert(function_being_patched != -1);
  }
-  
+
  // Check that the function being patched is not currently on the stack.
  liveedit.CheckStackActivations(
      [ FindFunctionInfo(function_being_patched) ], change_log );
-  
+
  // Committing all changes.
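  // The checks above throw Failure before anything is modified; the steps
  // below update the script text, patch source positions, and re-link former
  // nested functions to the old copy of the script.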
-  var old_script_name = liveedit.CreateNameForOldScript(script); 
+  var old_script_name = liveedit.CreateNameForOldScript(script);
 
  // Update the script text and create a new script representing an old
  // version of the script.
@@ -271,11 +278,11 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
 
  var position_patch_report = new Array();
  change_log.push( {position_patched: position_patch_report} );
-  
+
  var position_change_array = [ change_pos,
                                change_pos + change_len_old,
                                change_pos + change_len_new ];
-  
+
  // Update positions of all outer functions (i.e. all functions that are
  // partially below the function being patched).
  for (var i = new_compile_info[function_being_patched].outer_index;
@@ -308,7 +315,7 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
 
  var link_to_old_script_report = new Array();
  change_log.push( { linked_to_old_script: link_to_old_script_report } );
-  // We need to link to old script all former nested functions. 
+  // We need to link to old script all former nested functions.
  for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
    LinkToOldScript(FindFunctionInfo(i), old_script);
  }
@@ -323,7 +330,7 @@ Debug.LiveEditChangeScript.Assert = function(condition, message) {
    }
  }
}
-  
+
// An object describing function compilation details. Its index fields
// apply to indexes inside the array that stores these objects.
Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
@@ -337,7 +344,7 @@ Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
  this.next_sibling_index = null;
  this.raw_array = raw_array;
}
-  
+
// A structure describing SharedFunctionInfo.
Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
  this.function_name = raw_array[0];
@@ -364,18 +371,18 @@ Debug.LiveEditChangeScript.CompareFunctionExpectations =
  }
  var scope_info1 = function_info1.scope_info;
  var scope_info2 = function_info2.scope_info;
-  
+
  if (!scope_info1) {
    return !scope_info2;
  }
-  
+
  if (scope_info1.length != scope_info2.length) {
    return false;
  }
 
  // Check that outer scope structure is not changed. Otherwise the function
  // will not properly work with existing scopes.
-  return scope_info1.toString() == scope_info2.toString(); 
+  return scope_info1.toString() == scope_info2.toString();
}
 
// For an array of wrapped shared function infos, checks that none of them
@@ -384,24 +391,37 @@
Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
    change_log) {
  var liveedit = Debug.LiveEditChangeScript;
-  
+
  var shared_list = new Array();
  for (var i = 0; i < shared_wrapper_list.length; i++) {
    shared_list[i] = shared_wrapper_list[i].info;
  }
-  var result = %LiveEditCheckStackActivations(shared_list);
+  var result = %LiveEditCheckAndDropActivations(shared_list, true);
+  if (result[shared_list.length]) {
+    // Extra array element may contain error message.
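+    // (A non-empty value one slot past the per-function statuses signals a
+    // system error that aborts the whole patch attempt.)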
+    throw new liveedit.Failure(result[shared_list.length]);
+  }
+  var problems = new Array();
+  var dropped = new Array();
  for (var i = 0; i < shared_list.length; i++) {
-    if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) {
-      var shared = shared_list[i];
+    var shared = shared_wrapper_list[i];
+    if (result[i] ==
+        liveedit.FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
+      dropped.push({ name: shared.function_name } );
+    } else if (result[i] !=
+        liveedit.FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
      var description = {
          name: shared.function_name,
          start_pos: shared.start_position,
-          end_pos: shared.end_position
+          end_pos: shared.end_position,
+          replace_problem:
+              liveedit.FunctionPatchabilityStatus.SymbolName(result[i])
      };
      problems.push(description);
    }
  }
+  if (dropped.length > 0) {
+    change_log.push({ dropped_from_stack: dropped });
+  }
  if (problems.length > 0) {
    change_log.push( { functions_on_stack: problems } );
    throw new liveedit.Failure("Blocked by functions on stack");
@@ -410,8 +430,21 @@
 
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
-    FUNCTION_AVAILABLE_FOR_PATCH: 0,
-    FUNCTION_BLOCKED_ON_STACK: 1
+    AVAILABLE_FOR_PATCH: 1,
+    BLOCKED_ON_ACTIVE_STACK: 2,
+    BLOCKED_ON_OTHER_STACK: 3,
+    BLOCKED_UNDER_NATIVE_CODE: 4,
+    REPLACED_ON_ACTIVE_STACK: 5
+}
+
+Debug.LiveEditChangeScript.FunctionPatchabilityStatus.SymbolName =
+    function(code) {
+  var statuses = Debug.LiveEditChangeScript.FunctionPatchabilityStatus;
+  for (var name in statuses) {
+    if (statuses[name] == code) {
+      return name;
+    }
+  }
}
@@ -422,10 +455,93 @@
Debug.LiveEditChangeScript.Failure = function(message) {
}
 
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
-  return "LiveEdit Failure: " + this.message; 
+  return "LiveEdit Failure: " + this.message;
}
 
// A testing entry.
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
  return %GetFunctionCodePositionFromSource(func, source_pos);
}
+
+// A LiveEdit namespace is declared inside a single function constructor.
+Debug.LiveEdit = new function() {
+  var LiveEdit = this;
+
+
+  // LiveEdit main entry point: changes a script text to a new string.
+  LiveEdit.SetScriptSource = function(script, new_source, change_log) {
+    var old_source = script.source;
+    var diff = FindSimpleDiff(old_source, new_source);
+    if (!diff) {
+      return;
+    }
+    Debug.LiveEditChangeScript(script, diff.change_pos, diff.old_len,
+        new_source.substring(diff.change_pos, diff.change_pos + diff.new_len),
+        change_log);
+  }
+
+
+  // Finds a difference between 2 strings in the form of a single chunk.
+  // This is a temporary solution. We should calculate a real diff instead.
+  function FindSimpleDiff(old_source, new_source) {
+    var change_pos;
+    var old_len;
+    var new_len;
+
+    // A find range block. Whenever control leaves it, it must have set the
+    // 3 local variables declared above.
+    find_range:
+    {
+      // First look from the beginning of strings.
+      var pos1;
+      {
+        var next_pos;
+        for (pos1 = 0; true; pos1 = next_pos) {
+          if (pos1 >= old_source.length) {
+            change_pos = pos1;
+            old_len = 0;
+            new_len = new_source.length - pos1;
+            break find_range;
+          }
+          if (pos1 >= new_source.length) {
+            change_pos = pos1;
+            old_len = old_source.length - pos1;
+            new_len = 0;
+            break find_range;
+          }
+          if (old_source[pos1] != new_source[pos1]) {
+            break;
+          }
+          next_pos = pos1 + 1;
+        }
+      }
+      // Now compare strings from the ends.
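+      // Worked example: old == "abXcd", new == "abYYcd". The prefix scan
+      // above stops at pos1 == 2; the suffix scan below then yields
+      // old_len == 1 ("X") and new_len == 2 ("YY") with change_pos == 2.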
+      change_pos = pos1;
+      var pos_old;
+      var pos_new;
+      {
+        for (pos_old = old_source.length - 1, pos_new = new_source.length - 1;
+            true;
+            pos_old--, pos_new--) {
+          if (pos_old - change_pos + 1 < 0 || pos_new - change_pos + 1 < 0) {
+            old_len = pos_old - change_pos + 2;
+            new_len = pos_new - change_pos + 2;
+            break find_range;
+          }
+          if (old_source[pos_old] != new_source[pos_new]) {
+            old_len = pos_old - change_pos + 1;
+            new_len = pos_new - change_pos + 1;
+            break find_range;
+          }
+        }
+      }
+    }
+
+    if (old_len == 0 && new_len == 0) {
+      // No change.
+      return;
+    }
+
+    return { "change_pos": change_pos, "old_len": old_len, "new_len": new_len };
+  }
+}
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 55308ab67ce..8c1316b8490 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -34,6 +34,7 @@
 #include "scopes.h"
 #include "global-handles.h"
 #include "debug.h"
+#include "memory.h"
 
 namespace v8 {
 namespace internal {
@@ -446,6 +447,13 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
 }
 
 
+// Check whether the code is natural function code (not a lazy-compile stub).
+static bool IsJSFunctionCode(Code* code) {
+  return code->kind() == Code::FUNCTION;
+}
+
+
 void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
                                    Handle<JSArray> shared_info_array) {
   HandleScope scope;
@@ -455,15 +463,30 @@ void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
-  ReplaceCodeObject(shared_info->code(),
-                    *(compile_info_wrapper.GetFunctionCode()));
+
+  if (IsJSFunctionCode(shared_info->code())) {
+    ReplaceCodeObject(shared_info->code(),
+                      *(compile_info_wrapper.GetFunctionCode()));
+  }
+
+  if (shared_info->debug_info()->IsDebugInfo()) {
+    Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
+    Handle<Code> new_original_code =
+        Factory::CopyCode(compile_info_wrapper.GetFunctionCode());
+    debug_info->set_original_code(*new_original_code);
+  }
 
   shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
   shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
-  // update breakpoints, original code, constructor stub
+
+  shared_info->set_construct_stub(
+      Builtins::builtin(Builtins::JSConstructStubGeneric));
+  // update breakpoints
 }
 
 
+// TODO(635): Eval caches its scripts (same text -- same compiled info).
+// Make sure we clear such caches.
 void LiveEdit::RelinkFunctionToScript(Handle<JSArray> shared_info_array,
                                       Handle