/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
* May 28, 2008.
*
* The Initial Developer of the Original Code is
* Brendan Eich <brendan@mozilla.org>
*
* Contributor(s):
* Andreas Gal <gal@mozilla.com>
* Mike Shaver <shaver@mozilla.org>
* David Anderson <danderson@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "jsstdint.h"
#include "jsbit.h" // low-level (NSPR-based) headers next
#include "jsprf.h"
#include <math.h> // standard headers next
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h>
#ifdef _MSC_VER
#define alloca _alloca
#endif
#endif
#ifdef SOLARIS
#include <alloca.h>
#endif
#include <limits.h>
#include "nanojit/nanojit.h"
#include "jsapi.h" // higher-level library and API headers
#include "jsarray.h"
#include "jsbool.h"
#include "jscntxt.h"
#include "jsdate.h"
#include "jsdbgapi.h"
#include "jsemit.h"
#include "jsfun.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jsmath.h"
#include "jsobj.h"
#include "jsopcode.h"
#include "jsregexp.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstaticcheck.h"
#include "jstracer.h"
#include "jsxml.h"
#include "jstypedarray.h"
#include "jsatominlines.h"
#include "jsinterpinlines.h"
#include "jsobjinlines.h"
#include "jsscopeinlines.h"
#include "jsscriptinlines.h"
#include "jsautooplen.h" // generated headers last
#include "imacros.c.out"
#if defined(NANOJIT_ARM) && defined(__GNUC__) && defined(AVMPLUS_LINUX)
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <elf.h>
#endif
namespace nanojit {
using namespace js;
/* Implement embedder-specific nanojit members. */
void*
nanojit::Allocator::allocChunk(size_t nbytes)
{
VMAllocator *vma = (VMAllocator*)this;
JS_ASSERT(!vma->outOfMemory());
void *p = calloc(1, nbytes);
if (!p) {
JS_ASSERT(nbytes < sizeof(vma->mReserve));
vma->mOutOfMemory = true;
p = (void*) &vma->mReserve[0];
}
vma->mSize += nbytes;
return p;
}
void
nanojit::Allocator::freeChunk(void *p) {
VMAllocator *vma = (VMAllocator*)this;
if (p != &vma->mReserve[0])
free(p);
}
void
nanojit::Allocator::postReset() {
VMAllocator *vma = (VMAllocator*)this;
vma->mOutOfMemory = false;
vma->mSize = 0;
}
void
StackFilter::getTops(LIns* guard, int& spTop, int& rpTop)
{
VMSideExit* e = (VMSideExit*)guard->record()->exit;
spTop = e->sp_adj;
rpTop = e->rp_adj;
}
#if defined NJ_VERBOSE
void
LirNameMap::formatGuard(LIns *i, char *out)
{
VMSideExit *x;
x = (VMSideExit *)i->record()->exit;
sprintf(out,
"%s: %s %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
formatRef(i),
lirNames[i->opcode()],
i->oprnd1() ? formatRef(i->oprnd1()) : "",
(void *)x->pc,
(void *)x->imacpc,
(long int)x->sp_adj,
(long int)x->rp_adj,
i->record()->profGuardID);
}
void
LirNameMap::formatGuardXov(LIns *i, char *out)
{
VMSideExit *x;
x = (VMSideExit *)i->record()->exit;
sprintf(out,
"%s = %s %s, %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
formatRef(i),
lirNames[i->opcode()],
formatRef(i->oprnd1()),
formatRef(i->oprnd2()),
(void *)x->pc,
(void *)x->imacpc,
(long int)x->sp_adj,
(long int)x->rp_adj,
i->record()->profGuardID);
}
#endif
} /* namespace nanojit */
namespace js {
using namespace nanojit;
#if JS_HAS_XML_SUPPORT
#define RETURN_VALUE_IF_XML(val, ret) \
JS_BEGIN_MACRO \
if (!JSVAL_IS_PRIMITIVE(val) && JSVAL_TO_OBJECT(val)->isXML()) \
RETURN_VALUE("xml detected", ret); \
JS_END_MACRO
#else
#define RETURN_VALUE_IF_XML(val, ret) ((void) 0)
#endif
#define RETURN_IF_XML_A(val) RETURN_VALUE_IF_XML(val, ARECORD_STOP)
#define RETURN_IF_XML(val) RETURN_VALUE_IF_XML(val, RECORD_STOP)
/*
* Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and
* the type. What you want to use is JSVAL_IS_SPECIAL(x) and then handle the
* undefined case properly (bug 457363).
*/
#undef JSVAL_IS_BOOLEAN
#define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0)
JS_STATIC_ASSERT(sizeof(TraceType) == 1);
JS_STATIC_ASSERT(offsetof(TraceNativeStorage, stack_global_buf) % 16 == 0);
/* Map to translate a type tag into a printable representation. */
static const char typeChar[] = "OIDXSNBF";
static const char tagChar[] = "OIDISIBI";
/* Blacklist parameters. */
/*
* Number of iterations of a loop where we start tracing. That is, we don't
* start tracing until the beginning of the HOTLOOP-th iteration.
*/
#define HOTLOOP 2
/* Attempt recording this many times before blacklisting permanently. */
#define BL_ATTEMPTS 2
/* Skip this many hits before attempting recording again, after an aborted attempt. */
#define BL_BACKOFF 32
/* Number of times we wait to exit on a side exit before we try to extend the tree. */
#define HOTEXIT 1
/* Number of times we try to extend the tree along a side exit. */
#define MAXEXIT 3
/* Maximum number of peer trees allowed. */
#define MAXPEERS 9
/* Max number of hits to a RECURSIVE_UNLINKED exit before we trash the tree. */
#define MAX_RECURSIVE_UNLINK_HITS 64
/* Max call depths for inlining. */
#define MAX_CALLDEPTH 10
/* Max number of slots in a table-switch. */
#define MAX_TABLE_SWITCH 256
/* Max memory needed to rebuild the interpreter stack when falling off trace. */
#define MAX_INTERP_STACK_BYTES \
(MAX_NATIVE_STACK_SLOTS * sizeof(jsval) + \
MAX_CALL_STACK_ENTRIES * sizeof(JSInlineFrame) + \
sizeof(JSInlineFrame)) /* possibly slow native frame at top of stack */
/* Max number of branches per tree. */
#define MAX_BRANCHES 32
#define CHECK_STATUS(expr) \
JS_BEGIN_MACRO \
RecordingStatus _status = (expr); \
if (_status != RECORD_CONTINUE) \
return _status; \
JS_END_MACRO
#define CHECK_STATUS_A(expr) \
JS_BEGIN_MACRO \
AbortableRecordingStatus _status = InjectStatus((expr)); \
if (_status != ARECORD_CONTINUE) \
return _status; \
JS_END_MACRO
#ifdef JS_JIT_SPEW
#define RETURN_VALUE(msg, value) \
JS_BEGIN_MACRO \
debug_only_printf(LC_TMAbort, "trace stopped: %d: %s\n", __LINE__, (msg)); \
return (value); \
JS_END_MACRO
#else
#define RETURN_VALUE(msg, value) return (value)
#endif
#define RETURN_STOP(msg) RETURN_VALUE(msg, RECORD_STOP)
#define RETURN_STOP_A(msg) RETURN_VALUE(msg, ARECORD_STOP)
#define RETURN_ERROR(msg) RETURN_VALUE(msg, RECORD_ERROR)
#define RETURN_ERROR_A(msg) RETURN_VALUE(msg, ARECORD_ERROR)
#ifdef JS_JIT_SPEW
struct __jitstats {
#define JITSTAT(x) uint64 x;
#include "jitstats.tbl"
#undef JITSTAT
} jitstats = { 0LL, };
JS_STATIC_ASSERT(sizeof(jitstats) % sizeof(uint64) == 0);
enum jitstat_ids {
#define JITSTAT(x) STAT ## x ## ID,
#include "jitstats.tbl"
#undef JITSTAT
STAT_IDS_TOTAL
};
static JSPropertySpec jitstats_props[] = {
#define JITSTAT(x) { #x, STAT ## x ## ID, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT },
#include "jitstats.tbl"
#undef JITSTAT
{ 0 }
};
static JSBool
jitstats_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
int index = -1;
if (JSVAL_IS_STRING(id)) {
JSString* str = JSVAL_TO_STRING(id);
if (strcmp(JS_GetStringBytes(str), "HOTLOOP") == 0) {
*vp = INT_TO_JSVAL(HOTLOOP);
return JS_TRUE;
}
}
if (JSVAL_IS_INT(id))
index = JSVAL_TO_INT(id);
uint64 result = 0;
switch (index) {
#define JITSTAT(x) case STAT ## x ## ID: result = jitstats.x; break;
#include "jitstats.tbl"
#undef JITSTAT
default:
*vp = JSVAL_VOID;
return JS_TRUE;
}
if (result < JSVAL_INT_MAX) {
*vp = INT_TO_JSVAL(jsint(result));
return JS_TRUE;
}
char retstr[64];
JS_snprintf(retstr, sizeof retstr, "%llu", result);
*vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, retstr));
return JS_TRUE;
}
JSClass jitstats_class = {
"jitstats",
0,
JS_PropertyStub, JS_PropertyStub,
jitstats_getProperty, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub,
JS_ConvertStub, NULL,
JSCLASS_NO_OPTIONAL_MEMBERS
};
void
InitJITStatsClass(JSContext *cx, JSObject *glob)
{
JS_InitClass(cx, glob, NULL, &jitstats_class, NULL, 0, jitstats_props, NULL, NULL, NULL);
}
#define AUDIT(x) (jitstats.x++)
#else
#define AUDIT(x) ((void)0)
#endif /* JS_JIT_SPEW */
/*
* INS_CONSTPTR can be used to embed arbitrary pointers into the native code. It should not
* be used directly to embed GC thing pointers. Instead, use the INS_CONSTOBJ/FUN/STR/SPROP
* variants which ensure that the embedded pointer will be kept alive across GCs.
*/
#define INS_CONST(c) addName(lir->insImm(c), #c)
#define INS_CONSTPTR(p) addName(lir->insImmPtr(p), #p)
#define INS_CONSTWORD(v) addName(lir->insImmPtr((void *) (v)), #v)
#define INS_CONSTVAL(v) addName(insImmVal(v), #v)
#define INS_CONSTOBJ(obj) addName(insImmObj(obj), #obj)
#define INS_CONSTFUN(fun) addName(insImmFun(fun), #fun)
#define INS_CONSTSTR(str) addName(insImmStr(str), #str)
#define INS_CONSTSPROP(sprop) addName(insImmSprop(sprop), #sprop)
#define INS_ATOM(atom) INS_CONSTSTR(ATOM_TO_STRING(atom))
#define INS_NULL() INS_CONSTPTR(NULL)
#define INS_VOID() INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))
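/*
 * Illustrative sketch (not part of the original source): the distinction the
 * comment above draws, shown with hypothetical recorder values. 'obj' and
 * 'lookupTable' are made-up names.
 *
 *   LIns* obj_ins = INS_CONSTOBJ(obj);          // GC thing: kept alive across GCs
 *   LIns* tbl_ins = INS_CONSTPTR(lookupTable);  // plain C heap pointer: no GC rooting
 */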
static avmplus::AvmCore s_core = avmplus::AvmCore();
static avmplus::AvmCore* core = &s_core;
static void OutOfMemoryAbort()
{
JS_NOT_REACHED("out of memory");
abort();
}
#ifdef JS_JIT_SPEW
static void
DumpPeerStability(TraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
#endif
/*
* We really need a better way to configure the JIT. Shaver, where is
* my fancy JIT object?
*
* NB: this is raced on, if jstracer.cpp should ever be running MT.
* I think it's harmless, though.
*/
static bool did_we_check_processor_features = false;
/* ------ Debug logging control ------ */
/*
* All the logging control stuff lives in here. It is shared between
* all threads, but I think that's OK.
*/
LogControl LogController;
#ifdef JS_JIT_SPEW
/*
* NB: this is raced on too, if jstracer.cpp should ever be running MT.
* Also harmless.
*/
static bool did_we_set_up_debug_logging = false;
static void
InitJITLogController()
{
char *tm, *tmf;
uint32_t bits;
LogController.lcbits = 0;
tm = getenv("TRACEMONKEY");
if (tm) {
fflush(NULL);
printf(
"The environment variable $TRACEMONKEY has been replaced by $TMFLAGS.\n"
"Try 'TMFLAGS=help js -j' for a list of options.\n"
);
exit(0);
}
tmf = getenv("TMFLAGS");
if (!tmf) return;
/* Using strstr() is really a cheap hack as far as flag decoding goes. */
if (strstr(tmf, "help")) {
fflush(NULL);
printf(
"usage: TMFLAGS=option,option,option,... where options can be:\n"
"\n"
" help show this message\n"
" ------ options for jstracer & jsregexp ------\n"
" minimal ultra-minimalist output; try this first\n"
" full everything except 'treevis' and 'fragprofile'\n"
" tracer tracer lifetime (FIXME:better description)\n"
" recorder trace recording stuff (FIXME:better description)\n"
" abort show trace recording aborts\n"
" stats show trace recording stats\n"
" regexp show compilation & entry for regexps\n"
" treevis spew that tracevis/tree.py can parse\n"
" ------ options for Nanojit ------\n"
" fragprofile count entries and exits for each fragment\n"
" liveness show LIR liveness at start of rdr pipeline\n"
" readlir show LIR as it enters the reader pipeline\n"
" aftersf show LIR after StackFilter\n"
" assembly show final aggregated assembly code\n"
" regalloc show regalloc state in 'assembly' output\n"
" activation show activation state in 'assembly' output\n"
"\n"
);
exit(0);
/*NOTREACHED*/
}
bits = 0;
/* flags for jstracer.cpp */
if (strstr(tmf, "minimal") || strstr(tmf, "full")) bits |= LC_TMMinimal;
if (strstr(tmf, "tracer") || strstr(tmf, "full")) bits |= LC_TMTracer;
if (strstr(tmf, "recorder") || strstr(tmf, "full")) bits |= LC_TMRecorder;
if (strstr(tmf, "abort") || strstr(tmf, "full")) bits |= LC_TMAbort;
if (strstr(tmf, "stats") || strstr(tmf, "full")) bits |= LC_TMStats;
if (strstr(tmf, "regexp") || strstr(tmf, "full")) bits |= LC_TMRegexp;
if (strstr(tmf, "treevis")) bits |= LC_TMTreeVis;
/* flags for nanojit */
if (strstr(tmf, "fragprofile")) bits |= LC_FragProfile;
if (strstr(tmf, "liveness") || strstr(tmf, "full")) bits |= LC_Liveness;
if (strstr(tmf, "activation") || strstr(tmf, "full")) bits |= LC_Activation;
if (strstr(tmf, "readlir") || strstr(tmf, "full")) bits |= LC_ReadLIR;
if (strstr(tmf, "aftersf") || strstr(tmf, "full")) bits |= LC_AfterSF;
if (strstr(tmf, "regalloc") || strstr(tmf, "full")) bits |= LC_RegAlloc;
if (strstr(tmf, "assembly") || strstr(tmf, "full")) bits |= LC_Assembly;
LogController.lcbits = bits;
return;
}
#endif
/* ------------------ Frag-level profiling support ------------------ */
#ifdef JS_JIT_SPEW
/*
* All the allocations done by this profile data-collection and
* display machinery, are done in TraceMonitor::profAlloc. That is
* emptied out at the end of js_FinishJIT. It has a lifetime from
* js_InitJIT to js_FinishJIT, which exactly matches the span
* js_FragProfiling_init to js_FragProfiling_showResults.
*/
template<class T>
static
Seq<T>* reverseInPlace(Seq<T>* seq)
{
Seq<T>* prev = NULL;
Seq<T>* curr = seq;
while (curr) {
Seq<T>* next = curr->tail;
curr->tail = prev;
prev = curr;
curr = next;
}
return prev;
}
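/*
 * Illustrative sketch (hypothetical guard records, not from the source):
 * reverseInPlace flips the tail pointers of a Seq list, e.g.
 *
 *   before: g3 -> g2 -> g1 -> NULL
 *   after:  g1 -> g2 -> g3 -> NULL
 *
 * FragProfiling_FragFinalizer below uses this to turn its copy of the
 * reverse-ordered f->guardsForFrag list back into forward order.
 */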
// The number of top blocks to show in the profile
#define N_TOP_BLOCKS 50
// Contains profile info for a single guard
struct GuardPI {
uint32_t guardID; // identifying number
uint32_t count; // number of times this guard's exit was taken
};
struct FragPI {
uint32_t count; // entry count for this Fragment
uint32_t nStaticExits; // statically: the number of exits
size_t nCodeBytes; // statically: the number of insn bytes in the main fragment
size_t nExitBytes; // statically: the number of insn bytes in the exit paths
Seq<GuardPI>* guards; // guards, each with its own count
uint32_t largestGuardID; // that exists in .guards
};
void
FragProfiling_FragFinalizer(Fragment* f, TraceMonitor* tm)
{
// Recover profiling data from 'f', which is logically at the end
// of its useful lifetime.
if (!(LogController.lcbits & LC_FragProfile))
return;
NanoAssert(f);
// Valid profFragIDs start at 1
NanoAssert(f->profFragID >= 1);
// Should be called exactly once per Fragment. This will assert if
// you issue the same FragID to more than one Fragment.
NanoAssert(!tm->profTab->containsKey(f->profFragID));
FragPI pi = { f->profCount,
f->nStaticExits,
f->nCodeBytes,
f->nExitBytes,
NULL, 0 };
// Begin sanity check on the guards
SeqBuilder<GuardPI> guardsBuilder(*tm->profAlloc);
GuardRecord* gr;
uint32_t nGs = 0;
uint32_t sumOfDynExits = 0;
for (gr = f->guardsForFrag; gr; gr = gr->nextInFrag) {
nGs++;
// Also copy the data into our auxiliary structure.
// f->guardsForFrag is in reverse order, and so this
// copy preserves that ordering (->add adds at end).
// Valid profGuardIDs start at 1.
NanoAssert(gr->profGuardID > 0);
sumOfDynExits += gr->profCount;
GuardPI gpi = { gr->profGuardID, gr->profCount };
guardsBuilder.add(gpi);
if (gr->profGuardID > pi.largestGuardID)
pi.largestGuardID = gr->profGuardID;
}
pi.guards = guardsBuilder.get();
// And put the guard list in forwards order
pi.guards = reverseInPlace(pi.guards);
// Why is this so? Because nGs is the number of guards
// at the time the LIR was generated, whereas f->nStaticExits
// is the number of them observed by the time it makes it
// through to the assembler. It can be the case that LIR
// optimisation removes redundant guards; hence we expect
// nGs to always be the same or higher.
NanoAssert(nGs >= f->nStaticExits);
// Also we can assert that the sum of the exit counts
// can't exceed the entry count. It'd be nice to assert that
// they are exactly equal, but we can't because we don't know
// how many times we got to the end of the trace.
NanoAssert(f->profCount >= sumOfDynExits);
// End sanity check on guards
tm->profTab->put(f->profFragID, pi);
}
static void
FragProfiling_showResults(TraceMonitor* tm)
{
uint32_t topFragID[N_TOP_BLOCKS];
FragPI topPI[N_TOP_BLOCKS];
uint64_t totCount = 0, cumulCount;
uint32_t totSE = 0;
size_t totCodeB = 0, totExitB = 0;
memset(topFragID, 0, sizeof(topFragID));
memset(topPI, 0, sizeof(topPI));
FragStatsMap::Iter iter(*tm->profTab);
while (iter.next()) {
uint32_t fragID = iter.key();
FragPI pi = iter.value();
uint32_t count = pi.count;
totCount += (uint64_t)count;
/* Find the rank for this entry, in tops */
int r = N_TOP_BLOCKS-1;
while (true) {
if (r == -1)
break;
if (topFragID[r] == 0) {
r--;
continue;
}
if (count > topPI[r].count) {
r--;
continue;
}
break;
}
r++;
AvmAssert(r >= 0 && r <= N_TOP_BLOCKS);
/* This entry should be placed at topPI[r]; entries currently at
slot r and above are shifted one place toward higher-numbered slots. */
if (r < N_TOP_BLOCKS) {
for (int s = N_TOP_BLOCKS-1; s > r; s--) {
topFragID[s] = topFragID[s-1];
topPI[s] = topPI[s-1];
}
topFragID[r] = fragID;
topPI[r] = pi;
}
}
LogController.printf(
"\n----------------- Per-fragment execution counts ------------------\n");
LogController.printf(
"\nTotal count = %llu\n\n", (unsigned long long int)totCount);
LogController.printf(
" Entry counts Entry counts ----- Static -----\n");
LogController.printf(
" ------Self------ ----Cumulative--- Exits Cbytes Xbytes FragID\n");
LogController.printf("\n");
if (totCount == 0)
totCount = 1; /* avoid division by zero */
cumulCount = 0;
int r;
for (r = 0; r < N_TOP_BLOCKS; r++) {
if (topFragID[r] == 0)
break;
cumulCount += (uint64_t)topPI[r].count;
LogController.printf("%3d: %5.2f%% %9u %6.2f%% %9llu"
" %3d %5u %5u %06u\n",
r,
(double)topPI[r].count * 100.0 / (double)totCount,
topPI[r].count,
(double)cumulCount * 100.0 / (double)totCount,
(unsigned long long int)cumulCount,
topPI[r].nStaticExits,
(unsigned int)topPI[r].nCodeBytes,
(unsigned int)topPI[r].nExitBytes,
topFragID[r]);
totSE += (uint32_t)topPI[r].nStaticExits;
totCodeB += topPI[r].nCodeBytes;
totExitB += topPI[r].nExitBytes;
}
LogController.printf("\nTotal displayed code bytes = %u, "
"exit bytes = %u\n"
"Total displayed static exits = %d\n\n",
(unsigned int)totCodeB, (unsigned int)totExitB, totSE);
LogController.printf("Analysis by exit counts\n\n");
for (r = 0; r < N_TOP_BLOCKS; r++) {
if (topFragID[r] == 0)
break;
LogController.printf("FragID=%06u, total count %u:\n", topFragID[r],
topPI[r].count);
uint32_t madeItToEnd = topPI[r].count;
uint32_t totThisFrag = topPI[r].count;
if (totThisFrag == 0)
totThisFrag = 1;
GuardPI gpi;
// visit the guards, in forward order
for (Seq<GuardPI>* guards = topPI[r].guards; guards; guards = guards->tail) {
gpi = (*guards).head;
if (gpi.count == 0)
continue;
madeItToEnd -= gpi.count;
LogController.printf(" GuardID=%03u %7u (%5.2f%%)\n",
gpi.guardID, gpi.count,
100.0 * (double)gpi.count / (double)totThisFrag);
}
LogController.printf(" Looped (%03u) %7u (%5.2f%%)\n",
topPI[r].largestGuardID+1,
madeItToEnd,
100.0 * (double)madeItToEnd / (double)totThisFrag);
NanoAssert(madeItToEnd <= topPI[r].count); // else unsigned underflow
LogController.printf("\n");
}
tm->profTab = NULL;
}
#endif
/* ----------------------------------------------------------------- */
#ifdef DEBUG
static const char*
getExitName(ExitType type)
{
static const char* exitNames[] =
{
#define MAKE_EXIT_STRING(x) #x,
JS_TM_EXITCODES(MAKE_EXIT_STRING)
#undef MAKE_EXIT_STRING
NULL
};
JS_ASSERT(type < TOTAL_EXIT_TYPES);
return exitNames[type];
}
static JSBool FASTCALL
PrintOnTrace(char* format, uint32 argc, double *argv)
{
union {
struct {
uint32 lo;
uint32 hi;
} i;
double d;
char *cstr;
JSObject *o;
JSString *s;
} u;
#define GET_ARG() JS_BEGIN_MACRO \
if (argi >= argc) { \
fprintf(out, "[too few args for format]"); \
break; \
} \
u.d = argv[argi++]; \
JS_END_MACRO
FILE *out = stderr;
uint32 argi = 0;
for (char *p = format; *p; ++p) {
if (*p != '%') {
putc(*p, out);
continue;
}
char ch = *++p;
if (!ch) {
fprintf(out, "[trailing %%]");
continue;
}
switch (ch) {
case 'a':
GET_ARG();
fprintf(out, "[%u:%u 0x%x:0x%x %f]", u.i.lo, u.i.hi, u.i.lo, u.i.hi, u.d);
break;
case 'd':
GET_ARG();
fprintf(out, "%d", u.i.lo);
break;
case 'u':
GET_ARG();
fprintf(out, "%u", u.i.lo);
break;
case 'x':
GET_ARG();
fprintf(out, "%x", u.i.lo);
break;
case 'f':
GET_ARG();
fprintf(out, "%f", u.d);
break;
case 'o':
GET_ARG();
js_DumpObject(u.o);
break;
case 's':
GET_ARG();
{
size_t length = u.s->length();
// protect against massive spew if u.s is a bad pointer.
if (length > 1 << 16)
length = 1 << 16;
jschar *chars = u.s->chars();
for (unsigned i = 0; i < length; ++i) {
jschar co = chars[i];
if (co < 128)
putc(co, out);
else if (co < 256)
fprintf(out, "\\u%02x", co);
else
fprintf(out, "\\u%04x", co);
}
}
break;
case 'S':
GET_ARG();
fprintf(out, "%s", u.cstr);
break;
default:
fprintf(out, "[invalid %%%c]", *p);
}
}
#undef GET_ARG
return JS_TRUE;
}
JS_DEFINE_CALLINFO_3(extern, BOOL, PrintOnTrace, CHARPTR, UINT32, DOUBLEPTR, 0, 0)
// This version is not intended to be called directly: usually it is easier to
// use one of the other overloads.
void
TraceRecorder::tprint(const char *format, int count, nanojit::LIns *insa[])
{
size_t size = strlen(format) + 1;
char* data = (char*) traceMonitor->traceAlloc->alloc(size);
memcpy(data, format, size);
double *args = (double*) traceMonitor->traceAlloc->alloc(count * sizeof(double));
for (int i = 0; i < count; ++i) {
JS_ASSERT(insa[i]);
lir->insStorei(insa[i], INS_CONSTPTR(args), sizeof(double) * i);
}
LIns* args_ins[] = { INS_CONSTPTR(args), INS_CONST(count), INS_CONSTPTR(data) };
LIns* call_ins = lir->insCall(&PrintOnTrace_ci, args_ins);
guard(false, lir->ins_eq0(call_ins), MISMATCH_EXIT);
}
// Generate a 'printf'-type call from trace for debugging.
void
TraceRecorder::tprint(const char *format)
{
LIns* insa[] = { NULL };
tprint(format, 0, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins)
{
LIns* insa[] = { ins };
tprint(format, 1, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2)
{
LIns* insa[] = { ins1, ins2 };
tprint(format, 2, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3)
{
LIns* insa[] = { ins1, ins2, ins3 };
tprint(format, 3, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4)
{
LIns* insa[] = { ins1, ins2, ins3, ins4 };
tprint(format, 4, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4,
LIns *ins5)
{
LIns* insa[] = { ins1, ins2, ins3, ins4, ins5 };
tprint(format, 5, insa);
}
void
TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4,
LIns *ins5, LIns *ins6)
{
LIns* insa[] = { ins1, ins2, ins3, ins4, ins5, ins6 };
tprint(format, 6, insa);
}
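/*
 * Illustrative sketch (hypothetical instruction names, not from the source):
 * from inside a recorder method one might spew two LIR values at trace
 * execution time like this, using the format characters handled by
 * PrintOnTrace above ('d' = int, 'u' = uint, 'x' = hex, 'f' = double,
 * 'o' = object, 's' = JSString, 'S' = C string):
 *
 *   tprint("i=%d, d=%f\n", i_ins, d_ins);
 */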
#endif
/*
* The entire VM shares one oracle. Collisions and concurrent updates are
* tolerated and worst case cause performance regressions.
*/
static Oracle oracle;
Tracker::Tracker()
{
pagelist = NULL;
}
Tracker::~Tracker()
{
clear();
}
inline jsuword
Tracker::getTrackerPageBase(const void* v) const
{
return jsuword(v) & ~TRACKER_PAGE_MASK;
}
inline jsuword
Tracker::getTrackerPageOffset(const void* v) const
{
return (jsuword(v) & TRACKER_PAGE_MASK) >> 2;
}
struct Tracker::TrackerPage*
Tracker::findTrackerPage(const void* v) const
{
jsuword base = getTrackerPageBase(v);
struct Tracker::TrackerPage* p = pagelist;
while (p) {
if (p->base == base)
return p;
p = p->next;
}
return NULL;
}
struct Tracker::TrackerPage*
Tracker::addTrackerPage(const void* v)
{
jsuword base = getTrackerPageBase(v);
struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
p->base = base;
p->next = pagelist;
pagelist = p;
return p;
}
void
Tracker::clear()
{
while (pagelist) {
TrackerPage* p = pagelist;
pagelist = pagelist->next;
free(p);
}
}
bool
Tracker::has(const void *v) const
{
return get(v) != NULL;
}
LIns*
Tracker::get(const void* v) const
{
struct Tracker::TrackerPage* p = findTrackerPage(v);
if (!p)
return NULL;
return p->map[getTrackerPageOffset(v)];
}
void
Tracker::set(const void* v, LIns* i)
{
struct Tracker::TrackerPage* p = findTrackerPage(v);
if (!p)
p = addTrackerPage(v);
p->map[getTrackerPageOffset(v)] = i;
}
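/*
 * Illustrative note (not from the source): for a jsval* v,
 * getTrackerPageBase(v) masks off the low bits (TRACKER_PAGE_MASK, defined
 * elsewhere) to find the page an address belongs to, and
 * getTrackerPageOffset(v) shifts the in-page byte offset right by 2, i.e.
 * one map entry per 4-byte word. So, schematically:
 *
 *   LIns* ins = tracker.get(vp);   // NULL if vp was never set()
 *   tracker.set(vp, other_ins);    // creates the page lazily via calloc
 */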
static inline jsuint
argSlots(JSStackFrame* fp)
{
return JS_MAX(fp->argc, fp->fun->nargs);
}
static inline bool
isNumber(jsval v)
{
return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v);
}
static inline jsdouble
asNumber(jsval v)
{
JS_ASSERT(isNumber(v));
if (JSVAL_IS_DOUBLE(v))
return *JSVAL_TO_DOUBLE(v);
return (jsdouble)JSVAL_TO_INT(v);
}
static inline bool
isInt32(jsval v)
{
if (!isNumber(v))
return false;
jsdouble d = asNumber(v);
jsint i;
return !!JSDOUBLE_IS_INT(d, i);
}
static inline jsint
asInt32(jsval v)
{
JS_ASSERT(isNumber(v));
if (JSVAL_IS_INT(v))
return JSVAL_TO_INT(v);
#ifdef DEBUG
jsint i;
JS_ASSERT(JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i));
#endif
return jsint(*JSVAL_TO_DOUBLE(v));
}
/* Return TT_DOUBLE for all numbers (int and double) and the tag otherwise. */
static inline TraceType
GetPromotedType(jsval v)
{
if (JSVAL_IS_INT(v))
return TT_DOUBLE;
if (JSVAL_IS_OBJECT(v)) {
if (JSVAL_IS_NULL(v))
return TT_NULL;
if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
return TT_FUNCTION;
return TT_OBJECT;
}
uint8_t tag = JSVAL_TAG(v);
JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
return TraceType(tag);
}
/* Return TT_INT32 for all whole numbers that fit into signed 32-bit and the tag otherwise. */
static inline TraceType
getCoercedType(jsval v)
{
if (isInt32(v))
return TT_INT32;
if (JSVAL_IS_OBJECT(v)) {
if (JSVAL_IS_NULL(v))
return TT_NULL;
if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
return TT_FUNCTION;
return TT_OBJECT;
}
uint8_t tag = JSVAL_TAG(v);
JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
return TraceType(tag);
}
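/*
 * Worked examples (illustrative, derived from the two functions above):
 *
 *   jsval               GetPromotedType   getCoercedType
 *   INT_TO_JSVAL(5)     TT_DOUBLE         TT_INT32
 *   5.5 (double)        TT_DOUBLE         TT_DOUBLE
 *   5.0 (double)        TT_DOUBLE         TT_INT32   (whole number fits int32)
 *   JSVAL_NULL          TT_NULL           TT_NULL
 *   a function object   TT_FUNCTION       TT_FUNCTION
 *   a string            TT_STRING         TT_STRING
 */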
/* Constant seed and accumulate step borrowed from the DJB hash. */
const uintptr_t ORACLE_MASK = ORACLE_SIZE - 1;
JS_STATIC_ASSERT((ORACLE_MASK & ORACLE_SIZE) == 0);
const uintptr_t FRAGMENT_TABLE_MASK = FRAGMENT_TABLE_SIZE - 1;
JS_STATIC_ASSERT((FRAGMENT_TABLE_MASK & FRAGMENT_TABLE_SIZE) == 0);
const uintptr_t HASH_SEED = 5381;
static inline void
HashAccum(uintptr_t& h, uintptr_t i, uintptr_t mask)
{
h = ((h << 5) + h + (mask & i)) & mask;
}
static JS_REQUIRES_STACK inline int
StackSlotHash(JSContext* cx, unsigned slot, const void* pc)
{
uintptr_t h = HASH_SEED;
HashAccum(h, uintptr_t(cx->fp->script), ORACLE_MASK);
HashAccum(h, uintptr_t(pc), ORACLE_MASK);
HashAccum(h, uintptr_t(slot), ORACLE_MASK);
return int(h);
}
static JS_REQUIRES_STACK inline int
GlobalSlotHash(JSContext* cx, unsigned slot)
{
uintptr_t h = HASH_SEED;
JSStackFrame* fp = cx->fp;
while (fp->down)
fp = fp->down;
HashAccum(h, uintptr_t(fp->script), ORACLE_MASK);
HashAccum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), ORACLE_MASK);
HashAccum(h, uintptr_t(slot), ORACLE_MASK);
return int(h);
}
static inline int
PCHash(jsbytecode* pc)
{
return int(uintptr_t(pc) & ORACLE_MASK);
}
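/*
 * Illustrative note (not from the source): HashAccum is the DJB accumulate
 * step, h = h*33 + i, kept within the table via 'mask'. StackSlotHash, for
 * example, mixes the script pointer, the pc and the slot number into one
 * ORACLE_SIZE-sized index, so distinct slots at the same pc usually land on
 * different oracle bits; collisions are tolerated, as the oracle comment
 * earlier in this file notes.
 */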
Oracle::Oracle()
{
/* Grow the oracle bitsets to their (fixed) size here, once. */
_stackDontDemote.set(ORACLE_SIZE-1);
_globalDontDemote.set(ORACLE_SIZE-1);
clear();
}
/* Tell the oracle that a certain global variable should not be demoted. */
JS_REQUIRES_STACK void
Oracle::markGlobalSlotUndemotable(JSContext* cx, unsigned slot)
{
#ifdef DEBUG_dvander
printf("MGSU: %d [%08x]: %d\n", slot, GlobalSlotHash(cx, slot),
_globalDontDemote.get(GlobalSlotHash(cx, slot)));
#endif
_globalDontDemote.set(GlobalSlotHash(cx, slot));
}
/* Ask the oracle whether a certain global variable is marked as undemotable. */
JS_REQUIRES_STACK bool
Oracle::isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const
{
#ifdef DEBUG_dvander
printf("IGSU: %d [%08x]: %d\n", slot, GlobalSlotHash(cx, slot),
_globalDontDemote.get(GlobalSlotHash(cx, slot)));
#endif
return _globalDontDemote.get(GlobalSlotHash(cx, slot));
}
/* Tell the oracle that a certain stack slot at a certain bytecode location should not be demoted. */
JS_REQUIRES_STACK void
Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc)
{
#ifdef DEBUG_dvander
printf("MSSU: %p:%d [%08x]: %d\n", pc, slot, StackSlotHash(cx, slot, pc),
_stackDontDemote.get(StackSlotHash(cx, slot, pc)));
#endif
_stackDontDemote.set(StackSlotHash(cx, slot, pc));
}
JS_REQUIRES_STACK void
Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot)
{
markStackSlotUndemotable(cx, slot, cx->fp->regs->pc);
}
/* Ask the oracle whether a certain stack slot is marked as undemotable. */
JS_REQUIRES_STACK bool
Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const
{
#ifdef DEBUG_dvander
printf("ISSU: %p:%d [%08x]: %d\n", pc, slot, StackSlotHash(cx, slot, pc),
_stackDontDemote.get(StackSlotHash(cx, slot, pc)));
#endif
return _stackDontDemote.get(StackSlotHash(cx, slot, pc));
}
JS_REQUIRES_STACK bool
Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const
{
return isStackSlotUndemotable(cx, slot, cx->fp->regs->pc);
}
/* Tell the oracle that the instruction at a certain bytecode location should not be demoted. */
void
Oracle::markInstructionUndemotable(jsbytecode* pc)
{
_pcDontDemote.set(PCHash(pc));
}
/* Ask the oracle whether the instruction at a certain bytecode location is marked as undemotable. */
bool
Oracle::isInstructionUndemotable(jsbytecode* pc) const
{
return _pcDontDemote.get(PCHash(pc));
}
void
Oracle::clearDemotability()
{
_stackDontDemote.reset();
_globalDontDemote.reset();
_pcDontDemote.reset();
}
JS_REQUIRES_STACK static JS_INLINE void
MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
{
if (slot < f->nStackTypes) {
oracle.markStackSlotUndemotable(cx, slot);
return;
}
uint16* gslots = f->globalSlots->data();
oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
JS_REQUIRES_STACK static JS_INLINE void
MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
{
if (slot < f->nStackTypes) {
oracle.markStackSlotUndemotable(cx, slot, pc);
return;
}
uint16* gslots = f->globalSlots->data();
oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
static JS_REQUIRES_STACK inline bool
IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
{
if (slot < f->nStackTypes)
return oracle.isStackSlotUndemotable(cx, slot, ip);
uint16* gslots = f->globalSlots->data();
return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
static JS_REQUIRES_STACK inline bool
IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
{
return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc);
}
class FrameInfoCache
{
struct HashPolicy
{
typedef FrameInfo *Lookup;
static HashNumber hash(const FrameInfo* fi) {
size_t len = sizeof(FrameInfo) + fi->callerHeight * sizeof(TraceType);
HashNumber h = 0;
const unsigned char *s = (const unsigned char*)fi;
for (size_t i = 0; i < len; i++, s++)
h = JS_ROTATE_LEFT32(h, 4) ^ *s;
return h;
}
static bool match(const FrameInfo* fi1, const FrameInfo* fi2) {
if (memcmp(fi1, fi2, sizeof(FrameInfo)) != 0)
return false;
return memcmp(fi1->get_typemap(), fi2->get_typemap(),
fi1->callerHeight * sizeof(TraceType)) == 0;
}
};
typedef HashSet<FrameInfo *, HashPolicy, SystemAllocPolicy> FrameSet;
FrameSet set;
VMAllocator *allocator;
public:
FrameInfoCache(VMAllocator *allocator);
void reset() {
set.clear();
}
FrameInfo *memoize(FrameInfo *fi) {
FrameSet::AddPtr p = set.lookupForAdd(fi);
if (!p) {
FrameInfo* n = (FrameInfo*)
allocator->alloc(sizeof(FrameInfo) + fi->callerHeight * sizeof(TraceType));
memcpy(n, fi, sizeof(FrameInfo) + fi->callerHeight * sizeof(TraceType));
if (!set.add(p, n))
return NULL;
}
return *p;
}
};
FrameInfoCache::FrameInfoCache(VMAllocator *allocator)
: allocator(allocator)
{
if (!set.init())
OutOfMemoryAbort();
}
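/*
 * Illustrative sketch (hypothetical caller, not from the source): memoize()
 * interns FrameInfo records, so two structurally identical frames (same
 * FrameInfo header and the same trailing type map) share one heap copy:
 *
 *   FrameInfo* canonical = frameCache->memoize(&scratchFi);
 *   // 'canonical' may be a previously stored copy; NULL only on OOM.
 */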
#define PC_HASH_COUNT 1024
static void
Blacklist(jsbytecode* pc)
{
AUDIT(blacklisted);
JS_ASSERT(*pc == JSOP_TRACE || *pc == JSOP_NOP || *pc == JSOP_CALL);
if (*pc == JSOP_CALL) {
JS_ASSERT(*(pc + JSOP_CALL_LENGTH) == JSOP_TRACE ||
*(pc + JSOP_CALL_LENGTH) == JSOP_NOP);
*(pc + JSOP_CALL_LENGTH) = JSOP_NOP;
} else if (*pc == JSOP_TRACE) {
*pc = JSOP_NOP;
}
}
static bool
IsBlacklisted(jsbytecode* pc)
{
if (*pc == JSOP_NOP)
return true;
if (*pc == JSOP_CALL)
return *(pc + JSOP_CALL_LENGTH) == JSOP_NOP;
return false;
}
static void
Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree = NULL)
{
/* N.B. This code path cannot assume the recorder is/is not alive. */
RecordAttemptMap &table = *JS_TRACE_MONITOR(cx).recordAttempts;
if (RecordAttemptMap::AddPtr p = table.lookupForAdd(pc)) {
if (p->value++ > (BL_ATTEMPTS * MAXPEERS)) {
p->value = 0;
Blacklist(pc);
return;
}
} else {
table.add(p, pc, 0);
}
if (tree) {
tree->hits() -= BL_BACKOFF;
/*
* In case there is no entry or no table (due to OOM) or some
* serious imbalance in the recording-attempt distribution on a
* multitree, give each tree another chance to blacklist here as
* well.
*/
if (++tree->recordAttempts > BL_ATTEMPTS)
Blacklist(pc);
}
}
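/*
 * Illustrative arithmetic (derived from the constants above): with
 * BL_ATTEMPTS = 2 and MAXPEERS = 9, Backoff() blacklists a pc once its
 * per-pc attempt counter passes 2*9 = 18 failed recordings. When a tree is
 * supplied, each call also knocks BL_BACKOFF = 32 hits off the tree's hit
 * counter, so the loop must run roughly 32 more iterations (on top of the
 * usual HOTLOOP = 2 threshold) before recording is attempted again, and the
 * tree itself is blacklisted after BL_ATTEMPTS record attempts.
 */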
static void
ResetRecordingAttempts(JSContext *cx, jsbytecode* pc)
{
RecordAttemptMap &table = *JS_TRACE_MONITOR(cx).recordAttempts;
if (RecordAttemptMap::Ptr p = table.lookup(pc))
p->value = 0;
}
static inline size_t
FragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
{
uintptr_t h = HASH_SEED;
HashAccum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK);
HashAccum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK);
HashAccum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK);
HashAccum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK);
return size_t(h);
}
static void
RawLookupFirstPeer(TraceMonitor* tm, const void *ip, JSObject* globalObj,
uint32 globalShape, uint32 argc,
TreeFragment*& firstInBucket, TreeFragment**& prevTreeNextp)
{
size_t h = FragmentHash(ip, globalObj, globalShape, argc);
TreeFragment** ppf = &tm->vmfragments[h];
firstInBucket = *ppf;
for (; TreeFragment* pf = *ppf; ppf = &pf->next) {
if (pf->globalObj == globalObj &&
pf->globalShape == globalShape &&
pf->ip == ip &&
pf->argc == argc) {
prevTreeNextp = ppf;
return;
}
}
prevTreeNextp = ppf;
return;
}
static TreeFragment*
LookupLoop(TraceMonitor* tm, const void *ip, JSObject* globalObj,
uint32 globalShape, uint32 argc)
{
TreeFragment *_, **prevTreeNextp;
RawLookupFirstPeer(tm, ip, globalObj, globalShape, argc, _, prevTreeNextp);
return *prevTreeNextp;
}
static TreeFragment*
LookupOrAddLoop(TraceMonitor* tm, const void *ip, JSObject* globalObj,
uint32 globalShape, uint32 argc)
{
TreeFragment *firstInBucket, **prevTreeNextp;
RawLookupFirstPeer(tm, ip, globalObj, globalShape, argc, firstInBucket, prevTreeNextp);
if (TreeFragment *f = *prevTreeNextp)
return f;
verbose_only(
uint32_t profFragID = (LogController.lcbits & LC_FragProfile)
? (++(tm->lastFragID)) : 0;
)
TreeFragment* f = new (*tm->dataAlloc) TreeFragment(ip, tm->dataAlloc, globalObj, globalShape,
argc verbose_only(, profFragID));
f->root = f; /* f is the root of a new tree */
*prevTreeNextp = f; /* insert f at the end of the vmfragments bucket-list */
f->next = NULL;
f->first = f; /* initialize peer-list at f */
f->peer = NULL;
return f;
}
static TreeFragment*
AddNewPeerToPeerList(TraceMonitor* tm, TreeFragment* peer)
{
JS_ASSERT(peer);
verbose_only(
uint32_t profFragID = (LogController.lcbits & LC_FragProfile)
? (++(tm->lastFragID)) : 0;
)
TreeFragment* f = new (*tm->dataAlloc) TreeFragment(peer->ip, tm->dataAlloc, peer->globalObj,
peer->globalShape, peer->argc
verbose_only(, profFragID));
f->root = f; /* f is the root of a new tree */
f->first = peer->first; /* add f to peer list */
f->peer = peer->peer;
peer->peer = f;
/* only the |first| Fragment of a peer list needs a valid |next| field */
debug_only(f->next = (TreeFragment*)0xcdcdcdcd);
return f;
}
JS_REQUIRES_STACK void
TreeFragment::initialize(JSContext* cx, SlotList *globalSlots)
{
this->dependentTrees.clear();
this->linkedTrees.clear();
this->globalSlots = globalSlots;
/* Capture the coerced type of each active slot in the type map. */
this->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
this->nStackTypes = this->typeMap.length() - globalSlots->length();
#ifdef DEBUG
this->treeFileName = cx->fp->script->filename;
this->treeLineNumber = js_FramePCToLineNumber(cx, cx->fp);
this->treePCOffset = FramePCOffset(cx->fp);
#endif
this->script = cx->fp->script;
this->recursion = Recursion_None;
this->gcthings.clear();
this->sprops.clear();
this->unstableExits = NULL;
this->sideExits.clear();
/* Determine the native frame layout at the entry point. */
this->nativeStackBase = (nStackTypes - (cx->fp->regs->sp - StackBase(cx->fp))) *
sizeof(double);
this->maxNativeStackSlots = nStackTypes;
this->maxCallDepth = 0;
}
UnstableExit*
TreeFragment::removeUnstableExit(VMSideExit* exit)
{
/* Now erase this exit from the unstable exit list. */
UnstableExit** tail = &this->unstableExits;
for (UnstableExit* uexit = this->unstableExits; uexit != NULL; uexit = uexit->next) {
if (uexit->exit == exit) {
*tail = uexit->next;
return *tail;
}
tail = &uexit->next;
}
JS_NOT_REACHED("exit not in unstable exit list");
return NULL;
}
#ifdef DEBUG
static void
AssertTreeIsUnique(TraceMonitor* tm, TreeFragment* f)
{
JS_ASSERT(f->root == f);
/*
* Check for duplicate entry type maps. This is always wrong and hints at
* trace explosion since we are trying to stabilize something without
* properly connecting peer edges.
*/
for (TreeFragment* peer = LookupLoop(tm, f->ip, f->globalObj, f->globalShape, f->argc);
peer != NULL;
peer = peer->peer) {
if (!peer->code() || peer == f)
continue;
JS_ASSERT(!f->typeMap.matches(peer->typeMap));
}
}
#endif
static void
AttemptCompilation(JSContext *cx, TraceMonitor* tm, JSObject* globalObj, jsbytecode* pc,
uint32 argc)
{
/* If we already permanently blacklisted the location, undo that. */
JS_ASSERT(*pc == JSOP_NOP || *pc == JSOP_TRACE || *pc == JSOP_CALL);
if (*pc == JSOP_NOP)
*pc = JSOP_TRACE;
ResetRecordingAttempts(cx, pc);
/* Breathe new life into all peer fragments at the designated loop header. */
TreeFragment* f = LookupLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), argc);
if (!f) {
/*
* If the global object's shape changed, we can't easily find the
* corresponding loop header via a hash table lookup. In this case
* we simply bail here and hope that the fragment has another
* outstanding compilation attempt. This case is extremely rare.
*/
return;
}
JS_ASSERT(f->root == f);
f = f->first;
while (f) {
JS_ASSERT(f->root == f);
--f->recordAttempts;
f->hits() = HOTLOOP;
f = f->peer;
}
}
static bool
isfop(LIns* i, LOpcode op)
{
if (i->isop(op))
return true;
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float &&
i->isop(LIR_qjoin) &&
i->oprnd1()->isop(LIR_icall) &&
i->oprnd2()->isop(LIR_callh)) {
return i->oprnd1()->callInfo() == softFloatOps.opmap[op];
}
#endif
return false;
}
static const CallInfo *
fcallinfo(LIns *i)
{
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float) {
if (!i->isop(LIR_qjoin))
return NULL;
i = i->oprnd1();
return i->isop(LIR_icall) ? i->callInfo() : NULL;
}
#endif
return i->isop(LIR_fcall) ? i->callInfo() : NULL;
}
static LIns*
fcallarg(LIns* i, int n)
{
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float) {
NanoAssert(i->isop(LIR_qjoin));
return i->oprnd1()->callArgN(n);
}
#endif
NanoAssert(i->isop(LIR_fcall));
return i->callArgN(n);
}
static LIns*
foprnd1(LIns* i)
{
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float)
return fcallarg(i, 0);
#endif
return i->oprnd1();
}
static LIns*
foprnd2(LIns* i)
{
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float)
return fcallarg(i, 1);
#endif
return i->oprnd2();
}
static LIns*
demote(LirWriter *out, LIns* i)
{
if (i->isCall())
return i->callArgN(0);
if (isfop(i, LIR_i2f) || isfop(i, LIR_u2f))
return foprnd1(i);
if (i->isconst())
return i;
JS_ASSERT(i->isconstf());
double cf = i->imm64f();
int32_t ci = cf > 0x7fffffff ? uint32_t(cf) : int32_t(cf);
return out->insImm(ci);
}
static bool
isPromoteInt(LIns* i)
{
if (isfop(i, LIR_i2f) || i->isconst())
return true;
if (!i->isconstf())
return false;
jsdouble d = i->imm64f();
return d == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d);
}
static bool
isPromoteUint(LIns* i)
{
if (isfop(i, LIR_u2f) || i->isconst())
return true;
if (!i->isconstf())
return false;
jsdouble d = i->imm64f();
return d == jsdouble(jsuint(d)) && !JSDOUBLE_IS_NEGZERO(d);
}
static bool
isPromote(LIns* i)
{
return isPromoteInt(i) || isPromoteUint(i);
}
static bool
IsConst(LIns* i, int32_t c)
{
return i->isconst() && i->imm32() == c;
}
/*
* Determine whether this operand is guaranteed to not overflow the specified
* integer operation.
*/
static bool
IsOverflowSafe(LOpcode op, LIns* i)
{
LIns* c;
switch (op) {
case LIR_add:
case LIR_sub:
return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
((c->imm32() & 0xc0000000) == 0)) ||
(i->isop(LIR_rsh) && ((c = i->oprnd2())->isconst()) &&
((c->imm32() > 0)));
default:
JS_ASSERT(op == LIR_mul);
}
return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
((c->imm32() & 0xffff0000) == 0)) ||
(i->isop(LIR_ush) && ((c = i->oprnd2())->isconst()) &&
((c->imm32() >= 16)));
}
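/*
 * Worked example (illustrative): for LIR_add/LIR_sub an operand of the form
 * (x & c) with (c & 0xc0000000) == 0 is at most 0x3fffffff (30 bits), so the
 * sum or difference of two such values cannot overflow a signed 32-bit int.
 * The LIR_mul case instead requires the mask or shift amount to bound the
 * operand to 16 bits. The predicate looks at one operand at a time.
 */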
class FuncFilter: public LirWriter
{
public:
FuncFilter(LirWriter* out):
LirWriter(out)
{
}
LIns* ins2(LOpcode v, LIns* s0, LIns* s1)
{
if (s0 == s1 && v == LIR_feq) {
if (isPromote(s0)) {
// double(int) and double(uint) cannot be nan
return insImm(1);
}
if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) {
LIns* lhs = s0->oprnd1();
LIns* rhs = s0->oprnd2();
if (isPromote(lhs) && isPromote(rhs)) {
// add/sub/mul promoted ints can't be nan
return insImm(1);
}
}
} else if (LIR_feq <= v && v <= LIR_fge) {
if (isPromoteInt(s0) && isPromoteInt(s1)) {
// demote fcmp to cmp
v = LOpcode(v + (LIR_eq - LIR_feq));
return out->ins2(v, demote(out, s0), demote(out, s1));
} else if (isPromoteUint(s0) && isPromoteUint(s1)) {
// uint compare
v = LOpcode(v + (LIR_eq - LIR_feq));
if (v != LIR_eq)
v = LOpcode(v + (LIR_ult - LIR_lt)); // cmp -> ucmp
return out->ins2(v, demote(out, s0), demote(out, s1));
}
} else if (v == LIR_or &&
s0->isop(LIR_lsh) && IsConst(s0->oprnd2(), 16) &&
s1->isop(LIR_and) && IsConst(s1->oprnd2(), 0xffff)) {
LIns* msw = s0->oprnd1();
LIns* lsw = s1->oprnd1();
LIns* x;
LIns* y;
if (lsw->isop(LIR_add) &&
lsw->oprnd1()->isop(LIR_and) &&
lsw->oprnd2()->isop(LIR_and) &&
IsConst(lsw->oprnd1()->oprnd2(), 0xffff) &&
IsConst(lsw->oprnd2()->oprnd2(), 0xffff) &&
msw->isop(LIR_add) &&
msw->oprnd1()->isop(LIR_add) &&
msw->oprnd2()->isop(LIR_rsh) &&
msw->oprnd1()->oprnd1()->isop(LIR_rsh) &&
msw->oprnd1()->oprnd2()->isop(LIR_rsh) &&
IsConst(msw->oprnd2()->oprnd2(), 16) &&
IsConst(msw->oprnd1()->oprnd1()->oprnd2(), 16) &&
IsConst(msw->oprnd1()->oprnd2()->oprnd2(), 16) &&
(x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() &&
(y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() &&
lsw == msw->oprnd2()->oprnd1()) {
return out->ins2(LIR_add, x, y);
}
}
return out->ins2(v, s0, s1);
}
};
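/*
 * Illustrative note (not from the source): the last pattern in ins2() above
 * recognizes LIR of the shape
 *
 *   (msw << 16) | (lsw & 0xffff)    where
 *   lsw = (x & 0xffff) + (y & 0xffff)
 *   msw = ((x >> 16) + (y >> 16)) + (lsw >> 16)
 *
 * i.e. a 32-bit addition rebuilt from 16-bit halves with explicit carry, and
 * collapses it back into a single LIR_add(x, y).
 */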
/*
* Visit the values in the given JSStackFrame that the tracer cares about. This
* visitor function is (implicitly) the primary definition of the native stack
* area layout. There are a few other independent pieces of code that must be
* maintained to assume the same layout. They are marked like this:
*
* Duplicate native stack layout computation: see VisitFrameSlots header comment.
*/
template <typename Visitor>
static JS_REQUIRES_STACK bool
VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp,
JSStackFrame *up)
{
if (depth > 0 && !VisitFrameSlots(visitor, depth-1, fp->down, fp))
return false;
if (fp->argv) {
if (depth == 0) {
visitor.setStackSlotKind("args");
if (!visitor.visitStackSlots(&fp->argv[-2], argSlots(fp) + 2, fp))
return false;
}
visitor.setStackSlotKind("arguments");
if (!visitor.visitStackSlots(&fp->argsobj, 1, fp))
return false;
// We want to import and track |JSObject *scopeChain|, but the tracker
// requires type |jsval|. But the bits are the same, so we can import
// it with a cast and the (identity function) unboxing will be OK.
visitor.setStackSlotKind("scopeChain");
if (!visitor.visitStackSlots((jsval*) &fp->scopeChain, 1, fp))
return false;
visitor.setStackSlotKind("var");
if (!visitor.visitStackSlots(fp->slots, fp->script->nfixed, fp))
return false;
}
visitor.setStackSlotKind("stack");
JS_ASSERT(fp->regs->sp >= StackBase(fp));
if (!visitor.visitStackSlots(StackBase(fp),
size_t(fp->regs->sp - StackBase(fp)),
fp)) {
return false;
}
if (up) {
int missing = up->fun->nargs - up->argc;
if (missing > 0) {
visitor.setStackSlotKind("missing");
if (!visitor.visitStackSlots(fp->regs->sp, size_t(missing), fp))
return false;
}
}
return true;
}
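/*
 * Illustrative summary (derived from VisitFrameSlots above) of the slots
 * visited per frame, in order; rows marked (fn) apply only to function
 * frames (fp->argv != NULL):
 *
 *   callee, this, argv[0..]              "args"        (entry frame only) (fn)
 *   argsobj                              "arguments"   (fn)
 *   scopeChain                           "scopeChain"  (fn)
 *   slots[0..script->nfixed-1]           "var"         (fn)
 *   StackBase(fp) .. regs->sp            "stack"
 *   missing formals of the frame above   "missing"     (if any)
 */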
// Number of native frame slots used for 'special' values between args and vars.
// Currently the two values are |arguments| (args object) and |scopeChain|.
const int SPECIAL_FRAME_SLOTS = 2;
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
VisitStackSlots(Visitor &visitor, JSContext *cx, unsigned callDepth)
{
return VisitFrameSlots(visitor, callDepth, cx->fp, NULL);
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj,
unsigned ngslots, uint16 *gslots)
{
for (unsigned n = 0; n < ngslots; ++n) {
unsigned slot = gslots[n];
visitor.visitGlobalSlot(&STOBJ_GET_SLOT(globalObj, slot), n, slot);
}
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitGlobalSlots(Visitor &visitor, JSContext *cx, TreeFragment *f)
{
JSObject* globalObj = f->globalObj();
SlotList& gslots = *f->globalSlots;
VisitGlobalSlots(visitor, cx, globalObj, gslots.length(), gslots.data());
}
class AdjustCallerTypeVisitor;
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots)
{
VisitGlobalSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
gslots.length(), gslots.data());
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj,
unsigned callDepth, unsigned ngslots, uint16* gslots)
{
if (VisitStackSlots(visitor, cx, callDepth))
VisitGlobalSlots(visitor, cx, globalObj, ngslots, gslots);
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth,
unsigned ngslots, uint16* gslots)
{
VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
callDepth, ngslots, gslots);
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj,
unsigned callDepth, const SlotList& slots)
{
VisitSlots(visitor, cx, globalObj, callDepth, slots.length(),
slots.data());
}
template <typename Visitor>
static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
VisitSlots(Visitor &visitor, JSContext *cx, unsigned callDepth,
const SlotList& slots)
{
VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
callDepth, slots.length(), slots.data());
}
class SlotVisitorBase {
#if defined JS_JIT_SPEW
protected:
char const *mStackSlotKind;
public:
SlotVisitorBase() : mStackSlotKind(NULL) {}
JS_ALWAYS_INLINE const char *stackSlotKind() { return mStackSlotKind; }
JS_ALWAYS_INLINE void setStackSlotKind(char const *k) {
mStackSlotKind = k;
}
#else
public:
JS_ALWAYS_INLINE const char *stackSlotKind() { return NULL; }
JS_ALWAYS_INLINE void setStackSlotKind(char const *k) {}
#endif
};
struct CountSlotsVisitor : public SlotVisitorBase
{
unsigned mCount;
bool mDone;
jsval* mStop;
public:
JS_ALWAYS_INLINE CountSlotsVisitor(jsval* stop = NULL) :
mCount(0),
mDone(false),
mStop(stop)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
if (mDone)
return false;
if (mStop && size_t(mStop - vp) < count) {
mCount += size_t(mStop - vp);
mDone = true;
return false;
}
mCount += count;
return true;
}
JS_ALWAYS_INLINE unsigned count() {
return mCount;
}
JS_ALWAYS_INLINE bool stopped() {
return mDone;
}
};
/*
* Calculate the total number of native frame slots we need from this frame all
* the way back to the entry frame, including the current stack usage.
*/
JS_REQUIRES_STACK unsigned
NativeStackSlots(JSContext *cx, unsigned callDepth)
{
JSStackFrame* fp = cx->fp;
unsigned slots = 0;
unsigned depth = callDepth;
for (;;) {
/*
* Duplicate native stack layout computation: see VisitFrameSlots
* header comment.
*/
unsigned operands = fp->regs->sp - StackBase(fp);
slots += operands;
if (fp->argv)
slots += fp->script->nfixed + SPECIAL_FRAME_SLOTS;
if (depth-- == 0) {
if (fp->argv)
slots += 2/*callee,this*/ + argSlots(fp);
#ifdef DEBUG
CountSlotsVisitor visitor;
VisitStackSlots(visitor, cx, callDepth);
JS_ASSERT(visitor.count() == slots && !visitor.stopped());
#endif
return slots;
}
JSStackFrame* fp2 = fp;
fp = fp->down;
int missing = fp2->fun->nargs - fp2->argc;
if (missing > 0)
slots += missing;
}
JS_NOT_REACHED("NativeStackSlots");
}
class CaptureTypesVisitor : public SlotVisitorBase
{
JSContext* mCx;
TraceType* mTypeMap;
TraceType* mPtr;
public:
JS_ALWAYS_INLINE CaptureTypesVisitor(JSContext* cx, TraceType* typeMap) :
mCx(cx),
mTypeMap(typeMap),
mPtr(typeMap)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
TraceType type = getCoercedType(*vp);
if (type == TT_INT32 &&
oracle.isGlobalSlotUndemotable(mCx, slot))
type = TT_DOUBLE;
JS_ASSERT(type != TT_JSVAL);
debug_only_printf(LC_TMTracer,
"capture type global%d: %d=%c\n",
n, type, typeChar[type]);
*mPtr++ = type;
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, int count, JSStackFrame* fp) {
for (int i = 0; i < count; ++i) {
TraceType type = getCoercedType(vp[i]);
if (type == TT_INT32 &&
oracle.isStackSlotUndemotable(mCx, length()))
type = TT_DOUBLE;
JS_ASSERT(type != TT_JSVAL);
debug_only_printf(LC_TMTracer,
"capture type %s%d: %d=%c\n",
stackSlotKind(), i, type, typeChar[type]);
*mPtr++ = type;
}
return true;
}
JS_ALWAYS_INLINE uintptr_t length() {
return mPtr - mTypeMap;
}
};
void
TypeMap::set(unsigned stackSlots, unsigned ngslots,
const TraceType* stackTypeMap, const TraceType* globalTypeMap)
{
setLength(ngslots + stackSlots);
memcpy(data(), stackTypeMap, stackSlots * sizeof(TraceType));
memcpy(data() + stackSlots, globalTypeMap, ngslots * sizeof(TraceType));
}
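/*
 * Illustrative note (not from the source): a TypeMap is laid out as all stack
 * slot types followed by all global slot types, e.g. for 3 stack slots and 2
 * tracked globals:
 *
 *   data(): [ stk0, stk1, stk2 | glob0, glob1 ]    length() == 5
 *
 * captureTypes() below fills both halves; captureMissingGlobalTypes() appends
 * types only for globals added after the tree was first recorded.
 */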
/*
* Capture the type map for the selected slots of the global object and currently pending
* stack frames.
*/
JS_REQUIRES_STACK void
TypeMap::captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth)
{
setLength(NativeStackSlots(cx, callDepth) + slots.length());
CaptureTypesVisitor visitor(cx, data());
VisitSlots(visitor, cx, globalObj, callDepth, slots);
JS_ASSERT(visitor.length() == length());
}
JS_REQUIRES_STACK void
TypeMap::captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned stackSlots)
{
unsigned oldSlots = length() - stackSlots;
int diff = slots.length() - oldSlots;
JS_ASSERT(diff >= 0);
setLength(length() + diff);
CaptureTypesVisitor visitor(cx, data() + stackSlots + oldSlots);
VisitGlobalSlots(visitor, cx, globalObj, diff, slots.data() + oldSlots);
}
/* Compare this type map to another one and see whether they match. */
bool
TypeMap::matches(TypeMap& other) const
{
if (length() != other.length())
return false;
return !memcmp(data(), other.data(), length());
}
void
TypeMap::fromRaw(TraceType* other, unsigned numSlots)
{
unsigned oldLength = length();
setLength(length() + numSlots);
for (unsigned i = 0; i < numSlots; i++)
get(oldLength + i) = other[i];
}
/*
* Use the provided storage area to create a new type map that contains the
* partial type map with the rest of it filled up from the complete type
* map.
*/
static void
MergeTypeMaps(TraceType** partial, unsigned* plength, TraceType* complete, unsigned clength, TraceType* mem)
{
unsigned l = *plength;
JS_ASSERT(l < clength);
memcpy(mem, *partial, l * sizeof(TraceType));
memcpy(mem + l, complete + l, (clength - l) * sizeof(TraceType));
*partial = mem;
*plength = clength;
}
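/*
 * Worked example (illustrative, hypothetical lengths): with a partial map of
 * length 3 and a complete map of length 5,
 *
 *   partial : [ a, b, c ]
 *   complete: [ p, q, r, s, t ]
 *   result  : [ a, b, c, s, t ]    (*plength becomes 5, *partial points at mem)
 *
 * i.e. the caller-provided prefix wins and the tail is taken from the
 * complete map.
 */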
/*
* Specializes a tree to any specifically missing globals, including any
* dependent trees.
*/
static JS_REQUIRES_STACK void
SpecializeTreesToLateGlobals(JSContext* cx, TreeFragment* root, TraceType* globalTypeMap,
unsigned numGlobalSlots)
{
for (unsigned i = root->nGlobalTypes(); i < numGlobalSlots; i++)
root->typeMap.add(globalTypeMap[i]);
JS_ASSERT(root->nGlobalTypes() == numGlobalSlots);
for (unsigned i = 0; i < root->dependentTrees.length(); i++) {
TreeFragment* tree = root->dependentTrees[i];
if (tree->code() && tree->nGlobalTypes() < numGlobalSlots)
SpecializeTreesToLateGlobals(cx, tree, globalTypeMap, numGlobalSlots);
}
for (unsigned i = 0; i < root->linkedTrees.length(); i++) {
TreeFragment* tree = root->linkedTrees[i];
if (tree->code() && tree->nGlobalTypes() < numGlobalSlots)
SpecializeTreesToLateGlobals(cx, tree, globalTypeMap, numGlobalSlots);
}
}
/* Specializes a tree to any missing globals, including any dependent trees. */
static JS_REQUIRES_STACK void
SpecializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeFragment* root)
{
root->typeMap.captureMissingGlobalTypes(cx, globalObj, *root->globalSlots, root->nStackTypes);
JS_ASSERT(root->globalSlots->length() == root->typeMap.length() - root->nStackTypes);
SpecializeTreesToLateGlobals(cx, root, root->globalTypeMap(), root->nGlobalTypes());
}
static void
ResetJITImpl(JSContext* cx);
#ifdef MOZ_TRACEVIS
static JS_INLINE void
ResetJIT(JSContext* cx, TraceVisFlushReason r)
{
LogTraceVisEvent(cx, S_RESET, r);
ResetJITImpl(cx);
}
#else
# define ResetJIT(cx, reason) ResetJITImpl(cx)
#endif
void
FlushJITCache(JSContext *cx)
{
ResetJIT(cx, FR_OOM);
}
static void
TrashTree(JSContext* cx, TreeFragment* f);
template <class T>
static T&
InitConst(const T &t)
{
return const_cast<T &>(t);
}
JS_REQUIRES_STACK
TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* fragment,
unsigned stackSlots, unsigned ngslots, TraceType* typeMap,
VMSideExit* innermost, jsbytecode* outer, uint32 outerArgc,
RecordReason recordReason)
: cx(cx),
traceMonitor(&JS_TRACE_MONITOR(cx)),
fragment(fragment),
tree(fragment->root),
recordReason(recordReason),
globalObj(tree->globalObj),
outer(outer),
outerArgc(outerArgc),
lexicalBlock(cx->fp->blockChain),
anchor(anchor),
lir(NULL),
cx_ins(NULL),
eos_ins(NULL),
eor_ins(NULL),
loopLabel(NULL),
importTypeMap(&tempAlloc()),
lirbuf(new (tempAlloc()) LirBuffer(tempAlloc())),
mark(*traceMonitor->traceAlloc),
numSideExitsBefore(tree->sideExits.length()),
tracker(),
nativeFrameTracker(),
global_dslots(NULL),
callDepth(anchor ? anchor->calldepth : 0),
atoms(FrameAtomBase(cx, cx->fp)),
cfgMerges(&tempAlloc()),
trashSelf(false),
whichTreesToTrash(&tempAlloc()),
guardedShapeTable(cx),
rval_ins(NULL),
native_rval_ins(NULL),
newobj_ins(NULL),
pendingSpecializedNative(NULL),
pendingUnboxSlot(NULL),
pendingGuardCondition(NULL),
pendingLoop(true),
generatedSpecializedNative(),
tempTypeMap(cx)
{
JS_ASSERT(globalObj == JS_GetGlobalForObject(cx, cx->fp->scopeChain));
JS_ASSERT(cx->fp->regs->pc == (jsbytecode*)fragment->ip);
fragment->lirbuf = lirbuf;
#ifdef DEBUG
LabelMap* labels = new (tempAlloc()) LabelMap(tempAlloc(), &LogController);
lirbuf->names = new (tempAlloc()) LirNameMap(tempAlloc(), labels);
#endif
/*
* Reset the fragment state we care about in case we got a recycled
* fragment. This includes resetting any profiling data we might have
* accumulated.
*/
fragment->lastIns = NULL;
fragment->setCode(NULL);
fragment->lirbuf = lirbuf;
verbose_only( fragment->profCount = 0; )
verbose_only( fragment->nStaticExits = 0; )
verbose_only( fragment->nCodeBytes = 0; )
verbose_only( fragment->nExitBytes = 0; )
verbose_only( fragment->guardNumberer = 1; )
verbose_only( fragment->guardsForFrag = NULL; )
verbose_only( fragment->loopLabel = NULL; )
/*
* Don't change fragment->profFragID, though. Once the identity of the
* Fragment is set up (for profiling purposes), we can't change it.
*/
if (!guardedShapeTable.init())
abort();
#ifdef JS_JIT_SPEW
debug_only_print0(LC_TMMinimal, "\n");
debug_only_printf(LC_TMMinimal, "Recording starting from %s:%u@%u (FragID=%06u)\n",
tree->treeFileName, tree->treeLineNumber, tree->treePCOffset,
fragment->profFragID);
debug_only_printf(LC_TMTracer, "globalObj=%p, shape=%d\n",
(void*)this->globalObj, OBJ_SHAPE(this->globalObj));
debug_only_printf(LC_TMTreeVis, "TREEVIS RECORD FRAG=%p ANCHOR=%p\n", (void*)fragment,
(void*)anchor);
#endif
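/*
* Build the LIR writer pipeline back to front: the LirBufWriter constructed
* first writes into lirbuf, and each writer constructed after it wraps the
* previous one. Instructions emitted by the recorder therefore pass through
* FuncFilter -> ExprFilter -> SoftFloatFilter (if enabled) -> CseFilter (if
* enabled) -> the debug-only writers -> LirBufWriter.
*/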
nanojit::LirWriter*& lir = InitConst(this->lir);
lir = new (tempAlloc()) LirBufWriter(lirbuf, nanojit::AvmCore::config);
#ifdef DEBUG
lir = new (tempAlloc()) ValidateWriter(lir, "end of writer pipeline");
#endif
debug_only_stmt(
if (LogController.lcbits & LC_TMRecorder) {
lir = new (tempAlloc()) VerboseWriter(tempAlloc(), lir, lirbuf->names,
&LogController);
}
)
// CseFilter must be downstream of SoftFloatFilter (see bug 527754 for why).
if (avmplus::AvmCore::config.cseopt)
lir = new (tempAlloc()) CseFilter(lir, tempAlloc());
#if NJ_SOFTFLOAT_SUPPORTED
if (nanojit::AvmCore::config.soft_float)
lir = new (tempAlloc()) SoftFloatFilter(lir);
#endif
lir = new (tempAlloc()) ExprFilter(lir);
lir = new (tempAlloc()) FuncFilter(lir);
#ifdef DEBUG
lir = new (tempAlloc()) ValidateWriter(lir, "start of writer pipeline");
#endif
lir->ins0(LIR_start);
for (int i = 0; i < NumSavedRegs; ++i)
lir->insParam(i, 1);
#ifdef DEBUG
for (int i = 0; i < NumSavedRegs; ++i)
addName(lirbuf->savedRegs[i], regNames[Assembler::savedRegs[i]]);
#endif
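/*
* Parameter 0 is the InterpState pointer passed to the compiled trace; sp,
* rp, cx, eos and eor below are all loaded from fields of that structure.
*/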
lirbuf->state = addName(lir->insParam(0, 0), "state");
if (fragment == fragment->root)
InitConst(loopLabel) = lir->ins0(LIR_label);
// If profiling, drop a label so the assembler knows to put a
// frag-entry-counter increment at this point. If there's a
// loopLabel, use that; else we'll have to make a dummy label
// especially for this purpose.
verbose_only( if (LogController.lcbits & LC_FragProfile) {
LIns* entryLabel = NULL;
if (fragment == fragment->root) {
entryLabel = loopLabel;
} else {
entryLabel = lir->ins0(LIR_label);
}
NanoAssert(entryLabel);
NanoAssert(!fragment->loopLabel);
fragment->loopLabel = entryLabel;
})
lirbuf->sp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, sp)), "sp");
lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp");
InitConst(cx_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx");
InitConst(eos_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos");
InitConst(eor_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor");
/* If we came from an exit, we might not have enough global types. */
if (tree->globalSlots->length() > tree->nGlobalTypes())
SpecializeTreesToMissingGlobals(cx, globalObj, tree);
/* Read into registers all values on the stack and all globals we know so far. */
import(tree, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap);
/* Finish handling RECURSIVE_SLURP_FAIL_EXIT in startRecorder. */
if (anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT)
return;
if (fragment == fragment->root) {
/*
* We poll the operation callback request flag. It is updated asynchronously whenever
* the callback is to be invoked.
*/
LIns* x = lir->insLoad(LIR_ld, cx_ins, offsetof(JSContext, operationCallbackFlag));
guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT));
}
/*
* If we are attached to a tree call guard, make sure the guard the inner
* tree exited from is what we expect it to be.
*/
if (anchor && anchor->exitType == NESTED_EXIT) {
LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state,
offsetof(InterpState, outermostTreeExitGuard)),
"outermostTreeExitGuard");
guard(true, lir->ins2(LIR_peq, nested_ins, INS_CONSTPTR(innermost)), NESTED_EXIT);
}
}
TraceRecorder::~TraceRecorder()
{
/* Should already have been adjusted by callers before calling delete. */
JS_ASSERT(traceMonitor->recorder != this);
if (trashSelf)
TrashTree(cx, fragment->root);
for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
TrashTree(cx, whichTreesToTrash[i]);
/* Purge the tempAlloc used during recording. */
tempAlloc().reset();
forgetGuardedShapes();
}
inline bool
TraceMonitor::outOfMemory() const
{
return dataAlloc->outOfMemory() ||
tempAlloc->outOfMemory() ||
traceAlloc->outOfMemory();
}
/*
* This function destroys the recorder after a successful recording, possibly
* starting a suspended outer recorder.
*/
AbortableRecordingStatus
TraceRecorder::finishSuccessfully()
{
JS_ASSERT(traceMonitor->recorder == this);
JS_ASSERT(fragment->lastIns && fragment->code());
AUDIT(traceCompleted);
mark.commit();
/* Grab local copies of members needed after |delete this|. */
JSContext* localcx = cx;
TraceMonitor* localtm = traceMonitor;
localtm->recorder = NULL;
delete this;
/* Catch OOM that occurred during recording. */
if (localtm->outOfMemory() || OverfullJITCache(localtm)) {
ResetJIT(localcx, FR_OOM);
return ARECORD_ABORTED;
}
return ARECORD_COMPLETED;
}
/* This function aborts a recorder and any pending outer recorders. */
JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::finishAbort(const char* reason)
{
JS_ASSERT(traceMonitor->recorder == this);
JS_ASSERT(!fragment->code());
AUDIT(recorderAborted);
#ifdef DEBUG
debug_only_printf(LC_TMAbort,
"Abort recording of tree %s:%d@%d at %s:%d@%d: %s.\n",
tree->treeFileName,
tree->treeLineNumber,
tree->treePCOffset,
cx->fp->script->filename,
js_FramePCToLineNumber(cx, cx->fp),
FramePCOffset(cx->fp),
reason);
#endif
Backoff(cx, (jsbytecode*) fragment->root->ip, fragment->root);
/*
* If this is the primary trace and we didn't succeed compiling, trash the
* tree. Otherwise, remove the VMSideExits we added while recording, which
* are about to be invalid.
*
* BIG FAT WARNING: resetting the length is only a valid strategy as long as
* at most one recorder can be active for a single TreeInfo at a time.
* Otherwise, we may be throwing away another recorder's valid side exits.
*/
if (fragment->root == fragment) {
TrashTree(cx, fragment->toTreeFragment());
} else {
JS_ASSERT(numSideExitsBefore <= fragment->root->sideExits.length());
fragment->root->sideExits.setLength(numSideExitsBefore);
}
/* Grab local copies of members needed after |delete this|. */
JSContext* localcx = cx;
TraceMonitor* localtm = traceMonitor;
localtm->recorder = NULL;
delete this;
if (localtm->outOfMemory() || OverfullJITCache(localtm))
ResetJIT(localcx, FR_OOM);
return ARECORD_ABORTED;
}
/* Add debug information to a LIR instruction as we emit it. */
inline LIns*
TraceRecorder::addName(LIns* ins, const char* name)
{
#ifdef JS_JIT_SPEW
/*
* We'll only ask for verbose Nanojit when .lcbits > 0, so there's no point
* in adding names otherwise.
*/
if (LogController.lcbits > 0)
lirbuf->names->addName(ins, name);
#endif
return ins;
}
inline LIns*
TraceRecorder::insImmVal(jsval val)
{
if (JSVAL_IS_TRACEABLE(val))
tree->gcthings.addUnique(val);
return lir->insImmWord(val);
}
inline LIns*
TraceRecorder::insImmObj(JSObject* obj)
{
tree->gcthings.addUnique(OBJECT_TO_JSVAL(obj));
return lir->insImmPtr((void*)obj);
}
inline LIns*
TraceRecorder::insImmFun(JSFunction* fun)
{
tree->gcthings.addUnique(OBJECT_TO_JSVAL(FUN_OBJECT(fun)));
return lir->insImmPtr((void*)fun);
}
inline LIns*
TraceRecorder::insImmStr(JSString* str)
{
tree->gcthings.addUnique(STRING_TO_JSVAL(str));
return lir->insImmPtr((void*)str);
}
inline LIns*
TraceRecorder::insImmSprop(JSScopeProperty* sprop)
{
tree->sprops.addUnique(sprop);
return lir->insImmPtr((void*)sprop);
}
inline LIns*
TraceRecorder::p2i(nanojit::LIns* ins)
{
#ifdef NANOJIT_64BIT
return lir->ins1(LIR_q2i, ins);
#else
return ins;
#endif
}
ptrdiff_t
TraceRecorder::nativeGlobalSlot(jsval* p) const
{
JS_ASSERT(isGlobal(p));
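/*
* The global object keeps its first JS_INITIAL_NSLOTS values inline in
* fslots and the remainder in the separately allocated dslots array; the
* native global frame lays both out contiguously, fslots first.
*/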
if (size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS)
return ptrdiff_t(p - globalObj->fslots);
return ptrdiff_t((p - globalObj->dslots) + JS_INITIAL_NSLOTS);
}
/* Determine the offset in the native global frame for a jsval we track. */
ptrdiff_t
TraceRecorder::nativeGlobalOffset(jsval* p) const
{
return nativeGlobalSlot(p) * sizeof(double);
}
/* Determine whether a jsval pointer refers to a slot of the global object. */
bool
TraceRecorder::isGlobal(jsval* p) const
{
return ((size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) ||
(size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS)));
}
/*
* Return the offset in the native stack for the given jsval. More formally,
* |p| must be the address of a jsval that is represented in the native stack
* area. The return value is the offset, from InterpState::stackBase, in bytes,
* where the native representation of |*p| is stored. To get the offset
* relative to InterpState::sp, subtract TreeFragment::nativeStackBase.
*/
JS_REQUIRES_STACK ptrdiff_t
TraceRecorder::nativeStackOffset(jsval* p) const
{
CountSlotsVisitor visitor(p);
VisitStackSlots(visitor, cx, callDepth);
size_t offset = visitor.count() * sizeof(double);
/*
* If it's not in a pending frame, it must be on the stack of the current
* frame above sp but below fp->slots + script->nslots.
*/
if (!visitor.stopped()) {
JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots);
offset += size_t(p - cx->fp->regs->sp) * sizeof(double);
}
return offset;
}
JS_REQUIRES_STACK ptrdiff_t
TraceRecorder::nativeStackSlot(jsval* p) const
{
return nativeStackOffset(p) / sizeof(double);
}
/*
* Return the offset, from InterpState::sp, for the given jsval. Shorthand for:
* -TreeFragment::nativeStackBase + nativeStackOffset(p).
*/
inline JS_REQUIRES_STACK ptrdiff_t
TraceRecorder::nativespOffset(jsval* p) const
{
return -tree->nativeStackBase + nativeStackOffset(p);
}
/* Track the maximum number of native frame slots we need during execution. */
inline void
TraceRecorder::trackNativeStackUse(unsigned slots)
{
if (slots > tree->maxNativeStackSlots)
tree->maxNativeStackSlots = slots;
}
/*
* Unbox a jsval into a slot. Slots are wide enough to hold double values
* directly (instead of storing a pointer to them). We assert instead of
* type checking. The caller must ensure the types are compatible.
*/
static void
ValueToNative(JSContext* cx, jsval v, TraceType type, double* slot)
{
uint8_t tag = JSVAL_TAG(v);
switch (type) {
case TT_OBJECT:
JS_ASSERT(tag == JSVAL_OBJECT);
JS_ASSERT(!JSVAL_IS_NULL(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)));
*(JSObject**)slot = JSVAL_TO_OBJECT(v);
debug_only_printf(LC_TMTracer,
"object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
JSVAL_IS_NULL(v)
? "null"
: JSVAL_TO_OBJECT(v)->getClass()->name);
return;
case TT_INT32:
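/*
* A slot typed TT_INT32 may be backed either by a tagged int jsval or by a
* heap double whose value happens to be integral (a demoted double);
* JSDOUBLE_IS_INT handles the latter case.
*/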
jsint i;
if (JSVAL_IS_INT(v))
*(jsint*)slot = JSVAL_TO_INT(v);
else if (tag == JSVAL_DOUBLE && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i))
*(jsint*)slot = i;
else
JS_ASSERT(JSVAL_IS_INT(v));
debug_only_printf(LC_TMTracer, "int<%d> ", *(jsint*)slot);
return;
case TT_DOUBLE:
jsdouble d;
if (JSVAL_IS_INT(v))
d = JSVAL_TO_INT(v);
else
d = *JSVAL_TO_DOUBLE(v);
JS_ASSERT(JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v));
*(jsdouble*)slot = d;
debug_only_printf(LC_TMTracer, "double<%g> ", d);
return;
case TT_JSVAL:
JS_NOT_REACHED("found jsval type in an entry type map");
return;
case TT_STRING:
JS_ASSERT(tag == JSVAL_STRING);
*(JSString**)slot = JSVAL_TO_STRING(v);
debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot));
return;
case TT_NULL:
JS_ASSERT(tag == JSVAL_OBJECT);
*(JSObject**)slot = NULL;
debug_only_print0(LC_TMTracer, "null ");
return;
case TT_PSEUDOBOOLEAN:
/* Watch out for pseudo-booleans. */
JS_ASSERT(tag == JSVAL_SPECIAL);
*(JSBool*)slot = JSVAL_TO_SPECIAL(v);
debug_only_printf(LC_TMTracer, "pseudoboolean<%d> ", *(JSBool*)slot);
return;
case TT_FUNCTION: {
JS_ASSERT(tag == JSVAL_OBJECT);
JSObject* obj = JSVAL_TO_OBJECT(v);
*(JSObject**)slot = obj;
#ifdef DEBUG
JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj);
debug_only_printf(LC_TMTracer,
"function<%p:%s> ", (void*) obj,
fun->atom
? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
: "unnamed");
#endif
return;
}
default:
JS_NOT_REACHED("unexpected type");
break;
}
}
void
TraceMonitor::flush()
{
/* flush should only be called after all recorders have been aborted. */
JS_ASSERT(!recorder);
AUDIT(cacheFlushed);
// recover profiling data from expiring Fragments
verbose_only(
for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
for (TreeFragment *f = vmfragments[i]; f; f = f->next) {
JS_ASSERT(f->root == f);
for (TreeFragment *p = f; p; p = p->peer)
FragProfiling_FragFinalizer(p, this);
}
}
)
verbose_only(
for (Seq<Fragment*>* f = branches; f; f = f->tail)
FragProfiling_FragFinalizer(f->head, this);
)
frameCache->reset();
dataAlloc->reset();
traceAlloc->reset();
codeAlloc->reset();
tempAlloc->reset();
reTempAlloc->reset();
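/*
* The allocators have been reset, so rebuild everything that lived in
* dataAlloc: the per-global-state slot lists, the assembler, the
* vmfragments table and the regexp fragment map.
*/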
Allocator& alloc = *dataAlloc;
for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
globalStates[i].globalShape = -1;
globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
}
assembler = new (alloc) Assembler(*codeAlloc, alloc, alloc, core, &LogController, avmplus::AvmCore::config);
verbose_only( branches = NULL; )
memset(&vmfragments[0], 0, FRAGMENT_TABLE_SIZE * sizeof(TreeFragment*));
reFragments = new (alloc) REHashMap(alloc);
needFlush = JS_FALSE;
}
static inline void
MarkTree(JSTracer* trc, TreeFragment *f)
{
jsval* vp = f->gcthings.data();
unsigned len = f->gcthings.length();
while (len--) {
jsval v = *vp++;
JS_SET_TRACING_NAME(trc, "jitgcthing");
js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v));
}
JSScopeProperty** spropp = f->sprops.data();
len = f->sprops.length();
while (len--) {
JSScopeProperty* sprop = *spropp++;
sprop->trace(trc);
}
}
void
TraceMonitor::mark(JSTracer* trc)
{
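/*
* If this GC is going to flush the code caches, the fragments and the
* values they reference are about to be discarded anyway, so skip marking
* their gcthings.
*/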
if (!trc->context->runtime->gcFlushCodeCaches) {
for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
TreeFragment* f = vmfragments[i];
while (f) {
if (f->code())
MarkTree(trc, f);
TreeFragment* peer = f->peer;
while (peer) {
if (peer->code())
MarkTree(trc, peer);
peer = peer->peer;
}
f = f->next;
}
}
if (recorder)
MarkTree(trc, recorder->getTree());
}
}
/*
* Box a value from the native stack back into the jsval format. Integers that
* are too large to fit into a jsval are automatically boxed into
* heap-allocated doubles.
*/
bool
NativeToValue(JSContext* cx, jsval& v, TraceType type, double* slot)
{
bool ok;
jsint i;
jsdouble d;
switch (type) {
case TT_OBJECT:
v = OBJECT_TO_JSVAL(*(JSObject**)slot);
JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
debug_only_printf(LC_TMTracer,
"object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
JSVAL_IS_NULL(v)
? "null"
: JSVAL_TO_OBJECT(v)->getClass()->name);
break;
case TT_INT32:
i = *(jsint*)slot;
debug_only_printf(LC_TMTracer, "int<%d> ", i);
store_int:
if (INT_FITS_IN_JSVAL(i)) {
v = INT_TO_JSVAL(i);
break;
}
d = (jsdouble)i;
goto store_double;
case TT_DOUBLE:
d = *slot;
debug_only_printf(LC_TMTracer, "double<%g> ", d);
if (JSDOUBLE_IS_INT(d, i))
goto store_int;
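/*
* Heap-allocating the double below is the only fallible step in this
* function; every other case simply assigns.
*/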
store_double:
ok = js_NewDoubleInRootedValue(cx, d, &v);
if (!ok) {
js_ReportOutOfMemory(cx);
return false;
}
return true;
case TT_JSVAL:
v = *(jsval*)slot;
JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
debug_only_printf(LC_TMTracer, "box<%p> ", (void*)v);
break;
case TT_STRING:
v = STRING_TO_JSVAL(*(JSString**)slot);
debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot));
break;
case TT_NULL:
JS_ASSERT(*(JSObject**)slot == NULL);
v = JSVAL_NULL;
debug_only_printf(LC_TMTracer, "null<%p> ", (void*)(*(JSObject**)slot));
break;
case TT_PSEUDOBOOLEAN:
/* Watch out for pseudo-booleans. */
v = SPECIAL_TO_JSVAL(*(JSBool*)slot);
debug_only_printf(LC_TMTracer, "boolean<%d> ", *(JSBool*)slot);
break;
case TT_FUNCTION: {
JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot));
v = OBJECT_TO_JSVAL(*(JSObject**)slot);
#ifdef DEBUG
JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v));
debug_only_printf(LC_TMTracer,
"function<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
fun->atom
? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
: "unnamed");
#endif
break;
}
default:
JS_NOT_REACHED("unexpected type");
break;
}
return true;
}
class BuildNativeFrameVisitor : public SlotVisitorBase
{
JSContext *mCx;
TraceType *mTypeMap;
double *mGlobal;
double *mStack;
public:
BuildNativeFrameVisitor(JSContext *cx,
TraceType *typemap,
double *global,
double *stack) :
mCx(cx),
mTypeMap(typemap),
mGlobal(global),
mStack(stack)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
debug_only_printf(LC_TMTracer, "global%d: ", n);
ValueToNative(mCx, *vp, *mTypeMap++, &mGlobal[slot]);
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, int count, JSStackFrame* fp) {
for (int i = 0; i < count; ++i) {
debug_only_printf(LC_TMTracer, "%s%d: ", stackSlotKind(), i);
ValueToNative(mCx, *vp++, *mTypeMap++, mStack++);
}
return true;
}
};
static JS_REQUIRES_STACK void
BuildNativeFrame(JSContext *cx, JSObject *globalObj, unsigned callDepth,
unsigned ngslots, uint16 *gslots,
TraceType *typeMap, double *global, double *stack)
{
BuildNativeFrameVisitor visitor(cx, typeMap, global, stack);
VisitSlots(visitor, cx, globalObj, callDepth, ngslots, gslots);
debug_only_print0(LC_TMTracer, "\n");
}
class FlushNativeGlobalFrameVisitor : public SlotVisitorBase
{
JSContext *mCx;
TraceType *mTypeMap;
double *mGlobal;
public:
FlushNativeGlobalFrameVisitor(JSContext *cx,
TraceType *typeMap,
double *global) :
mCx(cx),
mTypeMap(typeMap),
mGlobal(global)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
debug_only_printf(LC_TMTracer, "global%d=", n);
JS_ASSERT(JS_THREAD_DATA(mCx)->waiveGCQuota);
if (!NativeToValue(mCx, *vp, *mTypeMap++, &mGlobal[slot]))
OutOfMemoryAbort();
}
};
class FlushNativeStackFrameVisitor : public SlotVisitorBase
{
JSContext *mCx;
const TraceType *mInitTypeMap;
const TraceType *mTypeMap;
double *mStack;
jsval *mStop;
unsigned mIgnoreSlots;
public:
FlushNativeStackFrameVisitor(JSContext *cx,
const TraceType *typeMap,
double *stack,
jsval *stop,
unsigned ignoreSlots) :
mCx(cx),
mInitTypeMap(typeMap),
mTypeMap(typeMap),
mStack(stack),
mStop(stop),
mIgnoreSlots(ignoreSlots)
{}
const TraceType* getTypeMap()
{
return mTypeMap;
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
JS_ASSERT(JS_THREAD_DATA(mCx)->waiveGCQuota);
for (size_t i = 0; i < count; ++i) {
if (vp == mStop)
return false;
debug_only_printf(LC_TMTracer, "%s%u=", stackSlotKind(), unsigned(i));
if (unsigned(mTypeMap - mInitTypeMap) >= mIgnoreSlots) {
if (!NativeToValue(mCx, *vp, *mTypeMap, mStack))
OutOfMemoryAbort();
}
vp++;
mTypeMap++;
mStack++;
}
return true;
}
};
/* Box the given native global frame back into the global object's slots. This is infallible. */
static JS_REQUIRES_STACK void
FlushNativeGlobalFrame(JSContext *cx, JSObject *globalObj, double *global, unsigned ngslots,
uint16 *gslots, TraceType *typemap)
{
FlushNativeGlobalFrameVisitor visitor(cx, typemap, global);
VisitGlobalSlots(visitor, cx, globalObj, ngslots, gslots);
debug_only_print0(LC_TMTracer, "\n");
}
/*
* Returns the number of values on the native stack, excluding the innermost
* frame. This walks all FrameInfos on the native frame stack and sums the
* slot usage of each frame.
*/
static int32
StackDepthFromCallStack(InterpState* state, uint32 callDepth)
{
int32 nativeStackFramePos = 0;
// Duplicate native stack layout computation: see VisitFrameSlots header comment.
for (FrameInfo** fip = state->callstackBase; fip < state->rp + callDepth; fip++)
nativeStackFramePos += (*fip)->callerHeight;
return nativeStackFramePos;
}
/*
* Generic function to read upvars on trace from slots of active frames.
* T Traits type parameter. Must provide static functions:
* interp_get(fp, slot) Read the value out of an interpreter frame.
* native_slot(argc, slot) Return the position of the desired value in the on-trace
* stack frame (with position 0 being callee).
*
* upvarLevel Static level of the function containing the upvar definition
* slot Identifies the value to get. The meaning is defined by the traits type.
* callDepth Call depth of current point relative to trace entry
*/
template<typename T>
inline TraceType
GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
{
InterpState* state = cx->interpState;
FrameInfo** fip = state->rp + callDepth;
/*
* First search the FrameInfo call stack for an entry containing our
* upvar, namely one with level == upvarLevel. The first FrameInfo is a
* transition from the entry frame to some callee. However, it is not
* known (from looking at the FrameInfo) whether the entry frame had a
* callee. Rather than special-case this or insert more logic into the
* loop, instead just stop before that FrameInfo (i.e. |> base| instead of
* |>= base|), and let the code after the loop handle it.
*/
int32 stackOffset = StackDepthFromCallStack(state, callDepth);
while (--fip > state->callstackBase) {
FrameInfo* fi = *fip;
/*
* The loop starts aligned to the top of the stack, so move down to the first meaningful
* callee. Then read the callee directly from the frame.
*/
stackOffset -= fi->callerHeight;
JSObject* callee = *(JSObject**)(&state->stackBase[stackOffset]);
JSFunction* fun = GET_FUNCTION_PRIVATE(cx, callee);
uintN calleeLevel = fun->u.i.script->staticLevel;
if (calleeLevel == upvarLevel) {
/*
* Now find the upvar's value in the native stack. stackOffset is
* the offset of the start of the activation record corresponding
* to *fip in the native stack.
*/
uint32 native_slot = T::native_slot(fi->callerArgc, slot);
*result = state->stackBase[stackOffset + native_slot];
return fi->get_typemap()[native_slot];
}
}
// Next search the trace entry frame, which is not in the FrameInfo stack.
if (state->outermostTree->script->staticLevel == upvarLevel) {
uint32 argc = state->outermostTree->argc;
uint32 native_slot = T::native_slot(argc, slot);
*result = state->stackBase[native_slot];
return state->callstackBase[0]->get_typemap()[native_slot];
}
/*
* If we did not find the upvar in the frames for the active traces,
* then we simply get the value from the interpreter state.
*/
JS_ASSERT(upvarLevel < JS_DISPLAY_SIZE);
JSStackFrame* fp = cx->display[upvarLevel];
jsval v = T::interp_get(fp, slot);
TraceType type = getCoercedType(v);
ValueToNative(cx, v, type, result);
return type;
}
// For this traits type, 'slot' is the argument index, which may be -2 for callee.
struct UpvarArgTraits {
static jsval interp_get(JSStackFrame* fp, int32 slot) {
return fp->argv[slot];
}
static uint32 native_slot(uint32 argc, int32 slot) {
return 2 /*callee,this*/ + slot;
}
};
uint32 JS_FASTCALL
GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
{
return GetUpvarOnTrace<UpvarArgTraits>(cx, upvarLevel, slot, callDepth, result);
}
// For this traits type, 'slot' is an index into the local slots array.
struct UpvarVarTraits {
static jsval interp_get(JSStackFrame* fp, int32 slot) {
return fp->slots[slot];
}
static uint32 native_slot(uint32 argc, int32 slot) {
return 4 /*callee,this,arguments,scopeChain*/ + argc + slot;
}
};
uint32 JS_FASTCALL
GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
{
return GetUpvarOnTrace<UpvarVarTraits>(cx, upvarLevel, slot, callDepth, result);
}
/*
* For this traits type, 'slot' is an index into the stack area (within slots,
* after nfixed) of a frame with no function. (On trace, the top-level frame is
* the only one that can have no function.)
*/
struct UpvarStackTraits {
static jsval interp_get(JSStackFrame* fp, int32 slot) {
return fp->slots[slot + fp->script->nfixed];
}
static uint32 native_slot(uint32 argc, int32 slot) {
/*
* Locals are not imported by the tracer when the frame has no
* function, so we do not add fp->script->nfixed.
*/
JS_ASSERT(argc == 0);
return slot;
}
};
uint32 JS_FASTCALL
GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth,
double* result)
{
return GetUpvarOnTrace<UpvarStackTraits>(cx, upvarLevel, slot, callDepth, result);
}
// Parameters needed to access a value from a closure on trace.
struct ClosureVarInfo
{
uint32 slot;
#ifdef DEBUG
uint32 callDepth;
#endif
};
/*
* Generic function to read upvars from Call objects of active heavyweight functions.
* call Callee Function object in which the upvar is accessed.
*/
template<typename T>
inline uint32
GetFromClosure(JSContext* cx, JSObject* call, const ClosureVarInfo* cv, double* result)
{
JS_ASSERT(OBJ_GET_CLASS(cx, call) == &js_CallClass);
InterpState* state = cx->interpState;
#ifdef DEBUG
FrameInfo** fip = state->rp + cv->callDepth;
int32 stackOffset = StackDepthFromCallStack(state, cv->callDepth);
while (--fip > state->callstackBase) {
FrameInfo* fi = *fip;
/*
* The loop starts aligned to the top of the stack, so move down to the first meaningful
* callee. Then read the callee directly from the frame.
*/
stackOffset -= fi->callerHeight;
JSObject* callee = *(JSObject**)(&state->stackBase[stackOffset]);
if (callee == call) {
// This is not reachable as long as the tracer guards on the identity of the callee's
// parent when making a call:
//
// - We can only reach this point if we execute JSOP_LAMBDA on trace, then call the
// function created by the lambda, and then execute a JSOP_NAME on trace.
// - Each time we execute JSOP_LAMBDA we get a function with a different parent.
// - When we execute the call to the new function, we exit trace because the parent
// is different.
JS_NOT_REACHED("JSOP_NAME variable found in outer trace");
}
}
#endif
/*
* Here we specifically want to check the call object of the trace entry frame.
*/
uint32 slot = cv->slot;
VOUCH_DOES_NOT_REQUIRE_STACK();
if (cx->fp->callobj == call) {
slot = T::adj_slot(cx->fp, slot);
*result = state->stackBase[slot];
return state->callstackBase[0]->get_typemap()[slot];
}
JSStackFrame* fp = (JSStackFrame*) call->getPrivate();
jsval v;
if (fp) {
v = T::slots(fp)[slot];
} else {
/*
* Get the value from the object. We know we have a Call object, and
* that our slot index is fine, so don't monkey around with calling the
* property getter (which just looks in the slot) or calling
* js_GetReservedSlot. Just get the slot directly. Note the static
* asserts in jsfun.cpp which make sure Call objects use dslots.
*/
JS_ASSERT(slot < T::slot_count(call));
v = T::slots(call)[slot];
}
TraceType type = getCoercedType(v);
ValueToNative(cx, v, type, result);
return type;
}
struct ArgClosureTraits
{
// Adjust our slot to point to the correct slot on the native stack.
// See also UpvarArgTraits.
static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return 2 + slot; }
// Generate the adj_slot computation in LIR.
static inline LIns* adj_slot_lir(LirWriter* lir, LIns* fp_ins, unsigned slot) {
return lir->insImm(2 + slot);
}
// Get the right frame slots to use our slot index with.
// See also UpvarArgTraits.
static inline jsval* slots(JSStackFrame* fp) { return fp->argv; }
// Get the right object slots to use our slot index with.
static inline jsval* slots(JSObject* obj) {
// We know Call objects use dslots.
return obj->dslots + slot_offset(obj);
}
// Get the offset of our object slots from the object's dslots pointer.
static inline uint32 slot_offset(JSObject* obj) {
return JSSLOT_START(&js_CallClass) +
CALL_CLASS_FIXED_RESERVED_SLOTS - JS_INITIAL_NSLOTS;
}
// Get the maximum slot index of this type that should be allowed
static inline uint16 slot_count(JSObject* obj) {
return js_GetCallObjectFunction(obj)->nargs;
}
private:
ArgClosureTraits();
};
uint32 JS_FASTCALL
GetClosureArg(JSContext* cx, JSObject* callee, const ClosureVarInfo* cv, double* result)
{
return GetFromClosure<ArgClosureTraits>(cx, callee, cv, result);
}
struct VarClosureTraits
{
// See documentation on ArgClosureTraits for what these functions
// should be doing.
// See also UpvarVarTraits.
static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return 4 + fp->argc + slot; }
static inline LIns* adj_slot_lir(LirWriter* lir, LIns* fp_ins, unsigned slot) {
LIns *argc_ins = lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc));
return lir->ins2(LIR_add, lir->insImm(4 + slot), argc_ins);
}
// See also UpvarVarTraits.
static inline jsval* slots(JSStackFrame* fp) { return fp->slots; }
static inline jsval* slots(JSObject* obj) {
// We know Call objects use dslots.
return obj->dslots + slot_offset(obj);
}
static inline uint32 slot_offset(JSObject* obj) {
return JSSLOT_START(&js_CallClass) +
CALL_CLASS_FIXED_RESERVED_SLOTS - JS_INITIAL_NSLOTS +
js_GetCallObjectFunction(obj)->nargs;
}
static inline uint16 slot_count(JSObject* obj) {
return js_GetCallObjectFunction(obj)->u.i.nvars;
}
private:
VarClosureTraits();
};
uint32 JS_FASTCALL
GetClosureVar(JSContext* cx, JSObject* callee, const ClosureVarInfo* cv, double* result)
{
return GetFromClosure<VarClosureTraits>(cx, callee, cv, result);
}
/**
* Box the given native stack frame into the virtual machine stack. This
* is infallible.
*
* @param callDepth the distance between the entry frame into our trace and
* cx->fp when we make this call. If this is not called as a
* result of a nested exit, callDepth is 0.
* @param mp an array of TraceType indicating the types of the values on
* the stack.
* @param np pointer to the native stack. We want to copy values from here to
* the JS stack as needed.
* @param stopFrame if non-null, this frame and everything above it should not
* be restored.
* @return the number of things we popped off of np.
*/
static JS_REQUIRES_STACK int
FlushNativeStackFrame(JSContext* cx, unsigned callDepth, const TraceType* mp, double* np,
JSStackFrame* stopFrame, unsigned ignoreSlots)
{
jsval* stopAt = stopFrame ? &stopFrame->argv[-2] : NULL;
/* Root all string and object references first (we don't need to call the GC for this). */
FlushNativeStackFrameVisitor visitor(cx, mp, np, stopAt, ignoreSlots);
VisitStackSlots(visitor, cx, callDepth);
// Restore thisv from the now-restored argv[-1] in each pending frame.
// Keep in mind that we didn't restore frames at stopFrame and above!
// Scope to keep |fp| from leaking into the macros we're using.
{
unsigned n = callDepth+1; // +1 to make sure we restore the entry frame
JSStackFrame* fp = cx->fp;
if (stopFrame) {
for (; fp != stopFrame; fp = fp->down) {
JS_ASSERT(n != 0);
--n;
}
// Skip over stopFrame itself.
JS_ASSERT(n != 0);
--n;
fp = fp->down;
}
for (; n != 0; fp = fp->down) {
--n;
if (fp->argv) {
if (fp->argsobj &&
js_GetArgsPrivateNative(JSVAL_TO_OBJECT(fp->argsobj))) {
JSVAL_TO_OBJECT(fp->argsobj)->setPrivate(fp);
}
JS_ASSERT(JSVAL_IS_OBJECT(fp->argv[-1]));
JS_ASSERT(HAS_FUNCTION_CLASS(fp->calleeObject()));
JS_ASSERT(GET_FUNCTION_PRIVATE(cx, fp->callee()) == fp->fun);
if (FUN_INTERPRETED(fp->fun) &&
(fp->fun->flags & JSFUN_HEAVYWEIGHT)) {
// Iff these fields are NULL, then |fp| was synthesized on trace exit, so
// we need to update the frame fields.
if (!fp->callobj)
fp->callobj = fp->scopeChain;
// Iff scope chain's private is NULL, then |fp->scopeChain| was created
// on trace for a call, so we set the private field now. (Call objects
// that correspond to returned frames also have a NULL private, but such
// a call object would not occur as the |scopeChain| member of a frame,
// so we cannot be in that case here.)
if (!fp->scopeChain->getPrivate())
fp->scopeChain->setPrivate(fp);
}
fp->thisv = fp->argv[-1];
if (fp->flags & JSFRAME_CONSTRUCTING) // constructors always compute 'this'
fp->flags |= JSFRAME_COMPUTED_THIS;
}
}
}
debug_only_print0(LC_TMTracer, "\n");
return visitor.getTypeMap() - mp;
}
/* Emit load instructions onto the trace that read the initial stack state. */
JS_REQUIRES_STACK void
TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, TraceType t,
const char *prefix, uintN index, JSStackFrame *fp)
{
LIns* ins;
if (t == TT_INT32) { /* demoted */
JS_ASSERT(isInt32(*p));
/*
* Ok, we have a valid demotion attempt pending, so insert an integer
* read and promote it to double since all arithmetic operations expect
* to see doubles on entry. The first op to use this slot will emit an
* f2i cast which will cancel out the i2f we insert here.
*/
ins = lir->insLoad(LIR_ld, base, offset);
ins = lir->ins1(LIR_i2f, ins);
} else {
JS_ASSERT_IF(t != TT_JSVAL, isNumber(*p) == (t == TT_DOUBLE));
if (t == TT_DOUBLE) {
ins = lir->insLoad(LIR_ldf, base, offset);
} else if (t == TT_PSEUDOBOOLEAN) {
ins = lir->insLoad(LIR_ld, base, offset);
} else {
ins = lir->insLoad(LIR_ldp, base, offset);
}
}
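/*
* Re-map the tracker if the global object's dslots moved, then record the
* load so later reads of *p on trace reuse this instruction.
*/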
checkForGlobalObjectReallocation();
tracker.set(p, ins);
#ifdef DEBUG
char name[64];
JS_ASSERT(strlen(prefix) < 11);
void* mark = NULL;
jsuword* localNames = NULL;
const char* funName = NULL;
if (*prefix == 'a' || *prefix == 'v') {
mark = JS_ARENA_MARK(&cx->tempPool);
if (fp->fun->hasLocalNames())
localNames = js_GetLocalNameArray(cx, fp->fun, &cx->tempPool);
funName = fp->fun->atom ? js_AtomToPrintableString(cx, fp->fun->atom) : "<anonymous>";
}
if (!strcmp(prefix, "argv")) {
if (index < fp->fun->nargs) {
JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[index]);
JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom));
} else {
JS_snprintf(name, sizeof name, "$%s.<arg%d>", funName, index);
}
} else if (!strcmp(prefix, "vars")) {
JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[fp->fun->nargs + index]);
JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom));
} else {
JS_snprintf(name, sizeof name, "$%s%d", prefix, index);
}
if (mark)
JS_ARENA_RELEASE(&cx->tempPool, mark);
addName(ins, name);
static const char* typestr[] = {
"object", "int", "double", "jsval", "string", "null", "boolean", "function"
};
debug_only_printf(LC_TMTracer, "import vp=%p name=%s type=%s flags=%d\n",
(void*)p, name, typestr[t & 7], t >> 3);
#endif
}
class ImportGlobalSlotVisitor : public SlotVisitorBase
{
TraceRecorder &mRecorder;
LIns *mBase;
TraceType *mTypemap;
public:
ImportGlobalSlotVisitor(TraceRecorder &recorder,
LIns *base,
TraceType *typemap) :
mRecorder(recorder),
mBase(base),
mTypemap(typemap)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
JS_ASSERT(*mTypemap != TT_JSVAL);
mRecorder.import(mBase, mRecorder.nativeGlobalOffset(vp),
vp, *mTypemap++, "global", n, NULL);
}
};
class ImportBoxedStackSlotVisitor : public SlotVisitorBase
{
TraceRecorder &mRecorder;
LIns *mBase;
ptrdiff_t mStackOffset;
TraceType *mTypemap;
JSStackFrame *mFp;
public:
ImportBoxedStackSlotVisitor(TraceRecorder &recorder,
LIns *base,
ptrdiff_t stackOffset,
TraceType *typemap) :
mRecorder(recorder),
mBase(base),
mStackOffset(stackOffset),
mTypemap(typemap)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
for (size_t i = 0; i < count; ++i) {
if (*mTypemap == TT_JSVAL) {
mRecorder.import(mBase, mStackOffset, vp, TT_JSVAL,
"jsval", i, fp);
LIns *vp_ins = mRecorder.unbox_jsval(*vp, mRecorder.get(vp),
mRecorder.copy(mRecorder.anchor));
mRecorder.set(vp, vp_ins);
}
vp++;
mTypemap++;
mStackOffset += sizeof(double);
}
return true;
}
};
JS_REQUIRES_STACK void
TraceRecorder::import(TreeFragment* tree, LIns* sp, unsigned stackSlots, unsigned ngslots,
unsigned callDepth, TraceType* typeMap)
{
/*
* If we get a partial list that doesn't have all the types (i.e. recording
* from a side exit that was recorded but we added more global slots
* later), merge the missing types from the entry type map. This is safe
* because at the loop edge we verify that we have compatible types for all
* globals (entry type and loop edge type match). While a different trace
* of the tree might have had a guard with a different type map for these
* slots we just filled in here (the guard we continue from didn't know
* about them), since we didn't take that particular guard the only way we
* could have ended up here is if that other trace had at its end a
* compatible type distribution with the entry map. Since that's exactly
* what we used to fill in the types our current side exit didn't provide,
* this is always safe to do.
*/
TraceType* globalTypeMap = typeMap + stackSlots;
unsigned length = tree->nGlobalTypes();
/*
* This is potentially the typemap of the side exit and thus shorter than
* the tree's global type map.
*/
if (ngslots < length) {
MergeTypeMaps(&globalTypeMap /* out param */, &ngslots /* out param */,
tree->globalTypeMap(), length,
(TraceType*)alloca(sizeof(TraceType) * length));
}
JS_ASSERT(ngslots == tree->nGlobalTypes());
/*
* Check whether there are any values on the stack we have to unbox and do
* that first before we waste any time fetching the state from the stack.
*/
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
ImportBoxedStackSlotVisitor boxedStackVisitor(*this, sp, -tree->nativeStackBase, typeMap);
VisitStackSlots(boxedStackVisitor, cx, callDepth);
}
/*
* Remember the import type map so we can lazily import later whatever
* we need.
*/
importTypeMap.set(importStackSlots = stackSlots,
importGlobalSlots = ngslots,
typeMap, globalTypeMap);
}
JS_REQUIRES_STACK bool
TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop)
{
uint32 setflags = (js_CodeSpec[*cx->fp->regs->pc].format & (JOF_SET | JOF_INCDEC | JOF_FOR));
if (setflags) {
if (!SPROP_HAS_STUB_SETTER(sprop))
RETURN_VALUE("non-stub setter", false);
if (sprop->attrs & JSPROP_READONLY)
RETURN_VALUE("writing to a read-only property", false);
}
/* This check applies even when setflags == 0. */
if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) {
JS_ASSERT(!sprop->isMethod());
RETURN_VALUE("non-stub getter", false);
}
if (!SPROP_HAS_VALID_SLOT(sprop, scope))
RETURN_VALUE("slotless obj property", false);
return true;
}
/* Import a global slot, adding it to the tree's interned global slot list if necessary. */
JS_REQUIRES_STACK void
TraceRecorder::importGlobalSlot(unsigned slot)
{
JS_ASSERT(slot == uint16(slot));
JS_ASSERT(STOBJ_NSLOTS(globalObj) <= MAX_GLOBAL_SLOTS);
jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
JS_ASSERT(!known(vp));
/* Add the slot to the list of interned global slots. */
TraceType type;
int index = tree->globalSlots->offsetOf(slot);
if (index == -1) {
type = getCoercedType(*vp);
if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
type = TT_DOUBLE;
index = (int)tree->globalSlots->length();
tree->globalSlots->add(slot);
tree->typeMap.add(type);
SpecializeTreesToMissingGlobals(cx, globalObj, tree);
JS_ASSERT(tree->nGlobalTypes() == tree->globalSlots->length());
} else {
type = importTypeMap[importStackSlots + index];
JS_ASSERT(type != TT_IGNORE);
}
import(eos_ins, slot * sizeof(double), vp, type, "global", index, NULL);
}
/* Lazily import a global slot if we don't already have it in the tracker. */
JS_REQUIRES_STACK bool
TraceRecorder::lazilyImportGlobalSlot(unsigned slot)
{
if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */
return false;
/*
* If the global object grows too large, alloca in ExecuteTree might fail,
* so abort tracing on global objects with unreasonably many slots.
*/
if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS)
return false;
jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
if (known(vp))
return true; /* we already have it */
importGlobalSlot(slot);
return true;
}
/* Write back a value onto the stack or global frames. */
LIns*
TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset, bool shouldDemote)
{
/*
* Sink all type casts targeting the stack into the side exit by simply storing the original
* (uncasted) value. Each guard generates the side exit map based on the types of the
* last stores to every stack location, so it's safe to not perform them on-trace.
*/
if (shouldDemote && isPromoteInt(i))
i = demote(lir, i);
return lir->insStorei(i, base, offset);
}
/* Update the tracker, then issue a write back store. */
JS_REQUIRES_STACK void
TraceRecorder::set(jsval* p, LIns* i, bool initializing, bool demote)
{
JS_ASSERT(i != NULL);
checkForGlobalObjectReallocation();
tracker.set(p, i);
/*
* If we are writing to this location for the first time, calculate the
* offset into the native frame manually. Otherwise just look up the last
* load or store associated with the same source address (p) and use the
* same offset/base.
*/
LIns* x = nativeFrameTracker.get(p);
if (!x) {
if (isGlobal(p))
x = writeBack(i, eos_ins, nativeGlobalOffset(p), demote);
else
x = writeBack(i, lirbuf->sp, nativespOffset(p), demote);
nativeFrameTracker.set(p, x);
} else {
#if defined NANOJIT_64BIT
JS_ASSERT( x->isop(LIR_stqi) || x->isop(LIR_sti) || x->isop(LIR_stfi));
#else
JS_ASSERT( x->isop(LIR_sti) || x->isop(LIR_stfi));
#endif
int disp;
LIns *base = x->oprnd2();
#ifdef NANOJIT_ARM
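/*
* On ARM the store may have been emitted with the displacement folded into
* a separate LIR_piadd of the base (presumably because of the limited
* immediate displacement range); recover the real base and displacement
* from that add.
*/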
if (base->isop(LIR_piadd)) {
disp = base->oprnd2()->imm32();
base = base->oprnd1();
} else
#endif
disp = x->disp();
JS_ASSERT(base == lirbuf->sp || base == eos_ins);
JS_ASSERT(disp == ((base == lirbuf->sp)
? nativespOffset(p)
: nativeGlobalOffset(p)));
writeBack(i, base, disp, demote);
}
}
JS_REQUIRES_STACK LIns*
TraceRecorder::attemptImport(jsval* p)
{
if (LIns* i = getFromTracker(p))
return i;
/* If the variable was not known, it could require a lazy import. */
CountSlotsVisitor countVisitor(p);
VisitStackSlots(countVisitor, cx, callDepth);
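/*
* Only call get(p) if p is covered by the active frames: either the visitor
* stopped at p (it lives in a pending frame) or p lies within the current
* frame's slots array.
*/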
if (countVisitor.stopped() || size_t(p - cx->fp->slots) < cx->fp->script->nslots)
return get(p);
return NULL;
}
nanojit::LIns*
TraceRecorder::getFromTracker(jsval* p)
{
checkForGlobalObjectReallocation();
return tracker.get(p);
}
JS_REQUIRES_STACK LIns*
TraceRecorder::get(jsval* p)
{
LIns* x = getFromTracker(p);
if (x)
return x;
if (isGlobal(p)) {
unsigned slot = nativeGlobalSlot(p);
JS_ASSERT(tree->globalSlots->offsetOf(slot) != -1);
importGlobalSlot(slot);
} else {
unsigned slot = nativeStackSlot(p);
TraceType type = importTypeMap[slot];
JS_ASSERT(type != TT_IGNORE);
import(lirbuf->sp, -tree->nativeStackBase + slot * sizeof(jsdouble),
p, type, "stack", slot, cx->fp);
}
JS_ASSERT(known(p));
return tracker.get(p);
}
JS_REQUIRES_STACK LIns*
TraceRecorder::addr(jsval* p)
{
return isGlobal(p)
? lir->ins2(LIR_piadd, eos_ins, INS_CONSTWORD(nativeGlobalOffset(p)))
: lir->ins2(LIR_piadd, lirbuf->sp,
INS_CONSTWORD(nativespOffset(p)));
}
JS_REQUIRES_STACK bool
TraceRecorder::known(jsval* p)
{
checkForGlobalObjectReallocation();
return tracker.has(p);
}
/*
* The dslots of the global object are sometimes reallocated by the interpreter.
* This function checks for that condition and re-maps the entries of the tracker
* accordingly.
*/
JS_REQUIRES_STACK void
TraceRecorder::checkForGlobalObjectReallocation()
{
if (global_dslots != globalObj->dslots) {
debug_only_print0(LC_TMTracer,
"globalObj->dslots relocated, updating tracker\n");
jsval* src = global_dslots;
jsval* dst = globalObj->dslots;
jsuint length = globalObj->dslots[-1] - JS_INITIAL_NSLOTS;
LIns** map = (LIns**)alloca(sizeof(LIns*) * length);
for (jsuint n = 0; n < length; ++n) {
map[n] = tracker.get(src);
tracker.set(src++, NULL);
}
for (jsuint n = 0; n < length; ++n)
tracker.set(dst++, map[n]);
global_dslots = globalObj->dslots;
}
}
/* Determine whether the current branch is a loop edge (taken or not taken). */
static JS_REQUIRES_STACK bool
IsLoopEdge(jsbytecode* pc, jsbytecode* header)
{
switch (*pc) {
case JSOP_IFEQ:
case JSOP_IFNE:
return ((pc + GET_JUMP_OFFSET(pc)) == header);
case JSOP_IFEQX:
case JSOP_IFNEX:
return ((pc + GET_JUMPX_OFFSET(pc)) == header);
default:
JS_ASSERT((*pc == JSOP_AND) || (*pc == JSOP_ANDX) ||
(*pc == JSOP_OR) || (*pc == JSOP_ORX));
}
return false;
}
class AdjustCallerGlobalTypesVisitor : public SlotVisitorBase
{
TraceRecorder &mRecorder;
JSContext *mCx;
nanojit::LirBuffer *mLirbuf;
nanojit::LirWriter *mLir;
TraceType *mTypeMap;
public:
AdjustCallerGlobalTypesVisitor(TraceRecorder &recorder,
TraceType *typeMap) :
mRecorder(recorder),
mCx(mRecorder.cx),
mLirbuf(mRecorder.lirbuf),
mLir(mRecorder.lir),
mTypeMap(typeMap)
{}
TraceType* getTypeMap()
{
return mTypeMap;
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
LIns *ins = mRecorder.get(vp);
bool isPromote = isPromoteInt(ins);
if (isPromote && *mTypeMap == TT_DOUBLE) {
mLir->insStorei(mRecorder.get(vp), mRecorder.eos_ins,
mRecorder.nativeGlobalOffset(vp));
/*
* Aggressively undo speculation so the inner tree will compile
* if this fails.
*/
oracle.markGlobalSlotUndemotable(mCx, slot);
}
JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
++mTypeMap;
}
};
class AdjustCallerStackTypesVisitor : public SlotVisitorBase
{
TraceRecorder &mRecorder;
JSContext *mCx;
nanojit::LirBuffer *mLirbuf;
nanojit::LirWriter *mLir;
unsigned mSlotnum;
TraceType *mTypeMap;
public:
AdjustCallerStackTypesVisitor(TraceRecorder &recorder,
TraceType *typeMap) :
mRecorder(recorder),
mCx(mRecorder.cx),
mLirbuf(mRecorder.lirbuf),
mLir(mRecorder.lir),
mSlotnum(0),
mTypeMap(typeMap)
{}
TraceType* getTypeMap()
{
return mTypeMap;
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
for (size_t i = 0; i < count; ++i) {
LIns *ins = mRecorder.get(vp);
bool isPromote = isPromoteInt(ins);
if (isPromote && *mTypeMap == TT_DOUBLE) {
mLir->insStorei(mRecorder.get(vp), mLirbuf->sp,
mRecorder.nativespOffset(vp));
/*
* Aggressively undo speculation so the inner tree will compile
* if this fails.
*/
oracle.markStackSlotUndemotable(mCx, mSlotnum);
}
JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
++vp;
++mTypeMap;
++mSlotnum;
}
return true;
}
};
/*
* Promote slots if necessary to match the called tree's type map. This
* function is infallible and must only be called if we are certain that it is
* possible to reconcile the types for each slot in the inner and outer trees.
*/
JS_REQUIRES_STACK void
TraceRecorder::adjustCallerTypes(TreeFragment* f)
{
AdjustCallerGlobalTypesVisitor globalVisitor(*this, f->globalTypeMap());
VisitGlobalSlots(globalVisitor, cx, *tree->globalSlots);
AdjustCallerStackTypesVisitor stackVisitor(*this, f->stackTypeMap());
VisitStackSlots(stackVisitor, cx, 0);
JS_ASSERT(f == f->root);
}
JS_REQUIRES_STACK TraceType
TraceRecorder::determineSlotType(jsval* vp)
{
TraceType m;
if (isNumber(*vp)) {
LIns* i = getFromTracker(vp);
if (i) {
m = isPromoteInt(i) ? TT_INT32 : TT_DOUBLE;
} else if (isGlobal(vp)) {
int offset = tree->globalSlots->offsetOf(nativeGlobalSlot(vp));
JS_ASSERT(offset != -1);
m = importTypeMap[importStackSlots + offset];
} else {
m = importTypeMap[nativeStackSlot(vp)];
}
JS_ASSERT(m != TT_IGNORE);
} else if (JSVAL_IS_OBJECT(*vp)) {
if (JSVAL_IS_NULL(*vp))
m = TT_NULL;
else if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp)))
m = TT_FUNCTION;
else
m = TT_OBJECT;
} else {
JS_ASSERT(JSVAL_TAG(*vp) == JSVAL_STRING || JSVAL_IS_SPECIAL(*vp));
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
m = TraceType(JSVAL_TAG(*vp));
}
JS_ASSERT(m != TT_INT32 || isInt32(*vp));
return m;
}
class DetermineTypesVisitor : public SlotVisitorBase
{
TraceRecorder &mRecorder;
TraceType *mTypeMap;
public:
DetermineTypesVisitor(TraceRecorder &recorder,
TraceType *typeMap) :
mRecorder(recorder),
mTypeMap(typeMap)
{}
JS_REQUIRES_STACK JS_ALWAYS_INLINE void
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
*mTypeMap++ = mRecorder.determineSlotType(vp);
}
JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
for (size_t i = 0; i < count; ++i)
*mTypeMap++ = mRecorder.determineSlotType(vp++);
return true;
}
TraceType* getTypeMap()
{
return mTypeMap;
}
};
#if defined JS_JIT_SPEW
JS_REQUIRES_STACK static void
TreevisLogExit(JSContext* cx, VMSideExit* exit)
{
debug_only_printf(LC_TMTreeVis, "TREEVIS ADDEXIT EXIT=%p TYPE=%s FRAG=%p PC=%p FILE=\"%s\""
" LINE=%d OFFS=%d", (void*)exit, getExitName(exit->exitType),
(void*)exit->from, (void*)cx->fp->regs->pc, cx->fp->script->filename,
js_FramePCToLineNumber(cx, cx->fp), FramePCOffset(cx->fp));
debug_only_print0(LC_TMTreeVis, " STACK=\"");
for (unsigned i = 0; i < exit->numStackSlots; i++)
debug_only_printf(LC_TMTreeVis, "%c", typeChar[exit->stackTypeMap()[i]]);
debug_only_print0(LC_TMTreeVis, "\" GLOBALS=\"");
for (unsigned i = 0; i < exit->numGlobalSlots; i++)
debug_only_printf(LC_TMTreeVis, "%c", typeChar[exit->globalTypeMap()[i]]);
debug_only_print0(LC_TMTreeVis, "\"\n");
}
#endif
JS_REQUIRES_STACK VMSideExit*
TraceRecorder::snapshot(ExitType exitType)
{
JSStackFrame* fp = cx->fp;
JSFrameRegs* regs = fp->regs;
jsbytecode* pc = regs->pc;
/*
* Check for a return-value opcode that needs to restart at the next
* instruction.
*/
const JSCodeSpec& cs = js_CodeSpec[*pc];
/*
* When calling a _FAIL native, make the snapshot's pc point to the next
* instruction after the CALL or APPLY. Even on failure, a _FAIL native
* must not be called again from the interpreter.
*/
bool resumeAfter = (pendingSpecializedNative &&
JSTN_ERRTYPE(pendingSpecializedNative) == FAIL_STATUS);
if (resumeAfter) {
JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW ||
*pc == JSOP_SETPROP || *pc == JSOP_SETNAME);
pc += cs.length;
regs->pc = pc;
MUST_FLOW_THROUGH("restore_pc");
}
/*
* Generate the entry map for the (possibly advanced) pc and stash it in
* the trace.
*/
unsigned stackSlots = NativeStackSlots(cx, callDepth);
/*
* It's sufficient to track the native stack use here since all stores
* above the stack watermark defined by guards are killed.
*/
trackNativeStackUse(stackSlots + 1);
/* Capture the type map into a temporary location. */
unsigned ngslots = tree->globalSlots->length();
unsigned typemap_size = (stackSlots + ngslots) * sizeof(TraceType);
/* Use the recorder-local temporary type map. */
TraceType* typemap = NULL;
if (tempTypeMap.resize(typemap_size))
typemap = tempTypeMap.begin(); /* if resize() fails, typemap stays NULL and we crash below. */
/*
* Determine the type of a store by looking at the current type of the
* actual value the interpreter is using. For numbers we have to check what
* kind of store we used last (integer or double) to figure out what the
* side exit should reflect in its typemap.
*/
DetermineTypesVisitor detVisitor(*this, typemap);
VisitSlots(detVisitor, cx, callDepth, ngslots,
tree->globalSlots->data());
JS_ASSERT(unsigned(detVisitor.getTypeMap() - typemap) ==
ngslots + stackSlots);
/*
* If this snapshot is for a side exit that leaves a boxed jsval result on
* the stack, make a note of this in the typemap. Examples include the
* builtinStatus guard after calling a _FAIL builtin, a JSFastNative, or
* GetPropertyByName; and the type guard in unbox_jsval after such a call
* (also at the beginning of a trace branched from such a type guard).
*/
if (pendingUnboxSlot ||
(pendingSpecializedNative && (pendingSpecializedNative->flags & JSTN_UNBOX_AFTER))) {
unsigned pos = stackSlots - 1;
if (pendingUnboxSlot == cx->fp->regs->sp - 2)
pos = stackSlots - 2;
typemap[pos] = TT_JSVAL;
}
/* Now restore the original pc (after which early returns are ok). */
if (resumeAfter) {
MUST_FLOW_LABEL(restore_pc);
regs->pc = pc - cs.length;
} else {
/*
* If we take a snapshot on a goto, advance to the target address. This
* avoids inner trees returning on a break goto, which the outer
* recorder would then confuse with a break in the outer tree.
*/
if (*pc == JSOP_GOTO)
pc += GET_JUMP_OFFSET(pc);
else if (*pc == JSOP_GOTOX)
pc += GET_JUMPX_OFFSET(pc);
}
/*
* Check if we already have a matching side exit; if so we can return that
* side exit instead of creating a new one.
*/
VMSideExit** exits = tree->sideExits.data();
unsigned nexits = tree->sideExits.length();
if (exitType == LOOP_EXIT) {
for (unsigned n = 0; n < nexits; ++n) {
VMSideExit* e = exits[n];
if (e->pc == pc && e->imacpc == fp->imacpc &&
ngslots == e->numGlobalSlots &&
!memcmp(exits[n]->fullTypeMap(), typemap, typemap_size)) {
AUDIT(mergedLoopExits);
#if defined JS_JIT_SPEW
TreevisLogExit(cx, e);
#endif
return e;
}
}
}
/* We couldn't find a matching side exit, so create a new one. */
VMSideExit* exit = (VMSideExit*)
traceAlloc().alloc(sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(TraceType));
/* Set up the side exit structure. */
exit->from = fragment;
exit->calldepth = callDepth;
exit->numGlobalSlots = ngslots;
exit->numStackSlots = stackSlots;
exit->numStackSlotsBelowCurrentFrame = cx->fp->argv ?
nativeStackOffset(&cx->fp->argv[-2]) / sizeof(double) :
0;
exit->exitType = exitType;
exit->block = fp->blockChain;
if (fp->blockChain)
tree->gcthings.addUnique(OBJECT_TO_JSVAL(fp->blockChain));
exit->pc = pc;
exit->imacpc = fp->imacpc;
exit->sp_adj = (stackSlots * sizeof(double)) - tree->nativeStackBase;
exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
exit->nativeCalleeWord = 0;
exit->lookupFlags = js_InferFlags(cx, 0);
memcpy(exit->fullTypeMap(), typemap, typemap_size);
#if defined JS_JIT_SPEW
TreevisLogExit(cx, exit);
#endif
return exit;
}
JS_REQUIRES_STACK GuardRecord*
TraceRecorder::createGuardRecord(VMSideExit* exit)
{
GuardRecord* gr = new (traceAlloc()) GuardRecord();
gr->exit = exit;
exit->addGuard(gr);
// gr->profCount is calloc'd to zero
verbose_only(
gr->profGuardID = fragment->guardNumberer++;
gr->nextInFrag = fragment->guardsForFrag;
fragment->guardsForFrag = gr;
)
return gr;
}
/*
* Emit a guard for condition (cond), expecting to evaluate to boolean result
* (expected) and using the supplied side exit if the condition doesn't hold.
*/
JS_REQUIRES_STACK void
TraceRecorder::guard(bool expected, LIns* cond, VMSideExit* exit)
{
debug_only_printf(LC_TMRecorder,
" About to try emitting guard code for "
"SideExit=%p exitType=%s\n",
(void*)exit, getExitName(exit->exitType));
GuardRecord* guardRec = createGuardRecord(exit);
if (exit->exitType == LOOP_EXIT)
tree->sideExits.add(exit);
if (!cond->isCmp()) {
expected = !expected;
cond = cond->isI32() ? lir->ins_eq0(cond) : lir->ins_peq0(cond);
}
LIns* guardIns =
lir->insGuard(expected ? LIR_xf : LIR_xt, cond, guardRec);
if (!guardIns) {
debug_only_print0(LC_TMRecorder,
" redundant guard, eliminated, no codegen\n");
}
}
/*
* Emit a guarded 32-bit integer arithmetic operation op(d0, d1), taking
* the supplied side exit if it overflows.
*/
JS_REQUIRES_STACK LIns*
TraceRecorder::guard_xov(LOpcode op, LIns* d0, LIns* d1, VMSideExit* exit)
{
debug_only_printf(LC_TMRecorder,
" About to try emitting guard_xov code for "
"SideExit=%p exitType=%s\n",
(void*)exit, getExitName(exit->exitType));
GuardRecord* guardRec = createGuardRecord(exit);
JS_ASSERT(exit->exitType == OVERFLOW_EXIT);
switch (op) {
case LIR_add:
op = LIR_addxov;
break;
case LIR_sub:
op = LIR_subxov;
break;
case LIR_mul:
op = LIR_mulxov;
break;
default:
JS_NOT_REACHED("unexpected comparison op");
break;
}
LIns* guardIns = lir->insGuardXov(op, d0, d1, guardRec);
NanoAssert(guardIns);
return guardIns;
}
JS_REQUIRES_STACK VMSideExit*
TraceRecorder::copy(VMSideExit* copy)
{
size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots;
VMSideExit* exit = (VMSideExit*)
traceAlloc().alloc(sizeof(VMSideExit) + typemap_size * sizeof(TraceType));
/* Copy side exit structure. */
memcpy(exit, copy, sizeof(VMSideExit) + typemap_size * sizeof(TraceType));
exit->guards = NULL;
exit->from = fragment;
exit->target = NULL;
if (exit->exitType == LOOP_EXIT)
tree->sideExits.add(exit);
#if defined JS_JIT_SPEW
TreevisLogExit(cx, exit);
#endif
return exit;
}
/*
* Emit a guard for condition (cond), expecting to evaluate to boolean result
* (expected) and generate a side exit with type exitType to jump to if the
* condition does not hold.
*/
JS_REQUIRES_STACK void
TraceRecorder::guard(bool expected, LIns* cond, ExitType exitType)
{
guard(expected, cond, snapshot(exitType));
}
/*
* Determine whether any context associated with the same thread as cx is
* executing native code.
*/
static inline bool
ProhibitFlush(JSContext* cx)
{
if (cx->interpState) // early out if the given context is in native code
return true;
JSCList *cl;
#ifdef JS_THREADSAFE
JSThread* thread = cx->thread;
for (cl = thread->contextList.next; cl != &thread->contextList; cl = cl->next)
if (CX_FROM_THREAD_LINKS(cl)->interpState)
return true;
#else
JSRuntime* rt = cx->runtime;
for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next)
if (js_ContextFromLinkField(cl)->interpState)
return true;
#endif
return false;
}
static void
ResetJITImpl(JSContext* cx)
{
if (!TRACING_ENABLED(cx))
return;
TraceMonitor* tm = &JS_TRACE_MONITOR(cx);
debug_only_print0(LC_TMTracer, "Flushing cache.\n");
if (tm->recorder) {
JS_ASSERT_NOT_ON_TRACE(cx);
AbortRecording(cx, "flush cache");
}
if (ProhibitFlush(cx)) {
debug_only_print0(LC_TMTracer, "Deferring JIT flush due to deep bail.\n");
tm->needFlush = JS_TRUE;
return;
}
tm->flush();
}
/* Compile the current fragment. */
JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::compile()
{
#ifdef MOZ_TRACEVIS
TraceVisStateObj tvso(cx, S_COMPILE);
#endif
if (traceMonitor->needFlush) {
ResetJIT(cx, FR_DEEP_BAIL);
return ARECORD_ABORTED;
}
if (tree->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n");
Blacklist((jsbytecode*) fragment->root->ip);
return ARECORD_STOP;
}
if (anchor && anchor->exitType != CASE_EXIT)
++fragment->root->branchCount;
if (outOfMemory())
return ARECORD_STOP;
/* :TODO: windows support */
#if defined DEBUG && !defined WIN32
/* Associate a filename and line number with the fragment. */
const char* filename = cx->fp->script->filename;
char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
sprintf(label, "%s:%u", filename ? filename : "<stdin>",
js_FramePCToLineNumber(cx, cx->fp));
lirbuf->names->labels->add(fragment, sizeof(Fragment), 0, label);
js_free(label);
#endif
Assembler *assm = traceMonitor->assembler;
JS_ASSERT(assm->error() == nanojit::None);
assm->compile(fragment, tempAlloc(), /*optimize*/true
verbose_only(, lirbuf->names->labels));
if (assm->error() != nanojit::None) {
assm->setError(nanojit::None);
debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n");
Blacklist((jsbytecode*) fragment->root->ip);
return ARECORD_STOP;
}
if (outOfMemory())
return ARECORD_STOP;
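/*
* Compilation succeeded; clear the recording-attempt counters for this
* fragment's pc and the tree root's pc.
*/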
ResetRecordingAttempts(cx, (jsbytecode*) fragment->ip);
ResetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip);
if (anchor) {
#ifdef NANOJIT_IA32
if (anchor->exitType == CASE_EXIT)
assm->patch(anchor, anchor->switchInfo);
else
#endif
assm->patch(anchor);
}
JS_ASSERT(fragment->code());
JS_ASSERT_IF(fragment == fragment->root, fragment->root == tree);
return ARECORD_CONTINUE;
}
static void
JoinPeers(Assembler* assm, VMSideExit* exit, TreeFragment* target)
{
exit->target = target;
assm->patch(exit);
debug_only_printf(LC_TMTreeVis, "TREEVIS JOIN ANCHOR=%p FRAG=%p\n", (void*)exit, (void*)target);
if (exit->root() == target)
return;
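/*
* Record the cross-tree link in both directions so that later global-slot
* specialization (see SpecializeTreesToLateGlobals above) and related tree
* bookkeeping can reach the connected trees.
*/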
target->dependentTrees.addUnique(exit->root());
exit->root()->linkedTrees.addUnique(target);
}
/* Results of trying to connect an arbitrary type A with arbitrary type B */
enum TypeCheckResult
{
TypeCheck_Okay, /* Okay: same type */
TypeCheck_Promote, /* Okay: Type A needs f2i() */
TypeCheck_Demote, /* Okay: Type A needs i2f() */
TypeCheck_Undemote, /* Bad: Slot is undemotable */
TypeCheck_Bad /* Bad: incompatible types */
};
class SlotMap : public SlotVisitorBase
{
public:
struct SlotInfo
{
SlotInfo()
: vp(NULL), promoteInt(false), lastCheck(TypeCheck_Bad)
{}
SlotInfo(jsval* vp, bool promoteInt)
: vp(vp), promoteInt(promoteInt), lastCheck(TypeCheck_Bad), type(getCoercedType(*vp))
{}
SlotInfo(jsval* vp, TraceType t)
: vp(vp), promoteInt(t == TT_INT32), lastCheck(TypeCheck_Bad), type(t)
{}
jsval *vp;
bool promoteInt;
TypeCheckResult lastCheck;
TraceType type;
};
SlotMap(TraceRecorder& rec)
: mRecorder(rec),
mCx(rec.cx),
slots(NULL)
{
}