/* Copyright (c) 2008-2011, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "machine.h"
#include "util.h"
#include "vector.h"
#include "process.h"
#include "assembler.h"
#include "target.h"
#include "compiler.h"
#include "arch.h"
using namespace vm;
extern "C" uint64_t
vmInvoke(void* thread, void* function, void* arguments,
unsigned argumentFootprint, unsigned frameSize, unsigned returnType);
extern "C" void
vmInvoke_returnAddress();
extern "C" void
vmInvoke_safeStack();
extern "C" void
vmJumpAndInvoke(void* thread, void* function, void* stack,
unsigned argumentFootprint, uintptr_t* arguments,
unsigned frameSize);
namespace {
namespace local {
// Debug switches: set to true to trace the corresponding subsystem.
const bool DebugCompile = false;
const bool DebugNatives = false;
const bool DebugCallTable = false;
const bool DebugMethodTree = false;
const bool DebugFrameMaps = false;
const bool DebugIntrinsics = false;
// emit array bounds checks in compiled code
const bool CheckArrayBounds = true;
// first-class continuation support is compiled in only when
// AVIAN_CONTINUATIONS is defined at build time
#ifdef AVIAN_CONTINUATIONS
const bool Continuations = true;
#else
const bool Continuations = false;
#endif
// maximum argument footprint (in words) reserved for calls into native
// helpers from compiled code; differs between 32- and 64-bit targets
const unsigned MaxNativeCallFootprint = TargetBytesPerWord == 8 ? 4 : 5;
// initial capacity of each compilation Zone (arena)
const unsigned InitialZoneCapacityInBytes = 64 * 1024;
// size of the executable memory area for compiled code
const unsigned ExecutableAreaSizeInBytes = 30 * 1024 * 1024;
// Names of the GC roots this processor maintains; used with the root()
// and setRoot() helpers declared below.
enum Root {
CallTable,
MethodTree,
MethodTreeSentinal,
ObjectPools,
StaticTableArray,
VirtualThunks,
ReceiveMethod,
WindMethod,
RewindMethod
};
// Index of each out-of-line thunk in the thread's thunk table.  The
// THUNK macro expands to one enumerator per thunk listed in thunks.cpp.
enum ThunkIndex {
compileMethodIndex,
compileVirtualMethodIndex,
invokeNativeIndex,
throwArrayIndexOutOfBoundsIndex,
throwStackOverflowIndex,
#define THUNK(s) s##Index,
#include "thunks.cpp"
#undef THUNK
dummyIndex
};
// must be kept in sync with the last enumerator of Root above
const unsigned RootCount = RewindMethod + 1;
// Returns true when `ip` lies in the region of vmInvoke between
// vmInvoke_returnAddress (inclusive) and vmInvoke_safeStack
// (exclusive) -- the window during which the stack is not yet in a
// state safe for a stack walk.
inline bool
isVmInvokeUnsafeStack(void* ip)
{
  uintptr_t address = reinterpret_cast<uintptr_t>(ip);
  uintptr_t unsafeBegin
    = reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_returnAddress));
  uintptr_t unsafeEnd
    = reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_safeStack));
  return address >= unsafeBegin and address < unsafeEnd;
}
class MyThread;
void*
getIp(MyThread*);
// Thread subclass used by the JIT-compiling processor.  Beyond the base
// Thread state it records the saved Java ip and stack pointer, the
// active continuation, scratch fields that compiled code and thunks use
// to communicate (exception and virtual-call state), and a stack of
// CallTrace records describing transitions out of compiled code.
class MyThread: public Thread {
public:
// Saves the thread's call state when control leaves compiled Java code
// (e.g. to run a native method) and restores it on destruction, so
// stack walkers can stitch the Java frames back together.
class CallTrace {
public:
CallTrace(MyThread* t, object method):
t(t),
ip(getIp(t)),
stack(t->stack),
scratch(t->scratch),
continuation(t->continuation),
nativeMethod((methodFlags(t, method) & ACC_NATIVE) ? method : 0),
targetMethod(0),
originalMethod(method),
next(t->trace)
{
// clear the thread's saved context and push this trace onto t->trace
doTransition(t, 0, 0, 0, this);
}
~CallTrace() {
assert(t, t->stack == 0);
t->scratch = scratch;
// restore the context captured at construction and pop this trace
doTransition(t, ip, stack, continuation, next);
}
MyThread* t;
void* ip;
void* stack;
void* scratch;
object continuation;
// set only when the traced method is native; 0 otherwise
object nativeMethod;
object targetMethod;
object originalMethod;
CallTrace* next;
};
// Snapshot of (ip, stack, continuation, trace) published while those
// thread fields are being updated; see doTransition below.
class Context {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(MyThread* t, Context* context):
Protector(t), context(context)
{ }
virtual void visit(Heap::Visitor* v) {
// keep the snapshotted continuation alive across GC
v->visit(&(context->continuation));
}
Context* context;
};
Context(MyThread* t, void* ip, void* stack, object continuation,
CallTrace* trace):
ip(ip),
stack(stack),
continuation(continuation),
trace(trace),
protector(t, this)
{ }
void* ip;
void* stack;
object continuation;
CallTrace* trace;
MyProtector protector;
};
// A Context that also installs itself as the thread's current trace
// context for the duration of a stack walk; consumed by nextFrame and
// MyStackWalker.
class TraceContext: public Context {
public:
TraceContext(MyThread* t, void* ip, void* stack, object continuation,
CallTrace* trace):
Context(t, ip, stack, continuation, trace),
t(t),
link(0),
next(t->traceContext),
methodIsMostRecent(false)
{
t->traceContext = this;
}
// Convenience form: snapshot the thread's current state, recording
// only an architecture-specific `link` value.
TraceContext(MyThread* t, void* link):
Context(t, t->ip, t->stack, t->continuation, t->trace),
t(t),
link(link),
next(t->traceContext),
methodIsMostRecent(false)
{
t->traceContext = this;
}
~TraceContext() {
// pop this context off the thread's trace-context stack
t->traceContext = next;
}
MyThread* t;
void* link;
TraceContext* next;
bool methodIsMostRecent;
};
static void doTransition(MyThread* t, void* ip, void* stack,
object continuation, MyThread::CallTrace* trace)
{
// in this function, we "atomically" update the thread context
// fields in such a way to ensure that another thread may
// interrupt us at any time and still get a consistent, accurate
// stack trace. See MyProcessor::getStackTrace for details.
assert(t, t->transition == 0);
Context c(t, ip, stack, continuation, trace);
compileTimeMemoryBarrier();
t->transition = &c;
compileTimeMemoryBarrier();
t->ip = ip;
t->stack = stack;
t->continuation = continuation;
t->trace = trace;
compileTimeMemoryBarrier();
t->transition = 0;
}
MyThread(Machine* m, object javaThread, MyThread* parent,
bool useNativeFeatures):
Thread(m, javaThread, parent),
ip(0),
stack(0),
scratch(0),
continuation(0),
exceptionStackAdjustment(0),
exceptionOffset(0),
exceptionHandler(0),
tailAddress(0),
virtualCallTarget(0),
virtualCallIndex(0),
heapImage(0),
codeImage(0),
thunkTable(0),
trace(0),
reference(0),
arch(parent
? parent->arch
: makeArchitecture(m->system, useNativeFeatures)),
transition(0),
traceContext(0),
stackLimit(0),
methodLockIsClean(true)
{
// the architecture object is shared with the parent thread, if any;
// acquire/release calls manage its lifetime
arch->acquire();
}
// saved Java instruction pointer (see getIp for the convention)
void* ip;
// saved Java stack pointer
void* stack;
void* scratch;
object continuation;
uintptr_t exceptionStackAdjustment;
uintptr_t exceptionOffset;
void* exceptionHandler;
void* tailAddress;
void* virtualCallTarget;
uintptr_t virtualCallIndex;
uintptr_t* heapImage;
uint8_t* codeImage;
void** thunkTable;
// innermost CallTrace, or 0 when no native call is in progress
CallTrace* trace;
Reference* reference;
Assembler::Architecture* arch;
// non-zero only while doTransition is updating the fields above
Context* transition;
// innermost active TraceContext (stack walk), or 0
TraceContext* traceContext;
uintptr_t stackLimit;
bool methodLockIsClean;
};
// Free-function wrapper around MyThread::doTransition; see that
// function for the memory-ordering discipline it must obey.
void
transition(MyThread* t, void* ip, void* stack, object continuation,
MyThread::CallTrace* trace)
{
MyThread::doTransition(t, ip, stack, continuation, trace);
}
// Offset (in words) of the first parameter within a frame, accounting
// for the method's parameter footprint, the frame footer, and the
// return-address slot.
unsigned
parameterOffset(MyThread* t, object method)
{
  unsigned parameters = methodParameterFootprint(t, method);
  unsigned footer = t->arch->frameFooterSize();
  unsigned returnAddress = t->arch->frameReturnAddressSize();
  return parameters + footer + returnAddress - 1;
}
// Reads the `this` reference of the current call from `stack`: the
// first argument slot, just past the frame footer and return address.
object
resolveThisPointer(MyThread* t, void* stack)
{
  object* slots = reinterpret_cast<object*>(stack);
  unsigned thisIndex
    = t->arch->frameFooterSize() + t->arch->frameReturnAddressSize();
  return slots[thisIndex];
}
// Resolves `method` against the dynamic type of `instance`: interface
// methods go through the interface lookup, virtual methods through the
// vtable, and static or non-virtual methods are returned unchanged.
object
findMethod(Thread* t, object method, object instance)
{
  if (methodFlags(t, method) & ACC_STATIC) {
    return method;
  }
  if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
    return findInterfaceMethod(t, method, objectClass(t, instance));
  }
  if (methodVirtual(t, method)) {
    return findVirtualMethod(t, method, objectClass(t, instance));
  }
  return method;
}
// Resolves the concrete target of a virtual or interface call, given
// the caller's stack (from which the receiver is read).  Forces full
// resolution of the receiver's class first if it is still in the
// bootstrap state.
object
resolveTarget(MyThread* t, void* stack, object method)
{
  object class_ = objectClass(t, resolveThisPointer(t, stack));
  if (classVmFlags(t, class_) & BootstrapFlag) {
    // resolveSystemClass may allocate, so protect our references
    PROTECT(t, method);
    PROTECT(t, class_);
    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
  }
  bool viaInterface
    = (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) != 0;
  return viaInterface
    ? findInterfaceMethod(t, method, class_)
    : findVirtualMethod(t, method, class_);
}
// Resolves the method at vtable slot `index` of `class_`, forcing full
// class resolution first if the class is still in the bootstrap state.
object
resolveTarget(MyThread* t, object class_, unsigned index)
{
  if (classVmFlags(t, class_) & BootstrapFlag) {
    // resolveSystemClass may allocate, so protect our reference
    PROTECT(t, class_);
    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
  }
  object vtable = classVirtualTable(t, class_);
  return arrayBody(t, vtable, index);
}
object&
root(Thread* t, Root root);
void
setRoot(Thread* t, Root root, object value);
// Address of the compiled code for `method`.
intptr_t
methodCompiled(Thread* t, object method)
{
return codeCompiled(t, methodCode(t, method));
}
// Size in bytes of the compiled code for `method`.
unsigned
methodCompiledSize(Thread* t, object method)
{
return codeCompiledSize(t, methodCode(t, method));
}
// Three-way comparison of `ip` against the compiled-code range of
// `method`: -1 if ip is below the range, 0 if inside, 1 if above.
// Used as the comparator for the method tree (see methodForIp).
intptr_t
compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
{
  intptr_t start = methodCompiled(t, method);
  intptr_t end = start + static_cast<intptr_t>(methodCompiledSize(t, method));
  if (DebugMethodTree) {
    fprintf(stderr, "find %p in (%p,%p)\n",
            reinterpret_cast<void*>(ip),
            reinterpret_cast<void*>(start),
            reinterpret_cast<void*>(end));
  }
  if (ip < start) {
    return -1;
  }
  if (ip < end) {
    return 0;
  }
  return 1;
}
// Finds the method whose compiled code contains `ip`, or 0 if none,
// by querying the method tree root.
object
methodForIp(MyThread* t, void* ip)
{
if (DebugMethodTree) {
fprintf(stderr, "query for method containing %p\n", ip);
}
// we must use a version of the method tree at least as recent as the
// compiled form of the method containing the specified address (see
// compile(MyThread*, FixedAllocator*, BootContext*, object)):
loadMemoryBarrier();
return treeQuery(t, root(t, MethodTree), reinterpret_cast<intptr_t>(ip),
root(t, MethodTreeSentinal), compareIpToMethodBounds);
}
// Number of local-variable slots `method`'s frame needs.  Synchronized
// instance methods (ACC_SYNCHRONIZED set, ACC_STATIC clear) get one
// extra slot -- presumably for the monitor reference; confirm against
// the synchronized-method prologue.
unsigned
localSize(MyThread* t, object method)
{
  unsigned count = codeMaxLocals(t, methodCode(t, method));
  unsigned flags = methodFlags(t, method);
  bool synchronizedInstanceMethod
    = (flags & (ACC_SYNCHRONIZED | ACC_STATIC)) == ACC_SYNCHRONIZED;
  if (synchronizedInstanceMethod) {
    count += 1;
  }
  return count;
}
// Total aligned frame size for `method`, in words: locals that are not
// parameters, plus the operand stack, plus room for outgoing native
// calls, rounded up per the target architecture's alignment rules.
unsigned
alignedFrameSize(MyThread* t, object method)
{
  unsigned locals = localSize(t, method);
  unsigned parameters = methodParameterFootprint(t, method);
  unsigned operandStack = codeMaxStack(t, methodCode(t, method));
  unsigned nativeCalls = t->arch->frameFootprint(MaxNativeCallFootprint);
  return t->arch->alignFrameSize
    (locals - parameters + operandStack + nativeCalls);
}
// Computes the caller's ip and stack pointer, in place, from those of
// a frame belonging to `method`.  `target` is the callee just returned
// from (or 0), whose parameter footprint the architecture may need;
// `mostRecent` is true for the innermost frame.  The architecture-
// specific work is delegated to Assembler::Architecture::nextFrame.
void
nextFrame(MyThread* t, void** ip, void** sp, object method, object target,
bool mostRecent)
{
object code = methodCode(t, method);
intptr_t start = codeCompiled(t, code);
void* link;
bool methodIsMostRecent;
// an active TraceContext supplies the saved link register and whether
// the walk started at this method
if (t->traceContext) {
link = t->traceContext->link;
methodIsMostRecent = mostRecent and t->traceContext->methodIsMostRecent;
} else {
link = 0;
methodIsMostRecent = false;
}
// fprintf(stderr, "nextFrame %s.%s%s target %s.%s%s ip %p sp %p\n",
// &byteArrayBody(t, className(t, methodClass(t, method)), 0),
// &byteArrayBody(t, methodName(t, method), 0),
// &byteArrayBody(t, methodSpec(t, method), 0),
// target
// ? &byteArrayBody(t, className(t, methodClass(t, target)), 0)
// : 0,
// target
// ? &byteArrayBody(t, methodName(t, target), 0)
// : 0,
// target
// ? &byteArrayBody(t, methodSpec(t, target), 0)
// : 0,
// *ip, *sp);
t->arch->nextFrame
(reinterpret_cast<void*>(start), codeCompiledSize(t, code),
alignedFrameSize(t, method), link, methodIsMostRecent,
target ? methodParameterFootprint(t, target) : -1, ip, sp);
// fprintf(stderr, "next frame ip %p sp %p\n", *ip, *sp);
}
// Returns the return address for the current frame.  By convention, if
// the return address is neither pushed onto the stack automatically as
// part of the call nor stored in the caller's frame, it is saved in
// MyThread::ip instead; see the Assembler::saveFrame implementations
// for how this is done.
void*
getIp(MyThread* t, void* ip, void* stack)
{
  if (t->arch->returnAddressOffset() < 0) {
    return ip;
  } else {
    return t->arch->frameIp(stack);
  }
}

// Convenience overload using the thread's saved ip and stack.
void*
getIp(MyThread* t)
{
  return getIp(t, t->ip, t->stack);
}
// Walks the Java stack of a MyThread, traversing compiled frames,
// native-call trace records, and continuations.  valid() and next()
// drive a small state machine; walk() visits each frame through a
// Processor::StackVisitor.
class MyStackWalker: public Processor::StackWalker {
public:
enum State {
Start,
Next,
Trace,
Continuation,
Method,
NativeMethod,
Finish
};
class MyProtector: public Thread::Protector {
public:
MyProtector(MyStackWalker* walker):
Protector(walker->t), walker(walker)
{ }
virtual void visit(Heap::Visitor* v) {
// keep the walker's object references valid across GC
v->visit(&(walker->method_));
v->visit(&(walker->target));
v->visit(&(walker->continuation));
}
MyStackWalker* walker;
};
// Starts a walk from the thread's current position, preferring an
// active TraceContext snapshot if one exists.
MyStackWalker(MyThread* t):
t(t),
state(Start),
method_(0),
target(0),
count_(0),
protector(this)
{
if (t->traceContext) {
ip_ = t->traceContext->ip;
stack = t->traceContext->stack;
trace = t->traceContext->trace;
continuation = t->traceContext->continuation;
} else {
ip_ = getIp(t);
stack = t->stack;
trace = t->trace;
continuation = t->continuation;
}
}
// Clones another walker's position; note count_ restarts at zero.
MyStackWalker(MyStackWalker* w):
t(w->t),
state(w->state),
ip_(w->ip_),
stack(w->stack),
trace(w->trace),
method_(w->method_),
target(w->target),
continuation(w->continuation),
count_(0),
protector(this)
{ }
virtual void walk(Processor::StackVisitor* v) {
for (MyStackWalker it(this); it.valid();) {
// hand the visitor a copy so it cannot disturb the iteration
MyStackWalker walker(&it);
if (not v->visit(&walker)) {
break;
}
it.next();
}
}
// Advances the state machine until it rests on a frame (Method,
// NativeMethod or Continuation) or the walk is exhausted (Finish).
bool valid() {
while (true) {
// fprintf(stderr, "state: %d\n", state);
switch (state) {
case Start:
if (trace and trace->nativeMethod) {
method_ = trace->nativeMethod;
state = NativeMethod;
} else {
state = Next;
}
break;
case Next:
if (stack) {
target = method_;
method_ = methodForIp(t, ip_);
if (method_) {
state = Method;
} else if (continuation) {
method_ = continuationMethod(t, continuation);
state = Continuation;
} else {
state = Trace;
}
} else {
state = Trace;
}
break;
case Trace: {
// resume from the next native-call trace record, if any
if (trace) {
continuation = trace->continuation;
stack = trace->stack;
ip_ = trace->ip;
trace = trace->next;
state = Start;
} else {
state = Finish;
}
} break;
case Continuation:
case Method:
case NativeMethod:
return true;
case Finish:
return false;
default:
abort(t);
}
}
}
// Steps past the frame valid() stopped on.
void next() {
expect(t, count_ <= StackSizeInWords);
switch (state) {
case Continuation:
continuation = continuationNext(t, continuation);
break;
case Method:
nextFrame(t, &ip_, &stack, method_, target, count_ == 0);
break;
case NativeMethod:
break;
default:
abort(t);
}
++ count_;
state = Next;
}
virtual object method() {
// fprintf(stderr, "method %s.%s\n", &byteArrayBody
// (t, className(t, methodClass(t, method_)), 0),
// &byteArrayBody(t, methodName(t, method_), 0));
return method_;
}
// Current frame's ip as an offset into the method's compiled code
// (0 for native methods).
virtual int ip() {
switch (state) {
case Continuation:
return reinterpret_cast<intptr_t>(continuationAddress(t, continuation))
- methodCompiled(t, continuationMethod(t, continuation));
case Method:
return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);
case NativeMethod:
return 0;
default:
abort(t);
}
}
// Counts the remaining frames without consuming this walker.
virtual unsigned count() {
unsigned count = 0;
for (MyStackWalker walker(this); walker.valid();) {
walker.next();
++ count;
}
return count;
}
MyThread* t;
State state;
void* ip_;
void* stack;
MyThread::CallTrace* trace;
object method_;
object target;
object continuation;
unsigned count_;
MyProtector protector;
};
// Offset (in words) of local variable `v` from the base of the frame.
// Parameters live above the frame header and footer; other locals live
// inside the frame itself.
int
localOffset(MyThread* t, int v, object method)
{
  int parameterFootprint = methodParameterFootprint(t, method);
  int frameSize = alignedFrameSize(t, method);
  int offset;
  if (v < parameterFootprint) {
    // parameter: skip past the frame plus its footer and header
    offset = frameSize
      + parameterFootprint
      + t->arch->frameFooterSize()
      + t->arch->frameHeaderSize()
      - v - 1;
  } else {
    // ordinary local within the frame
    offset = frameSize + parameterFootprint - v - 1;
  }
  assert(t, offset >= 0);
  return offset;
}
// Offset of local `index` in words relative to the stack pointer
// (adds the return-address slot that localOffset does not include).
int
localOffsetFromStack(MyThread* t, int index, object method)
{
return localOffset(t, index, method)
+ t->arch->frameReturnAddressSize();
}
// Address of local `index` within a frame whose stack pointer is `stack`.
object*
localObject(MyThread* t, void* stack, object method, unsigned index)
{
return static_cast<object*>(stack) + localOffsetFromStack(t, index, method);
}
// Distance in words from a frame pointer down to its stack pointer.
int
stackOffsetFromFrame(MyThread* t, object method)
{
return alignedFrameSize(t, method) + t->arch->frameHeaderSize();
}
// Converts a frame pointer into the corresponding stack pointer.
void*
stackForFrame(MyThread* t, void* frame, object method)
{
return static_cast<void**>(frame) - stackOffsetFromFrame(t, method);
}
// Entry in a compiled method's constant object pool.  Doubles as a
// Promise whose value is the entry's address, assigned (non-zero) once
// the compiled code has been placed in memory.
class PoolElement: public Promise {
public:
PoolElement(Thread* t, object target, PoolElement* next):
t(t), target(target), address(0), next(next)
{ }
virtual int64_t value() {
assert(t, resolved());
return address;
}
virtual bool resolved() {
// an address of zero means "not yet assigned"
return address != 0;
}
Thread* t;
object target;
intptr_t address;
PoolElement* next;
};
class Context;
class SubroutineCall;
// Compile-time record of a bytecode subroutine (the target of a jsr
// instruction).  Subroutines are linked both into a list of all
// subroutines in the method (listNext) and into a stack of currently
// nested subroutines (stackNext).
class Subroutine {
public:
Subroutine(unsigned ip, unsigned logIndex, Subroutine* listNext,
Subroutine* stackNext):
listNext(listNext),
stackNext(stackNext),
calls(0),
handle(0),
ip(ip),
logIndex(logIndex),
stackIndex(0),
callCount(0),
tableIndex(0),
visited(false)
{ }
Subroutine* listNext;
Subroutine* stackNext;
// list of call sites targeting this subroutine (see SubroutineCall)
SubroutineCall* calls;
Compiler::Subroutine* handle;
// bytecode ip of the subroutine entry point
unsigned ip;
// position in the context's event log where this subroutine begins
unsigned logIndex;
unsigned stackIndex;
// number of SubroutineCall records linked via `calls`
unsigned callCount;
unsigned tableIndex;
bool visited;
};
class SubroutinePath;
// One jsr call site for a given subroutine; links itself into the
// subroutine's call list (and bumps its call count) on construction.
class SubroutineCall {
public:
SubroutineCall(Subroutine* subroutine, Promise* returnAddress):
subroutine(subroutine),
returnAddress(returnAddress),
paths(0),
next(subroutine->calls)
{
subroutine->calls = this;
++ subroutine->callCount;
}
Subroutine* subroutine;
// machine return address of the jsr, resolved once code is emitted
Promise* returnAddress;
SubroutinePath* paths;
SubroutineCall* next;
};
// A chain of nested subroutine calls leading to a point in the code,
// carrying its own GC root table; links itself into the call's path
// list on construction.
class SubroutinePath {
public:
SubroutinePath(SubroutineCall* call, SubroutinePath* stackNext,
uintptr_t* rootTable):
call(call),
stackNext(stackNext),
listNext(call->paths),
rootTable(rootTable)
{
call->paths = this;
}
SubroutineCall* call;
// enclosing path (next outer subroutine call), or 0
SubroutinePath* stackNext;
SubroutinePath* listNext;
uintptr_t* rootTable;
};
// Debug helper: prints a subroutine path to stderr as a parenthesized,
// comma-separated list of the return addresses along the chain
// (0 for addresses not yet resolved).  Prints nothing for a null path.
void
print(SubroutinePath* path)
{
  if (path == 0) {
    return;
  }
  fprintf(stderr, " (");
  for (SubroutinePath* p = path; p; p = p->stackNext) {
    if (p != path) {
      fprintf(stderr, ", ");
    }
    Promise* address = p->call->returnAddress;
    fprintf(stderr, "%p", address->resolved()
            ? reinterpret_cast<void*>(address->value()) : 0);
  }
  fprintf(stderr, ")");
}
// GC map recorded for one subroutine path at a trace point.  `map` is
// a variable-length word array allocated immediately after the object
// (hence the zero-length array member) and zeroed on construction.
class SubroutineTrace {
public:
SubroutineTrace(SubroutinePath* path, SubroutineTrace* next,
unsigned mapSize):
path(path),
next(next),
watch(false)
{
memset(map, 0, mapSize * BytesPerWord);
}
SubroutinePath* path;
SubroutineTrace* next;
bool watch;
// flexible array member: mapSize words of map bits
uintptr_t map[0];
};
// Records a point in the compiled code where a stack trace may be
// taken (e.g. a call site), together with the map of live object
// references at that point.  `map` is a variable-length word array
// allocated immediately after the object.
class TraceElement: public TraceHandler {
public:
// flag bits describing the call at this trace point
static const unsigned VirtualCall = 1 << 0;
static const unsigned TailCall = 1 << 1;
static const unsigned LongCall = 1 << 2;
TraceElement(Context* context, unsigned ip, object target, unsigned flags,
TraceElement* next, unsigned mapSize):
context(context),
address(0),
next(next),
subroutineTrace(0),
target(target),
ip(ip),
subroutineTraceCount(0),
argumentIndex(0),
flags(flags),
watch(false)
{
memset(map, 0, mapSize * BytesPerWord);
}
// Invoked when the machine address of this trace point becomes
// known; only the first notification is recorded.
virtual void handleTrace(Promise* address, unsigned argumentIndex) {
if (this->address == 0) {
this->address = address;
this->argumentIndex = argumentIndex;
}
}
Context* context;
// machine address of the trace point, or 0 until handleTrace runs
Promise* address;
TraceElement* next;
SubroutineTrace* subroutineTrace;
// the call's target method, if any
object target;
// bytecode ip corresponding to this trace point
unsigned ip;
unsigned subroutineTraceCount;
unsigned argumentIndex;
unsigned flags;
bool watch;
// flexible array member: mapSize words of map bits
uintptr_t map[0];
};
// Promise that resolves to the machine address of a TraceElement,
// delegating to the element's own address promise once it exists.
class TraceElementPromise: public Promise {
public:
TraceElementPromise(System* s, TraceElement* trace): s(s), trace(trace) { }
virtual int64_t value() {
assert(s, resolved());
return trace->address->value();
}
virtual bool resolved() {
// resolved only after handleTrace has run and the inner promise
// itself has a value
return trace->address != 0 and trace->address->resolved();
}
System* s;
TraceElement* trace;
};
// Codes for records appended to Context::eventLog during compilation:
// frame-map changes (mark/clear), ip transitions, exception-handler
// and subroutine boundaries, and trace points.  The log is consumed
// later when frame maps are computed.
enum Event {
PushContextEvent,
PopContextEvent,
IpEvent,
MarkEvent,
ClearEvent,
PushExceptionHandlerEvent,
TraceEvent,
PushSubroutineEvent,
PopSubroutineEvent
};
// Number of bits needed in a frame map: one per local-variable slot
// plus one per operand-stack slot.
unsigned
frameMapSizeInBits(MyThread* t, object method)
{
return localSize(t, method) + codeMaxStack(t, methodCode(t, method));
}
// The same quantity rounded up to whole machine words.
unsigned
frameMapSizeInWords(MyThread* t, object method)
{
return ceiling(frameMapSizeInBits(t, method), BitsPerWord);
}
// Allocates a zero-initialized table of one uint16_t per bytecode of
// `method` (codeLength entries, two bytes each) from `zone`.
uint16_t*
makeVisitTable(MyThread* t, Zone* zone, object method)
{
  unsigned bytes = codeLength(t, methodCode(t, method)) * 2;
  uint16_t* entries = static_cast<uint16_t*>(zone->allocate(bytes));
  memset(entries, 0, bytes);
  return entries;
}
// Allocates the root table for `method` from `zone`: one frame map
// (frameMapSizeInWords words) per bytecode, with every bit initially
// set (0xFF fill).
uintptr_t*
makeRootTable(MyThread* t, Zone* zone, object method)
{
  unsigned wordsPerMap = frameMapSizeInWords(t, method);
  unsigned bytes
    = wordsPerMap * codeLength(t, methodCode(t, method)) * BytesPerWord;
  uintptr_t* roots = static_cast<uintptr_t*>(zone->allocate(bytes));
  memset(roots, 0xFF, bytes);
  return roots;
}
// One enumerator per thunk defined in thunks.cpp.
enum Thunk {
#define THUNK(s) s##Thunk,
#include "thunks.cpp"
#undef THUNK
};
// must correspond to the last THUNK entry in thunks.cpp
const unsigned ThunkCount = gcIfNecessaryThunk + 1;
intptr_t
getThunk(MyThread* t, Thunk thunk);
// State carried through an ahead-of-time (boot image) compilation:
// lists of constants and calls accumulated while compiling, delayed
// address promises to be fixed up when the image is written, the
// allocation zone, and a resolver for offsets.
class BootContext {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(Thread* t, BootContext* c): Protector(t), c(c) { }
virtual void visit(Heap::Visitor* v) {
// keep the accumulated constant and call lists alive across GC
v->visit(&(c->constants));
v->visit(&(c->calls));
}
BootContext* c;
};
BootContext(Thread* t, object constants, object calls,
DelayedPromise* addresses, Zone* zone, OffsetResolver* resolver):
protector(t, this), constants(constants), calls(calls),
addresses(addresses), addressSentinal(addresses), zone(zone),
resolver(resolver)
{ }
MyProtector protector;
object constants;
object calls;
DelayedPromise* addresses;
// records the initial head of `addresses`, so entries added later can
// be distinguished from pre-existing ones
DelayedPromise* addressSentinal;
Zone* zone;
OffsetResolver* resolver;
};
// All state needed to compile a single method: allocation zone,
// assembler, compiler, constant object pool, subroutine and trace
// logs, and the tables from which frame maps are later derived.  The
// second constructor builds a compiler-less context for callers that
// need only the assembler.
class Context {
public:
// Disposes the context when the owning thread's resources are torn
// down (e.g. when an exception unwinds past the compilation).
class MyResource: public Thread::Resource {
public:
MyResource(Context* c): Resource(c->thread), c(c) { }
virtual void release() {
c->dispose();
}
Context* c;
};
class MyProtector: public Thread::Protector {
public:
MyProtector(Context* c): Protector(c->thread), c(c) { }
virtual void visit(Heap::Visitor* v) {
// the method being compiled, plus every object referenced from the
// constant pool and trace log, must survive GC
v->visit(&(c->method));
for (PoolElement* p = c->objectPool; p; p = p->next) {
v->visit(&(p->target));
}
for (TraceElement* p = c->traceLog; p; p = p->next) {
v->visit(&(p->target));
}
}
Context* c;
};
// Maps abstract compiler operations to the out-of-line thunks that
// implement them (floating point, 64-bit divide/modulo, conversions).
class MyClient: public Compiler::Client {
public:
MyClient(MyThread* t): t(t) { }
virtual intptr_t getThunk(UnaryOperation, unsigned) {
// no unary operation is implemented via a thunk
abort(t);
}
virtual intptr_t getThunk(BinaryOperation op, unsigned size,
unsigned resultSize)
{
if (size == 8) {
switch(op) {
case Absolute:
assert(t, resultSize == 8);
return local::getThunk(t, absoluteLongThunk);
case FloatNegate:
assert(t, resultSize == 8);
return local::getThunk(t, negateDoubleThunk);
case FloatSquareRoot:
assert(t, resultSize == 8);
return local::getThunk(t, squareRootDoubleThunk);
case Float2Float:
assert(t, resultSize == 4);
return local::getThunk(t, doubleToFloatThunk);
case Float2Int:
if (resultSize == 8) {
return local::getThunk(t, doubleToLongThunk);
} else {
assert(t, resultSize == 4);
return local::getThunk(t, doubleToIntThunk);
}
case Int2Float:
if (resultSize == 8) {
return local::getThunk(t, longToDoubleThunk);
} else {
assert(t, resultSize == 4);
return local::getThunk(t, longToFloatThunk);
}
// NOTE(review): unlike the 32-bit branch below, there is no
// FloatAbsolute case here, so a 64-bit FloatAbsolute aborts --
// confirm it is never requested for doubles
default: abort(t);
}
} else {
assert(t, size == 4);
switch(op) {
case Absolute:
assert(t, resultSize == 4);
return local::getThunk(t, absoluteIntThunk);
case FloatNegate:
assert(t, resultSize == 4);
return local::getThunk(t, negateFloatThunk);
case FloatAbsolute:
assert(t, resultSize == 4);
return local::getThunk(t, absoluteFloatThunk);
case Float2Float:
assert(t, resultSize == 8);
return local::getThunk(t, floatToDoubleThunk);
case Float2Int:
if (resultSize == 4) {
return local::getThunk(t, floatToIntThunk);
} else {
assert(t, resultSize == 8);
return local::getThunk(t, floatToLongThunk);
}
case Int2Float:
if (resultSize == 4) {
return local::getThunk(t, intToFloatThunk);
} else {
assert(t, resultSize == 8);
return local::getThunk(t, intToDoubleThunk);
}
default: abort(t);
}
}
}
virtual intptr_t getThunk(TernaryOperation op, unsigned size, unsigned,
bool* threadParameter)
{
// only the integer divide/modulo thunks receive the thread pointer
// as an extra argument
*threadParameter = false;
if (size == 8) {
switch (op) {
case Divide:
*threadParameter = true;
return local::getThunk(t, divideLongThunk);
case Remainder:
*threadParameter = true;
return local::getThunk(t, moduloLongThunk);
case FloatAdd:
return local::getThunk(t, addDoubleThunk);
case FloatSubtract:
return local::getThunk(t, subtractDoubleThunk);
case FloatMultiply:
return local::getThunk(t, multiplyDoubleThunk);
case FloatDivide:
return local::getThunk(t, divideDoubleThunk);
case FloatRemainder:
return local::getThunk(t, moduloDoubleThunk);
case JumpIfFloatEqual:
case JumpIfFloatNotEqual:
case JumpIfFloatLess:
case JumpIfFloatGreater:
case JumpIfFloatLessOrEqual:
case JumpIfFloatGreaterOrUnordered:
case JumpIfFloatGreaterOrEqualOrUnordered:
return local::getThunk(t, compareDoublesGThunk);
case JumpIfFloatGreaterOrEqual:
case JumpIfFloatLessOrUnordered:
case JumpIfFloatLessOrEqualOrUnordered:
return local::getThunk(t, compareDoublesLThunk);
default: abort(t);
}
} else {
assert(t, size == 4);
switch (op) {
case Divide:
*threadParameter = true;
return local::getThunk(t, divideIntThunk);
case Remainder:
*threadParameter = true;
return local::getThunk(t, moduloIntThunk);
case FloatAdd:
return local::getThunk(t, addFloatThunk);
case FloatSubtract:
return local::getThunk(t, subtractFloatThunk);
case FloatMultiply:
return local::getThunk(t, multiplyFloatThunk);
case FloatDivide:
return local::getThunk(t, divideFloatThunk);
case FloatRemainder:
return local::getThunk(t, moduloFloatThunk);
case JumpIfFloatEqual:
case JumpIfFloatNotEqual:
case JumpIfFloatLess:
case JumpIfFloatGreater:
case JumpIfFloatLessOrEqual:
case JumpIfFloatGreaterOrUnordered:
case JumpIfFloatGreaterOrEqualOrUnordered:
return local::getThunk(t, compareFloatsGThunk);
case JumpIfFloatGreaterOrEqual:
case JumpIfFloatLessOrUnordered:
case JumpIfFloatLessOrEqualOrUnordered:
return local::getThunk(t, compareFloatsLThunk);
default: abort(t);
}
}
}
MyThread* t;
};
// Full compilation context for `method`; `bootContext` is non-null
// only when building a boot image.
Context(MyThread* t, BootContext* bootContext, object method):
thread(t),
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
client(t),
compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
method(method),
bootContext(bootContext),
objectPool(0),
subroutines(0),
traceLog(0),
visitTable(makeVisitTable(t, &zone, method)),
rootTable(makeRootTable(t, &zone, method)),
subroutineTable(0),
executableAllocator(0),
executableStart(0),
executableSize(0),
objectPoolCount(0),
traceLogCount(0),
dirtyRoots(false),
leaf(true),
eventLog(t->m->system, t->m->heap, 1024),
protector(this),
resource(this)
{ }
// Assembler-only context: no compiler, no method, empty event log.
Context(MyThread* t):
thread(t),
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
client(t),
compiler(0),
method(0),
bootContext(0),
objectPool(0),
subroutines(0),
traceLog(0),
visitTable(0),
rootTable(0),
subroutineTable(0),
executableAllocator(0),
executableStart(0),
executableSize(0),
objectPoolCount(0),
traceLogCount(0),
dirtyRoots(false),
leaf(true),
eventLog(t->m->system, t->m->heap, 0),
protector(this),
resource(this)
{ }
~Context() {
dispose();
}
// Releases the compiler, assembler, any executable memory handed to
// this context, the event log, and the zone.
void dispose() {
if (compiler) {
compiler->dispose();
}
assembler->dispose();
if (executableAllocator) {
executableAllocator->free(executableStart, executableSize);
}
eventLog.dispose();
zone.dispose();
}
MyThread* thread;
Zone zone;
Assembler* assembler;
MyClient client;
Compiler* compiler;
object method;
BootContext* bootContext;
// linked list of interned constant objects (see Frame::append)
PoolElement* objectPool;
Subroutine* subroutines;
TraceElement* traceLog;
uint16_t* visitTable;
uintptr_t* rootTable;
// per-bytecode subroutine membership, allocated lazily
Subroutine** subroutineTable;
Allocator* executableAllocator;
void* executableStart;
unsigned executableSize;
unsigned objectPoolCount;
unsigned traceLogCount;
bool dirtyRoots;
// true until a call is emitted (used for leaf-method handling)
bool leaf;
Vector eventLog;
MyProtector protector;
MyResource resource;
};
// Translates a Java local-variable index into the compiler's frame
// layout: parameter slots are addressed in reverse order (accounting
// for the value's footprint), while ordinary locals keep their index.
unsigned
translateLocalIndex(Context* context, unsigned footprint, unsigned index)
{
  const unsigned parameterFootprint = methodParameterFootprint
    (context->thread, context->method);
  return index < parameterFootprint
    ? parameterFootprint - index - footprint
    : index;
}
// Emits a load of local `index` (footprint 1 or 2 words) through the
// compiler, translating the Java index into the compiler's layout.
Compiler::Operand*
loadLocal(Context* context, unsigned footprint, unsigned index)
{
return context->compiler->loadLocal
(footprint, translateLocalIndex(context, footprint, index));
}
// Emits a store of `value` into local `index`, with the same index
// translation as loadLocal.
void
storeLocal(Context* context, unsigned footprint, Compiler::Operand* value,
unsigned index)
{
context->compiler->storeLocal
(footprint, value, translateLocalIndex(context, footprint, index));
}
FixedAllocator*
codeAllocator(MyThread* t);
class Frame {
public:
enum StackType {
Integer,
Long,
Object
};
// Constructs the root Frame for the method being compiled.  `stackMap`
// holds one StackType byte per operand-stack slot and is zeroed
// (Integer) initially; sp starts just past the locals.
Frame(Context* context, uint8_t* stackMap):
context(context),
t(context->thread),
c(context->compiler),
subroutine(0),
stackMap(stackMap),
ip(0),
sp(localSize()),
level(0)
{
memset(stackMap, 0, codeMaxStack(t, methodCode(t, context->method)));
}
// Constructs a nested Frame from parent `f`, copying the parent's
// operand-stack map.  Nesting deeper than one level is recorded in the
// event log as a context push.
Frame(Frame* f, uint8_t* stackMap):
context(f->context),
t(context->thread),
c(context->compiler),
subroutine(f->subroutine),
stackMap(stackMap),
ip(f->ip),
sp(f->sp),
level(f->level + 1)
{
memcpy(stackMap, f->stackMap, codeMaxStack
(t, methodCode(t, context->method)));
if (level > 1) {
context->eventLog.append(PushContextEvent);
}
}
~Frame() {
// balance the PushContextEvent appended by the nested constructor
if (level > 1) {
context->eventLog.append(PopContextEvent);
}
}
// Returns a compiler operand referring to constant object `o`.  When
// building a boot image the reference is recorded in the boot
// context's constant list and loaded relative to the heap image at
// runtime; otherwise `o` is interned in the context's object pool and
// referenced by address.
Compiler::Operand* append(object o) {
BootContext* bc = context->bootContext;
if (bc) {
Promise* p = new (bc->zone) ListenPromise(t->m->system, bc->zone);
PROTECT(t, o);
object pointer = makePointer(t, p);
bc->constants = makeTriple(t, o, pointer, bc->constants);
return c->add
(TargetBytesPerWord, c->memory
(c->register_(t->arch->thread()), Compiler::AddressType,
TargetThreadHeapImage), c->promiseConstant
(p, Compiler::AddressType));
} else {
// reuse an existing pool entry if the object is already interned
for (PoolElement* e = context->objectPool; e; e = e->next) {
if (o == e->target) {
return c->address(e);
}
}
context->objectPool = new(&context->zone) PoolElement(t, o, context->objectPool);
++ context->objectPoolCount;
return c->address(context->objectPool);
}
}
// Number of local-variable slots for the method being compiled.
unsigned localSize() {
return local::localSize(t, context->method);
}
// Maximum operand-stack depth for the method being compiled.
unsigned stackSize() {
return codeMaxStack(t, methodCode(t, context->method));
}
// Total frame-map size: locals plus operand stack.
unsigned frameSize() {
return localSize() + stackSize();
}
// Records the type of frame slot `index` in the event log and, for
// operand-stack slots, in the stack map as well.
void set(unsigned index, uint8_t type) {
assert(t, index < frameSize());
if (type == Object) {
context->eventLog.append(MarkEvent);
context->eventLog.append2(index);
} else {
context->eventLog.append(ClearEvent);
context->eventLog.append2(index);
}
// only operand-stack slots are mirrored in stackMap
int si = index - localSize();
if (si >= 0) {
stackMap[si] = type;
}
}
// Returns the recorded type of operand-stack slot `index`.
uint8_t get(unsigned index) {
assert(t, index < frameSize());
int si = index - localSize();
assert(t, si >= 0);
return stackMap[si];
}
// The pushed*/popped*/stored* methods below mirror the stack and local
// effects of each bytecode in the frame map, so object slots can later
// be identified.
void pushedInt() {
assert(t, sp + 1 <= frameSize());
set(sp++, Integer);
}
void pushedLong() {
// longs and doubles occupy two slots
assert(t, sp + 2 <= frameSize());
set(sp++, Long);
set(sp++, Long);
}
void pushedObject() {
assert(t, sp + 1 <= frameSize());
set(sp++, Object);
}
// Pops `count` slots, resetting each to Integer (i.e. not an object).
void popped(unsigned count) {
assert(t, sp >= count);
assert(t, sp - count >= localSize());
while (count) {
set(--sp, Integer);
-- count;
}
}
void poppedInt() {
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
assert(t, get(sp - 1) == Integer);
-- sp;
}
void poppedLong() {
assert(t, sp >= 1);
assert(t, sp - 2 >= localSize());
assert(t, get(sp - 1) == Long);
assert(t, get(sp - 2) == Long);
sp -= 2;
}
void poppedObject() {
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
assert(t, get(sp - 1) == Object);
// clear the slot so it is no longer treated as an object reference
set(--sp, Integer);
}
void storedInt(unsigned index) {
assert(t, index < localSize());
set(index, Integer);
}
void storedLong(unsigned index) {
assert(t, index + 1 < localSize());
set(index, Long);
set(index + 1, Long);
}
void storedObject(unsigned index) {
assert(t, index < localSize());
set(index, Object);
}
// The methods below mirror the JVM dup/swap instruction family in the
// stack map (dup, dup_x1, dup_x2, dup2, dup2_x1, dup2_x2, swap).
void dupped() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 1 >= localSize());
set(sp, get(sp - 1));
++ sp;
}
// dup_x1: duplicate the top value and insert it below the second.
void duppedX1() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 2 >= localSize());
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 1, b2);
set(sp - 2, b1);
set(sp , b1);
++ sp;
}
// dup_x2: duplicate the top value and insert it three slots down.
void duppedX2() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 3 >= localSize());
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 2, b3);
set(sp - 1, b2);
set(sp - 3, b1);
set(sp , b1);
++ sp;
}
// dup2: duplicate the top two slots.
void dupped2() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 2 >= localSize());
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp, b2);
set(sp + 1, b1);
sp += 2;
}
// dup2_x1: duplicate the top two slots below the third.
void dupped2X1() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 3 >= localSize());
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 1, b3);
set(sp - 3, b2);
set(sp , b2);
set(sp - 2, b1);
set(sp + 1, b1);
sp += 2;
}
// dup2_x2: duplicate the top two slots below the fourth.
void dupped2X2() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 4 >= localSize());
uint8_t b4 = get(sp - 4);
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 2, b4);
set(sp - 1, b3);
set(sp - 4, b2);
set(sp , b2);
set(sp - 3, b1);
set(sp + 1, b1);
sp += 2;
}
// swap: exchange the top two slots.
void swapped() {
assert(t, sp - 2 >= localSize());
uint8_t saved = get(sp - 1);
set(sp - 1, get(sp - 2));
set(sp - 2, saved);
}
// Wraps `p` in a DelayedPromise when building a boot image, so the
// address can be fixed up when the image is written; otherwise returns
// `p` unchanged.
Promise* addressPromise(Promise* p) {
BootContext* bc = context->bootContext;
if (bc) {
bc->addresses = new(bc->zone) DelayedPromise(t->m->system, bc->zone, p, bc->addresses);
return bc->addresses;
} else {
return p;
}
}
// Operand for the address promised by `p`.
Compiler::Operand* addressOperand(Promise* p) {
return c->promiseConstant(p, Compiler::AddressType);
}
// Like addressOperand, but for boot images computes the address at
// runtime as (code image base + offset) so it survives relocation.
Compiler::Operand* absoluteAddressOperand(Promise* p) {
return context->bootContext
? c->add
(TargetBytesPerWord, c->memory
(c->register_(t->arch->thread()), Compiler::AddressType,
TargetThreadCodeImage), c->promiseConstant
(new(&context->zone)
OffsetPromise
(p, - reinterpret_cast<intptr_t>(codeAllocator(t)->base)),
Compiler::AddressType))
: addressOperand(p);
}
// Operand for the machine address of bytecode ip `logicalIp`.
Compiler::Operand* machineIp(unsigned logicalIp) {
return c->promiseConstant(c->machineIp(logicalIp), Compiler::AddressType);
}
// Tell the compiler this bytecode index is a potential branch target
// and record the visit in the event log.
void visitLogicalIp(unsigned ip) {
c->visitLogicalIp(ip);
context->eventLog.append(IpEvent);
context->eventLog.append2(ip);
}
// Begin emitting code for the given bytecode index.  If we are inside
// a JSR subroutine, record which subroutine owns this ip.
void startLogicalIp(unsigned ip) {
if (subroutine) {
context->subroutineTable[ip] = subroutine;
}
c->startLogicalIp(ip);
context->eventLog.append(IpEvent);
context->eventLog.append2(ip);
this->ip = ip;
}
// Push without updating the type map ("quiet": map bookkeeping is the
// caller's responsibility).
void pushQuiet(unsigned footprint, Compiler::Operand* o) {
c->push(footprint, o);
}
// Push a two-slot (long/double) value without updating the type map.
void pushLongQuiet(Compiler::Operand* o) {
pushQuiet(2, o);
}
// Pop without updating the type map.
Compiler::Operand* popQuiet(unsigned footprint) {
return c->pop(footprint);
}
// Pop a two-slot (long/double) value without updating the type map.
Compiler::Operand* popLongQuiet() {
  return popQuiet(2);
}
// Push a one-word value and tag the new slot as an int in the type map.
void pushInt(Compiler::Operand* o) {
pushQuiet(1, o);
pushedInt();
}
// Push a return address; addresses are tracked like ints in the type
// map (they are not object references).
void pushAddress(Compiler::Operand* o) {
pushQuiet(1, o);
pushedInt();
}
// Push an object reference and record it in the type map.
void pushObject(Compiler::Operand* o) {
pushQuiet(1, o);
pushedObject();
}
// Account for an object pushed by already-emitted code (no operand to
// track here) and tag the new slot as an object.
void pushObject() {
c->pushed();
pushedObject();
}
// Push a long/double and record it in the type map.
void pushLong(Compiler::Operand* o) {
pushLongQuiet(o);
pushedLong();
}
// Discard `count` one-word slots, updating both the type map and the
// compiler's stack model.
void pop(unsigned count) {
popped(count);
c->popped(count);
}
// Pop a one-word non-object value, updating the type map.
Compiler::Operand* popInt() {
poppedInt();
return popQuiet(1);
}
// Pop a long/double, updating the type map.
Compiler::Operand* popLong() {
poppedLong();
return popLongQuiet();
}
// Pop an object reference, updating the type map.
Compiler::Operand* popObject() {
poppedObject();
return popQuiet(1);
}
// iload: push local `index` as an int.
void loadInt(unsigned index) {
assert(t, index < localSize());
pushInt(loadLocal(context, 1, index));
}
// lload/dload: push the two-slot local pair starting at `index`.
void loadLong(unsigned index) {
assert(t, index < static_cast<unsigned>(localSize() - 1));
pushLong(loadLocal(context, 2, index));
}
// aload: push local `index` as an object reference.
void loadObject(unsigned index) {
assert(t, index < localSize());
pushObject(loadLocal(context, 1, index));
}
// istore: pop an int into local `index` and tag the local accordingly.
void storeInt(unsigned index) {
storeLocal(context, 1, popInt(), index);
storedInt(translateLocalIndex(context, 1, index));
}
// lstore/dstore: pop a long/double into the local pair at `index`.
void storeLong(unsigned index) {
storeLocal(context, 2, popLong(), index);
storedLong(translateLocalIndex(context, 2, index));
}
// Pop an object reference into local `index` and tag it as an object.
void storeObject(unsigned index) {
storeLocal(context, 1, popObject(), index);
storedObject(translateLocalIndex(context, 1, index));
}
// astore: the operand may be either an object reference or a jsr
// return address, so consult the type map to decide which tag the
// local should get.
void storeObjectOrAddress(unsigned index) {
storeLocal(context, 1, popQuiet(1), index);
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
if (get(sp - 1) == Object) {
storedObject(translateLocalIndex(context, 1, index));
} else {
storedInt(translateLocalIndex(context, 1, index));
}
popped(1);
}
// dup: duplicate the top one-word value.
void dup() {
pushQuiet(1, c->peek(1, 0));
dupped();
}
// dup_x1: duplicate the top value beneath the second value.
void dupX1() {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s1);
pushQuiet(1, s0);
duppedX1();
}
// dup_x2: duplicate the top one-word value beneath the next two slots,
// which may be one long/double or two one-word values (decided by the
// type map).
void dupX2() {
Compiler::Operand* s0 = popQuiet(1);
if (get(sp - 2) == Long) {
Compiler::Operand* s1 = popLongQuiet();
pushQuiet(1, s0);
pushLongQuiet(s1);
pushQuiet(1, s0);
} else {
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
duppedX2();
}
// dup2: duplicate the top two slots -- either one long/double or two
// one-word values.
void dup2() {
if (get(sp - 1) == Long) {
pushLongQuiet(c->peek(2, 0));
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2();
}
// dup2_x1: duplicate the top two slots (or one long/double) beneath
// the value below them.
void dup2X1() {
if (get(sp - 1) == Long) {
Compiler::Operand* s0 = popLongQuiet();
Compiler::Operand* s1 = popQuiet(1);
pushLongQuiet(s0);
pushQuiet(1, s1);
pushLongQuiet(s0);
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2X1();
}
// dup2_x2: duplicate the top two slots beneath the next two slots,
// handling each combination of long/double vs. one-word values per the
// type map.
void dup2X2() {
if (get(sp - 1) == Long) {
Compiler::Operand* s0 = popLongQuiet();
if (get(sp - 3) == Long) {
Compiler::Operand* s1 = popLongQuiet();
pushLongQuiet(s0);
pushLongQuiet(s1);
pushLongQuiet(s0);
} else {
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushLongQuiet(s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushLongQuiet(s0);
}
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
Compiler::Operand* s3 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s3);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2X2();
}
// swap: exchange the top two one-word values.
void swap() {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s1);
swapped();
}
// Allocate a TraceElement describing a call site at the current ip and
// link it at the head of the context's trace log.  Storage for the GC
// frame map (mapSize words) is allocated inline after the element.
TraceElement* trace(object target, unsigned flags) {
unsigned mapSize = frameMapSizeInWords(t, context->method);
TraceElement* e = context->traceLog = new
(context->zone.allocate(sizeof(TraceElement) + (mapSize * BytesPerWord)))
TraceElement(context, ip, target, flags, context->traceLog, mapSize);
++ context->traceLogCount;
context->eventLog.append(TraceEvent);
context->eventLog.appendAddress(e);
return e;
}
// Begin a JSR to the subroutine at bytecode index `ip`: push the
// return address, find or create the Subroutine record for the target
// (one record per target ip, shared by all call sites), log a
// PushSubroutineEvent, and reserve a 2-byte slot in the event log that
// endSubroutine later patches with the log position following the
// call.  Returns the offset of that reserved slot.
unsigned startSubroutine(unsigned ip, Promise* returnAddress) {
pushAddress(absoluteAddressOperand(returnAddress));
Subroutine* subroutine = 0;
for (Subroutine* s = context->subroutines; s; s = s->listNext) {
if (s->ip == ip) {
subroutine = s;
break;
}
}
if (subroutine == 0) {
context->subroutines = subroutine = new
(context->zone.allocate(sizeof(Subroutine)))
Subroutine(ip, context->eventLog.length() + 1 + BytesPerWord + 2,
context->subroutines, this->subroutine);
// Lazily create the ip -> subroutine table the first time any
// subroutine is seen in this method.
if (context->subroutineTable == 0) {
unsigned size = codeLength(t, methodCode(t, context->method))
* sizeof(Subroutine*);
context->subroutineTable = static_cast<Subroutine**>
(context->zone.allocate(size));
memset(context->subroutineTable, 0, size);
}
}
subroutine->handle = c->startSubroutine();
this->subroutine = subroutine;
SubroutineCall* call = new
(context->zone.allocate(sizeof(SubroutineCall)))
SubroutineCall(subroutine, returnAddress);
context->eventLog.append(PushSubroutineEvent);
context->eventLog.appendAddress(call);
unsigned nextIndexIndex = context->eventLog.length();
context->eventLog.append2(0);
c->saveLocals();
return nextIndexIndex;
}
// ret: jump back through the return address stored in the given local,
// and record that local's offset from the stack pointer so later
// passes can locate the saved return address.
void returnFromSubroutine(unsigned returnAddressLocal) {
c->returnFromSubroutine
(subroutine->handle, loadLocal(context, 1, returnAddressLocal));
subroutine->stackIndex = localOffsetFromStack
(t, translateLocalIndex(context, 1, returnAddressLocal),
context->method);
}
// Finish a JSR begun by startSubroutine: link the emitted subroutine
// code, pop the return address from the type map, and patch the
// event-log slot reserved by startSubroutine with the current log
// position.
void endSubroutine(unsigned nextIndexIndex) {
c->linkSubroutine(subroutine->handle);
poppedInt();
context->eventLog.append(PopSubroutineEvent);
context->eventLog.set2(nextIndexIndex, context->eventLog.length());
subroutine = subroutine->stackNext;
}
// Shared compilation state for the method being compiled.
Context* context;
// The thread performing the compilation.
MyThread* t;
// Backend compiler used to emit code.
Compiler* c;
// Innermost JSR subroutine currently being compiled, if any.
Subroutine* subroutine;
// Per-slot type tags (Object/Long/etc. -- see get()/set() uses above)
// backing the GC frame maps.
uint8_t* stackMap;
// Current bytecode index.
unsigned ip;
// Current height of the frame in map slots; the asserts above require
// sp >= localSize(), so locals are counted below the operand stack.
unsigned sp;
// NOTE(review): purpose not evident from this chunk -- presumably a
// nesting depth; confirm against the rest of the file.
unsigned level;
};
// Index of the local slot used to stash the receiver (or class) locked
// by a synchronized method -- the slot just past the method's declared
// locals (see releaseLock below).
unsigned
savedTargetIndex(MyThread* t, object method)
{
return codeMaxLocals(t, methodCode(t, method));
}
// Forward declarations; definitions appear later in this file.
object
findCallNode(MyThread* t, void* address);
void
insertCallNode(MyThread* t, object node);
// Search `method`'s exception handler table for a handler covering the
// machine address `ip`, returning the handler's machine address or 0
// if none matches the pending exception.  Element 0 of the table is an
// int array of (start, end, handler) code-offset triples; elements
// 1..n hold the corresponding catch types.
void*
findExceptionHandler(Thread* t, object method, void* ip)
{
if (t->exception) {
object table = codeExceptionHandlerTable(t, methodCode(t, method));
if (table) {
object index = arrayBody(t, table, 0);
uint8_t* compiled = reinterpret_cast<uint8_t*>
(methodCompiled(t, method));
for (unsigned i = 0; i < arrayLength(t, table) - 1; ++i) {
unsigned start = intArrayBody(t, index, i * 3);
unsigned end = intArrayBody(t, index, (i * 3) + 1);
// NOTE(review): the -1 presumably maps a return address just past
// a call back into the calling instruction's range -- confirm.
unsigned key = difference(ip, compiled) - 1;
if (key >= start and key < end) {
object catchType = arrayBody(t, table, i + 1);
if (exceptionMatch(t, catchType, t->exception)) {
return compiled + intArrayBody(t, index, (i * 3) + 2);
}
}
}
}
}
return 0;
}
// Release the monitor held by a synchronized method as its frame is
// unwound: static methods lock their class, instance methods lock the
// object saved in the local slot at savedTargetIndex().  If the lock
// acquisition itself failed, methodLockIsClean is false and nothing is
// released.
void
releaseLock(MyThread* t, object method, void* stack)
{
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
if (t->methodLockIsClean) {
object lock;
if (methodFlags(t, method) & ACC_STATIC) {
lock = methodClass(t, method);
} else {
lock = *localObject
(t, stackForFrame(t, stack, method), method,
savedTargetIndex(t, method));
}
release(t, lock);
} else {
// got an exception while trying to acquire the lock for a
// synchronized method -- don't try to release it, since we
// never succeeded in acquiring it.
t->methodLockIsClean = true;
}
}
}
// Walk the stack (and, when continuations are enabled, the
// continuation chain) from the current throw point to find where
// execution should resume: either a matching exception handler in a
// compiled Java frame, or the native frame that entered Java code.
// Locks of synchronized methods are released for each frame unwound
// past (see releaseLock).
void
findUnwindTarget(MyThread* t, void** targetIp, void** targetFrame,
void** targetStack, object* targetContinuation)
{
void* ip;
void* stack;
object continuation;
if (t->traceContext) {
ip = t->traceContext->ip;
stack = t->traceContext->stack;
continuation = t->traceContext->continuation;
} else {
ip = getIp(t);
stack = t->stack;
continuation = t->continuation;
}
object target = t->trace->targetMethod;
bool mostRecent = true;
*targetIp = 0;
while (*targetIp == 0) {
object method = methodForIp(t, ip);
if (method) {
void* handler = findExceptionHandler(t, method, ip);
if (handler) {
*targetIp = handler;
nextFrame(t, &ip, &stack, method, target, mostRecent);
void** sp = static_cast<void**>(stackForFrame(t, stack, method))
+ t->arch->frameReturnAddressSize();
*targetFrame = static_cast<void**>
(stack) + t->arch->framePointerOffset();
*targetStack = sp;
*targetContinuation = continuation;
// Deliver the exception object into the stack slot the handler
// expects, then clear the thread's pending exception.
sp[localOffset(t, localSize(t, method), method)] = t->exception;
t->exception = 0;
} else {
nextFrame(t, &ip, &stack, method, target, mostRecent);
if (t->exception) {
releaseLock(t, method, stack);
}
target = method;
}
} else {
// Reached the native frame that entered Java code; if
// continuations are active, keep searching the continuation chain
// for a handler.
expect(t, ip);
*targetIp = ip;
*targetFrame = 0;
*targetStack = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
*targetContinuation = continuation;
while (Continuations and *targetContinuation) {
object c = *targetContinuation;
object method = continuationMethod(t, c);
void* handler = findExceptionHandler
(t, method, continuationAddress(t, c));
if (handler) {
t->exceptionHandler = handler;
t->exceptionStackAdjustment
= (stackOffsetFromFrame(t, method)
- ((continuationFramePointerOffset(t, c) / BytesPerWord)
- t->arch->framePointerOffset()
+ t->arch->frameReturnAddressSize())) * BytesPerWord;
t->exceptionOffset
= localOffset(t, localSize(t, method), method) * BytesPerWord;
break;
} else if (t->exception) {
releaseLock(t, method,
reinterpret_cast<uint8_t*>(c)
+ ContinuationBody
+ continuationReturnAddressOffset(t, c)
- t->arch->returnAddressOffset());
}
*targetContinuation = continuationNext(t, c);
}
}
mostRecent = false;
}
}
// Capture the current Java stack as a chain of continuation objects,
// one per compiled frame, copying each frame's contents (including its
// arguments) into the continuation body.  Returns the innermost
// continuation; *targetIp/*targetStack receive the native frame at the
// bottom of the Java portion of the stack.
object
makeCurrentContinuation(MyThread* t, void** targetIp, void** targetStack)
{
void* ip = getIp(t);
void* stack = t->stack;
object context = t->continuation
? continuationContext(t, t->continuation)
: makeContinuationContext(t, 0, 0, 0, 0, t->trace->originalMethod);
PROTECT(t, context);
object target = t->trace->targetMethod;
PROTECT(t, target);
object first = 0;
PROTECT(t, first);
object last = 0;
PROTECT(t, last);
bool mostRecent = true;
*targetIp = 0;
while (*targetIp == 0) {
object method = methodForIp(t, ip);
if (method) {
PROTECT(t, method);
void** top = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize()
+ t->arch->frameFooterSize();
unsigned argumentFootprint
= t->arch->argumentFootprint(methodParameterFootprint(t, target));
unsigned alignment = t->arch->stackAlignmentInWords();
// Arguments beyond the stack alignment live above the frame and
// must be captured with it.
if (TailCalls and argumentFootprint > alignment) {
top += argumentFootprint - alignment;
}
void* nextIp = ip;
nextFrame(t, &nextIp, &stack, method, target, mostRecent);
void** bottom = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
unsigned frameSize = bottom - top;
unsigned totalSize = frameSize
+ t->arch->frameFooterSize()
+ t->arch->argumentFootprint(methodParameterFootprint(t, method));
object c = makeContinuation
(t, 0, context, method, ip,
(frameSize
+ t->arch->frameFooterSize()
+ t->arch->returnAddressOffset()
- t->arch->frameReturnAddressSize()) * BytesPerWord,
(frameSize
+ t->arch->frameFooterSize()
+ t->arch->framePointerOffset()
- t->arch->frameReturnAddressSize()) * BytesPerWord,
totalSize);
memcpy(&continuationBody(t, c, 0), top, totalSize * BytesPerWord);
if (last) {
set(t, last, ContinuationNext, c);
} else {
first = c;
}
last = c;
ip = nextIp;
target = method;
} else {
*targetIp = ip;
*targetStack = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
}
mostRecent = false;
}
expect(t, last);
// Splice the captured frames onto the existing continuation chain.
set(t, last, ContinuationNext, t->continuation);
return first;
}
// Unwind the stack to the handler (or native caller) chosen by
// findUnwindTarget and jump there.  Does not return.
void NO_RETURN
unwind(MyThread* t)
{
void* ip;
void* frame;
void* stack;
object continuation;
findUnwindTarget(t, &ip, &frame, &stack, &continuation);
t->trace->targetMethod = 0;
t->trace->nativeMethod = 0;
transition(t, ip, stack, continuation, t->trace);
vmJump(ip, frame, stack, t, 0, 0);
}
// Thread checkpoint that routes unwind requests to this module's stack
// unwinder.
class MyCheckpoint: public Thread::Checkpoint {
public:
MyCheckpoint(MyThread* t): Checkpoint(t) { }
virtual void unwind() {
local::unwind(static_cast<MyThread*>(t));
}
};
// Forward declarations for thunk-address accessors and helpers defined
// later in this file.
uintptr_t
defaultThunk(MyThread* t);
uintptr_t
nativeThunk(MyThread* t);
uintptr_t
bootNativeThunk(MyThread* t);
uintptr_t
aioobThunk(MyThread* t);
uintptr_t
stackOverflowThunk(MyThread* t);
uintptr_t
virtualThunk(MyThread* t, unsigned index);
bool
unresolved(MyThread* t, uintptr_t methodAddress);
// Entry address for invoking `method`: native methods are entered
// through the boot native thunk, compiled methods directly at their
// compiled address.
uintptr_t
methodAddress(Thread* t, object method)
{
  return (methodFlags(t, method) & ACC_NATIVE)
    ? bootNativeThunk(static_cast<MyThread*>(t))
    : methodCompiled(t, method);
}
// Thunk: run static initialization for class_ (initClass is expected
// to be a no-op for already-initialized classes -- TODO confirm).
void
tryInitClass(MyThread* t, object class_)
{
initClass(t, class_);
}
// Forward declaration; defined later in this file.
void
compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
object method);
// Resolve the (referencing-method, reference) pair created for a
// lazily-bound call site: load the referenced class through the
// referencing method's class loader, then find the named method in its
// hierarchy, throwing NoSuchMethodError on failure.
object
resolveMethod(Thread* t, object pair)
{
object reference = pairSecond(t, pair);
PROTECT(t, reference);
object class_ = resolveClassInObject
(t, classLoader(t, methodClass(t, pairFirst(t, pair))), reference,
ReferenceClass);
return findInHierarchy
(t, class_, referenceName(t, reference), referenceSpec(t, reference),
findMethodInClass, Machine::NoSuchMethodErrorType);
}
// A method is abstract if it has no bytecode and is not native.
bool
methodAbstract(Thread* t, object method)
{
  if (methodCode(t, method)) {
    return false;
  }
  return (methodFlags(t, method) & ACC_NATIVE) == 0;
}
// Make `target` invokable and return its entry address: throws
// AbstractMethodError for abstract targets, compiles the method on
// first use, and records native targets in t->trace->nativeMethod.
int64_t
prepareMethodForCall(MyThread* t, object target)
{
if (methodAbstract(t, target)) {
throwNew(t, Machine::AbstractMethodErrorType, "%s.%s%s",
&byteArrayBody(t, className(t, methodClass(t, target)), 0),
&byteArrayBody(t, methodName(t, target), 0),
&byteArrayBody(t, methodSpec(t, target), 0));
} else {
if (unresolved(t, methodAddress(t, target))) {
PROTECT(t, target);
compile(t, codeAllocator(t), 0, target);
}
if (methodFlags(t, target) & ACC_NATIVE) {
t->trace->nativeMethod = target;
}
return methodAddress(t, target);
}
}
// Thunk: resolve an interface call against the receiver's class and
// return the selected method's entry address, throwing
// NullPointerException for a null receiver.
int64_t
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
{
  if (instance == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  object selected = findInterfaceMethod(t, method, objectClass(t, instance));
  return prepareMethodForCall(t, selected);
}
// Like findInterfaceMethodFromInstance, but first resolves the method
// from a lazy reference pair.
int64_t
findInterfaceMethodFromInstanceAndReference
(MyThread* t, object pair, object instance)
{
PROTECT(t, instance);
object method = resolveMethod(t, pair);
return findInterfaceMethodFromInstance(t, method, instance);
}
// Throw IncompatibleClassChangeError unless the method's staticness
// matches what the call site expects.
void
checkMethod(Thread* t, object method, bool shouldBeStatic)
{
  bool isStatic = (methodFlags(t, method) & ACC_STATIC) != 0;
  if (isStatic != shouldBeStatic) {
    throwNew(t, Machine::IncompatibleClassChangeErrorType,
             "expected %s.%s%s to be %s",
             &byteArrayBody(t, className(t, methodClass(t, method)), 0),
             &byteArrayBody(t, methodName(t, method), 0),
             &byteArrayBody(t, methodSpec(t, method), 0),
             shouldBeStatic ? "static" : "non-static");
  }
}
// Throw IncompatibleClassChangeError unless the field's staticness
// matches what the access site expects.
void
checkField(Thread* t, object field, bool shouldBeStatic)
{
  bool isStatic = (fieldFlags(t, field) & ACC_STATIC) != 0;
  if (isStatic != shouldBeStatic) {
    throwNew(t, Machine::IncompatibleClassChangeErrorType,
             "expected %s.%s to be %s",
             &byteArrayBody(t, className(t, fieldClass(t, field)), 0),
             &byteArrayBody(t, fieldName(t, field), 0),
             shouldBeStatic ? "static" : "non-static");
  }
}
// Thunk: resolve an invokespecial through a lazy reference pair,
// redirecting to the superclass method where the invokespecial rules
// require it (isSpecialMethod), and return the target's entry address.
int64_t
findSpecialMethodFromReference(MyThread* t, object pair)
{
PROTECT(t, pair);
object target = resolveMethod(t, pair);
object class_ = methodClass(t, pairFirst(t, pair));
if (isSpecialMethod(t, target, class_)) {
target = findVirtualMethod(t, target, classSuper(t, class_));
}
checkMethod(t, target, false);
return prepareMethodForCall(t, target);
}
// Thunk: resolve an invokestatic through a lazy reference pair and
// return the target's entry address.
int64_t
findStaticMethodFromReference(MyThread* t, object pair)
{
object target = resolveMethod(t, pair);
checkMethod(t, target, true);
return prepareMethodForCall(t, target);
}
// Thunk: resolve an invokevirtual through a lazy reference pair,
// dispatch on the receiver's class, and return the selected method's
// entry address.
int64_t
findVirtualMethodFromReference(MyThread* t, object pair, object instance)
{
PROTECT(t, instance);
object target = resolveMethod(t, pair);
target = findVirtualMethod(t, target, objectClass(t, instance));
checkMethod(t, target, false);
return prepareMethodForCall(t, target);
}
// Thunk: entry address of an already-resolved method, compiling it on
// demand.
int64_t
getMethodAddress(MyThread* t, object target)
{
return prepareMethodForCall(t, target);
}
// Thunk: resolve the class named by a lazy reference pair and return a
// pointer to its java.lang.Class instance.
int64_t
getJClassFromReference(MyThread* t, object pair)
{
return reinterpret_cast<intptr_t>
(getJClass
(t, resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair)))));
}
// dcmpg thunk: operands arrive as raw bits in reverse (stack) order;
// any NaN operand compares as 1 (greater).
int64_t
compareDoublesG(uint64_t bi, uint64_t ai)
{
  double a = bitsToDouble(ai);
  double b = bitsToDouble(bi);
  if (a == b) {
    return 0;
  }
  if (a < b) {
    return -1;
  }
  // a > b, or at least one operand is NaN.
  return 1;
}
// dcmpl thunk: operands arrive as raw bits in reverse (stack) order;
// any NaN operand compares as -1 (less).
int64_t
compareDoublesL(uint64_t bi, uint64_t ai)
{
  double a = bitsToDouble(ai);
  double b = bitsToDouble(bi);
  if (a == b) {
    return 0;
  }
  if (a > b) {
    return 1;
  }
  // a < b, or at least one operand is NaN.
  return -1;
}
// fcmpg thunk: operands arrive as raw bits in reverse (stack) order;
// any NaN operand compares as 1 (greater).
int64_t
compareFloatsG(uint32_t bi, uint32_t ai)
{
  float a = bitsToFloat(ai);
  float b = bitsToFloat(bi);
  if (a == b) {
    return 0;
  }
  if (a < b) {
    return -1;
  }
  // a > b, or at least one operand is NaN.
  return 1;
}
// fcmpl thunk: operands arrive as raw bits in reverse (stack) order;
// any NaN operand compares as -1 (less).
int64_t
compareFloatsL(uint32_t bi, uint32_t ai)
{
  float a = bitsToFloat(ai);
  float b = bitsToFloat(bi);
  if (a == b) {
    return 0;
  }
  if (a > b) {
    return 1;
  }
  // a < b, or at least one operand is NaN.
  return -1;
}
// lcmp thunk.  The JVM specifies a *signed* comparison, but the
// operands arrive here as raw uint64_t bits (in reverse stack order),
// so reinterpret them as signed before comparing; comparing the
// unsigned values directly would order every negative long above every
// positive one.
int64_t
compareLongs(uint64_t b, uint64_t a)
{
  int64_t sa = static_cast<int64_t>(a);
  int64_t sb = static_cast<int64_t>(b);
  if (sa < sb) {
    return -1;
  } else if (sa > sb) {
    return 1;
  } else {
    return 0;
  }
}
// dadd thunk on raw double bits (operands in reverse stack order).
uint64_t
addDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) + bitsToDouble(b));
}
// dsub thunk on raw double bits (operands in reverse stack order).
uint64_t
subtractDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) - bitsToDouble(b));
}
// dmul thunk on raw double bits.
uint64_t
multiplyDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) * bitsToDouble(b));
}
// ddiv thunk on raw double bits (IEEE division; no exception on /0).
uint64_t
divideDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) / bitsToDouble(b));
}
// drem thunk on raw double bits, via C fmod.
uint64_t
moduloDouble(uint64_t b, uint64_t a)
{
return doubleToBits(fmod(bitsToDouble(a), bitsToDouble(b)));
}
// dneg thunk on raw double bits.
uint64_t
negateDouble(uint64_t a)
{
return doubleToBits(- bitsToDouble(a));
}
// Math.sqrt intrinsic fallback on raw double bits.
uint64_t
squareRootDouble(uint64_t a)
{
return doubleToBits(sqrt(bitsToDouble(a)));
}
// d2f thunk: narrow a double (raw bits) to a float (raw bits).
uint64_t
doubleToFloat(int64_t a)
{
return floatToBits(static_cast<float>(bitsToDouble(a)));
}
// d2i thunk.  The JVM requires saturating conversion: NaN maps to 0
// and out-of-range values clamp to INT32_MIN/INT32_MAX.  A bare
// static_cast is undefined behavior in C++ for such inputs (and e.g.
// x86 cvttsd2si yields INT32_MIN for +Inf where Java requires
// INT32_MAX), so clamp explicitly.
int64_t
doubleToInt(int64_t a)
{
  double v = bitsToDouble(a);
  if (v != v) {
    return 0; // NaN
  } else if (v >= 2147483647.0) {
    return 2147483647LL;
  } else if (v <= -2147483648.0) {
    return -2147483648LL;
  } else {
    return static_cast<int32_t>(v);
  }
}
// d2l thunk.  Saturating per the JVM spec: NaN maps to 0 and
// out-of-range values clamp to INT64_MIN/INT64_MAX; a bare static_cast
// is undefined for such inputs.  2^63 is exactly representable as a
// double, so the bounds below are exact.
int64_t
doubleToLong(int64_t a)
{
  double v = bitsToDouble(a);
  if (v != v) {
    return 0; // NaN
  } else if (v >= 9223372036854775808.0) {
    return 9223372036854775807LL;
  } else if (v <= -9223372036854775808.0) {
    return -9223372036854775807LL - 1;
  } else {
    return static_cast<int64_t>(v);
  }
}
// fadd thunk on raw float bits (operands in reverse stack order).
uint64_t
addFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) + bitsToFloat(b));
}
// fsub thunk on raw float bits.
uint64_t
subtractFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) - bitsToFloat(b));
}
// fmul thunk on raw float bits.
uint64_t
multiplyFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) * bitsToFloat(b));
}
// fdiv thunk on raw float bits (IEEE division; no exception on /0).
uint64_t
divideFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) / bitsToFloat(b));
}
// frem thunk on raw float bits.  fmod computes in double precision,
// which is exact for float inputs, and the result is narrowed back to
// float by floatToBits' parameter conversion.
uint64_t
moduloFloat(uint32_t b, uint32_t a)
{
return floatToBits(fmod(bitsToFloat(a), bitsToFloat(b)));
}
// fneg thunk on raw float bits.
uint64_t
negateFloat(uint32_t a)
{
return floatToBits(- bitsToFloat(a));
}
// Math.abs(float) intrinsic fallback on raw float bits, via fabsf.
uint64_t
absoluteFloat(uint32_t a)
{
return floatToBits(fabsf(bitsToFloat(a)));
}
// Math.abs(long) thunk.  Negation is performed in unsigned arithmetic
// so that INT64_MIN -- whose two's-complement negation is itself --
// wraps back to INT64_MIN as the JVM specifies, instead of triggering
// undefined behavior via signed overflow (`-a` with a == INT64_MIN).
int64_t
absoluteLong(int64_t a)
{
  return a < 0 ? static_cast<int64_t>(0 - static_cast<uint64_t>(a)) : a;
}
// Math.abs(int) thunk.  Unsigned negation keeps INT32_MIN -> INT32_MIN
// (JVM semantics) well-defined; `-a` with a == INT32_MIN is signed
// overflow, i.e. undefined behavior.
int64_t
absoluteInt(int32_t a)
{
  return a < 0 ? static_cast<int32_t>(0 - static_cast<uint32_t>(a)) : a;
}
// Estimate the heap space needed to build a stack trace for the
// current thread: count the stack frames with a walker, then size one
// array slot plus one trace element per frame.
unsigned
traceSize(Thread* t)
{
class Counter: public Processor::StackVisitor {
public:
Counter(): count(0) { }
virtual bool visit(Processor::StackWalker*) {
++ count;
return true;
}
unsigned count;
} counter;
t->m->processor->walkStack(t, &counter);
return FixedSizeOfArray + (counter.count * ArrayElementSizeOfArray)
+ (counter.count * FixedSizeOfTraceElement);
}
// Throw ArithmeticException (e.g. integer division by zero), reserving
// heap for the exception and its stack trace up front.
void NO_RETURN
throwArithmetic(MyThread* t)
{
if (ensure(t, FixedSizeOfArithmeticException + traceSize(t))) {
atomicOr(&(t->flags), Thread::TracingFlag);
THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));
throwNew(t, Machine::ArithmeticExceptionType);
} else {
// not enough memory available for a new exception and stack trace
// -- use a preallocated instance instead
throw_(t, root(t, Machine::ArithmeticException));
}
}
// ldiv thunk: throws ArithmeticException on division by zero.  The
// b == -1 case is handled separately because INT64_MIN / -1 overflows
// -- undefined behavior in C++ and a SIGFPE trap on x86 -- while the
// JVM requires it to wrap to INT64_MIN, which the unsigned negation
// below produces (a / -1 == -a for all other values).
int64_t
divideLong(MyThread* t, int64_t b, int64_t a)
{
  if (UNLIKELY(b == 0)) {
    throwArithmetic(t);
  }
  if (UNLIKELY(b == -1)) {
    return static_cast<int64_t>(0 - static_cast<uint64_t>(a));
  }
  return a / b;
}
// idiv thunk: throws ArithmeticException on division by zero.  The
// b == -1 case avoids INT32_MIN / -1, which is undefined behavior (and
// a SIGFPE trap on x86); the JVM requires it to wrap to INT32_MIN,
// which the unsigned negation produces (a / -1 == -a otherwise).
int64_t
divideInt(MyThread* t, int32_t b, int32_t a)
{
  if (UNLIKELY(b == 0)) {
    throwArithmetic(t);
  }
  if (UNLIKELY(b == -1)) {
    return static_cast<int32_t>(0 - static_cast<uint32_t>(a));
  }
  return a / b;
}
// lrem thunk: throws ArithmeticException on division by zero.  For
// b == -1 the result is always 0 per the JVM spec, but evaluating
// a % -1 with a == INT64_MIN overflows (undefined behavior, SIGFPE on
// x86), so answer directly.
int64_t
moduloLong(MyThread* t, int64_t b, int64_t a)
{
  if (UNLIKELY(b == 0)) {
    throwArithmetic(t);
  }
  if (UNLIKELY(b == -1)) {
    return 0;
  }
  return a % b;
}
// irem thunk: throws ArithmeticException on division by zero.  For
// b == -1 the result is always 0 per the JVM spec, but evaluating
// a % -1 with a == INT32_MIN overflows (undefined behavior, SIGFPE on
// x86), so answer directly.
int64_t
moduloInt(MyThread* t, int32_t b, int32_t a)
{
  if (UNLIKELY(b == 0)) {
    throwArithmetic(t);
  }
  if (UNLIKELY(b == -1)) {
    return 0;
  }
  return a % b;
}
// f2d thunk: widen a float (raw bits) to a double (raw bits).
uint64_t
floatToDouble(int32_t a)
{
return doubleToBits(static_cast<double>(bitsToFloat(a)));
}
// f2i thunk.  Saturating per the JVM spec: NaN maps to 0 and
// out-of-range values clamp to INT32_MIN/INT32_MAX; a bare static_cast
// is undefined for such inputs.  2^31 is exactly representable as a
// float, so the bounds are exact.
int64_t
floatToInt(int32_t a)
{
  float v = bitsToFloat(a);
  if (v != v) {
    return 0; // NaN
  } else if (v >= 2147483648.0f) {
    return 2147483647LL;
  } else if (v <= -2147483648.0f) {
    return -2147483648LL;
  } else {
    return static_cast<int32_t>(v);
  }
}
// f2l thunk.  Saturating per the JVM spec: NaN maps to 0 and
// out-of-range values clamp to INT64_MIN/INT64_MAX; a bare static_cast
// is undefined for such inputs.  2^63 is exactly representable as a
// float, so the bounds are exact.
int64_t
floatToLong(int32_t a)
{
  float v = bitsToFloat(a);
  if (v != v) {
    return 0; // NaN
  } else if (v >= 9223372036854775808.0f) {
    return 9223372036854775807LL;
  } else if (v <= -9223372036854775808.0f) {
    return -9223372036854775807LL - 1;
  } else {
    return static_cast<int64_t>(v);
  }
}
// i2d thunk: convert an int to a double, returned as raw bits.
uint64_t
intToDouble(int32_t a)
{
return doubleToBits(static_cast<double>(a));
}
// i2f thunk: convert an int to a float, returned as raw bits.
uint64_t
intToFloat(int32_t a)
{
return floatToBits(static_cast<float>(a));
}
// l2d thunk: convert a long to a double, returned as raw bits.
uint64_t
longToDouble(int64_t a)
{
return doubleToBits(static_cast<double>(a));
}
// l2f thunk: convert a long to a float, returned as raw bits.
uint64_t
longToFloat(int64_t a)
{
return floatToBits(static_cast<float>(a));
}
// anewarray thunk: allocate an object array of the given class,
// throwing NegativeArraySizeException for negative lengths per the JVM
// spec.
uint64_t
makeBlankObjectArray(MyThread* t, object class_, int32_t length)
{
  if (length < 0) {
    throwNew(t, Machine::NegativeArraySizeExceptionType, "%d", length);
  }
  return reinterpret_cast<uint64_t>(makeObjectArray(t, class_, length));
}
// anewarray thunk for lazily-resolved element classes: resolve the
// class named by the reference pair, then allocate.
uint64_t
makeBlankObjectArrayFromReference(MyThread* t, object pair,
int32_t length)
{
return makeBlankObjectArray
(t, resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair))), length);
}
// newarray thunk: allocate a primitive array for the given element
// type code (T_BOOLEAN .. T_LONG), throwing
// NegativeArraySizeException for negative lengths.
uint64_t
makeBlankArray(MyThread* t, unsigned type, int32_t length)
{
  if (length < 0) {
    throwNew(t, Machine::NegativeArraySizeExceptionType, "%d", length);
  }

  object (*make)(Thread*, uintptr_t) = 0;
  switch (type) {
  case T_BOOLEAN: make = makeBooleanArray; break;
  case T_CHAR:    make = makeCharArray;    break;
  case T_FLOAT:   make = makeFloatArray;   break;
  case T_DOUBLE:  make = makeDoubleArray;  break;
  case T_BYTE:    make = makeByteArray;    break;
  case T_SHORT:   make = makeShortArray;   break;
  case T_INT:     make = makeIntArray;     break;
  case T_LONG:    make = makeLongArray;    break;
  default: abort(t);
  }
  return reinterpret_cast<uintptr_t>(make(t, length));
}
// lookupswitch thunk: binary search over `count` (key, address) pairs
// laid out contiguously as [k0, a0, k1, a1, ...] and sorted by key.
// Returns the address paired with `key`, or `default_` if absent.
uint64_t
lookUpAddress(int32_t key, uintptr_t* start, int32_t count,
uintptr_t default_)
{
  int32_t lo = 0;
  int32_t hi = count;
  while (lo < hi) {
    int32_t middle = lo + ((hi - lo) / 2);
    uintptr_t* entry = start + (middle * 2);
    int32_t k = static_cast<int32_t>(*entry);
    if (k == key) {
      return entry[1];
    }
    if (key < k) {
      hi = middle;
    } else {
      lo = middle + 1;
    }
  }
  return default_;
}
// Store `value` into object `o` at `offset` via set(), throwing
// NullPointerException for a null target.
void
setMaybeNull(MyThread* t, object o, unsigned offset, object value)
{
  if (o == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  set(t, o, offset, value);
}
// monitorenter thunk: acquire o's monitor, throwing
// NullPointerException for a null operand.
void
acquireMonitorForObject(MyThread* t, object o)
{
  if (o == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  acquire(t, o);
}
// Like acquireMonitorForObject, but for a synchronized method's
// prologue: methodLockIsClean is cleared around the acquire so that an
// exception raised while acquiring does not cause releaseLock to
// release a lock we never obtained.
void
acquireMonitorForObjectOnEntrance(MyThread* t, object o)
{
  if (o == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  t->methodLockIsClean = false;
  acquire(t, o);
  t->methodLockIsClean = true;
}
// monitorexit thunk: release o's monitor, throwing
// NullPointerException for a null operand.
void
releaseMonitorForObject(MyThread* t, object o)
{
  if (o == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  release(t, o);
}
// Build the nested arrays for multianewarray.  The dimension counts
// arrive on the thread stack innermost-first, so they are reversed
// into `counts`; any negative count raises
// NegativeArraySizeException.  The outermost array is allocated here
// and the inner dimensions are filled in by populateMultiArray.
object
makeMultidimensionalArray2(MyThread* t, object class_, uintptr_t* countStack,
int32_t dimensions)
{
PROTECT(t, class_);
THREAD_RUNTIME_ARRAY(t, int32_t, counts, dimensions);
for (int i = dimensions - 1; i >= 0; --i) {
RUNTIME_ARRAY_BODY(counts)[i] = countStack[dimensions - i - 1];
if (UNLIKELY(RUNTIME_ARRAY_BODY(counts)[i] < 0)) {
throwNew(t, Machine::NegativeArraySizeExceptionType, "%d",
RUNTIME_ARRAY_BODY(counts)[i]);
return 0;
}
}
object array = makeArray(t, RUNTIME_ARRAY_BODY(counts)[0]);
setObjectClass(t, array, class_);
PROTECT(t, array);
populateMultiArray(t, array, RUNTIME_ARRAY_BODY(counts), 0, dimensions);
return array;
}
// multianewarray thunk: the dimension counts live on the caller's
// stack, `offset` words above the saved stack pointer.
uint64_t
makeMultidimensionalArray(MyThread* t, object class_, int32_t dimensions,
int32_t offset)
{
return reinterpret_cast<uintptr_t>
(makeMultidimensionalArray2
(t, class_, static_cast<uintptr_t*>(t->stack) + offset, dimensions));
}
// multianewarray thunk for lazily-resolved array classes: resolve the
// class named by the reference pair first.
uint64_t
makeMultidimensionalArrayFromReference(MyThread* t, object pair,
int32_t dimensions,
int32_t offset)
{
return makeMultidimensionalArray
(t, resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair))), dimensions, offset);
}
// Thunk behind the bounds-check failure path: throw
// ArrayIndexOutOfBoundsException, reserving heap for the exception and
// its stack trace up front.
void NO_RETURN
throwArrayIndexOutOfBounds(MyThread* t)
{
if (ensure(t, FixedSizeOfArrayIndexOutOfBoundsException + traceSize(t))) {
atomicOr(&(t->flags), Thread::TracingFlag);
THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));
throwNew(t, Machine::ArrayIndexOutOfBoundsExceptionType);
} else {
// not enough memory available for a new exception and stack trace
// -- use a preallocated instance instead
throw_(t, root(t, Machine::ArrayIndexOutOfBoundsException));
}
}
// Thunk behind the stack-limit check: throw StackOverflowError.
void NO_RETURN
throwStackOverflow(MyThread* t)
{
throwNew(t, Machine::StackOverflowErrorType);
}
// athrow thunk: throwing a null reference raises NullPointerException
// instead.
void NO_RETURN
throw_(MyThread* t, object o)
{
  if (o == 0) {
    throwNew(t, Machine::NullPointerExceptionType);
  }
  vm::throw_(t, o);
}
// checkcast thunk: null always passes; otherwise throw
// ClassCastException unless the object's class is assignable to
// class_.
void
checkCast(MyThread* t, object class_, object o)
{
  if (o == 0) {
    return;
  }
  if (isAssignableFrom(t, class_, objectClass(t, o))) {
    return;
  }
  throwNew
    (t, Machine::ClassCastExceptionType, "%s as %s",
     &byteArrayBody(t, className(t, objectClass(t, o)), 0),
     &byteArrayBody(t, className(t, class_), 0));
}
// checkcast thunk for lazily-resolved classes: resolve the class named
// by the reference pair, then delegate to checkCast.
void
checkCastFromReference(MyThread* t, object pair, object o)
{
PROTECT(t, o);
object c = resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair)));
checkCast(t, c, o);
}
// Resolve the (referencing-method, reference) pair created for a
// lazily-bound field access: load the referenced class through the
// referencing method's class loader, then find the named field in its
// hierarchy, throwing NoSuchFieldError on failure.
object
resolveField(Thread* t, object pair)
{
object reference = pairSecond(t, pair);
PROTECT(t, reference);
object class_ = resolveClassInObject
(t, classLoader(t, methodClass(t, pairFirst(t, pair))), reference,
ReferenceClass);
return findInHierarchy
(t, class_, referenceName(t, reference), referenceSpec(t, reference),
findFieldInClass, Machine::NoSuchFieldErrorType);
}
// Read a field of `target` (an instance, or a class static table) as
// raw bits, widening to 64 bits according to the field's type code.
uint64_t
getFieldValue(Thread* t, object target, object field)
{
switch (fieldCode(t, field)) {
case ByteField:
case BooleanField:
return cast<int8_t>(target, fieldOffset(t, field));
case CharField:
case ShortField:
return cast<int16_t>(target, fieldOffset(t, field));
case FloatField:
case IntField:
return cast<int32_t>(target, fieldOffset(t, field));
case DoubleField:
case LongField:
return cast<int64_t>(target, fieldOffset(t, field));
case ObjectField:
return cast<intptr_t>(target, fieldOffset(t, field));
default:
abort(t);
}
}
// getstatic thunk for lazily-resolved fields: resolve, ensure the
// declaring class is initialized, and read the value under the field
// read lock.
uint64_t
getStaticFieldValueFromReference(MyThread* t, object pair)
{
object field = resolveField(t, pair);
PROTECT(t, field);
initClass(t, fieldClass(t, field));
ACQUIRE_FIELD_FOR_READ(t, field);
return getFieldValue(t, classStaticTable(t, fieldClass(t, field)), field);
}
// getfield thunk for lazily-resolved fields: resolve and read the
// instance field under the field read lock.
uint64_t
getFieldValueFromReference(MyThread* t, object pair, object instance)
{
PROTECT(t, instance);
object field = resolveField(t, pair);
PROTECT(t, field);
ACQUIRE_FIELD_FOR_READ(t, field);
return getFieldValue(t, instance, field);
}
// putstatic thunk for lazily-resolved long/double fields: resolve,
// initialize the declaring class, and store the 64-bit value under the
// field write lock.
void
setStaticLongFieldValueFromReference(MyThread* t, object pair, uint64_t value)
{
object field = resolveField(t, pair);
PROTECT(t, field);
initClass(t, fieldClass(t, field));
ACQUIRE_FIELD_FOR_WRITE(t, field);
cast<int64_t>
(classStaticTable(t, fieldClass(t, field)), fieldOffset(t, field)) = value;
}
// putfield thunk for lazily-resolved long/double instance fields.
void
setLongFieldValueFromReference(MyThread* t, object pair, object instance,
uint64_t value)
{
PROTECT(t, instance);
object field = resolveField(t, pair);
PROTECT(t, field);
ACQUIRE_FIELD_FOR_WRITE(t, field);
cast<int64_t>(instance, fieldOffset(t, field)) = value;
}
// putstatic thunk for lazily-resolved reference fields: the store goes
// through set() so the collector sees the new reference.
void
setStaticObjectFieldValueFromReference(MyThread* t, object pair, object value)
{
PROTECT(t, value);
object field = resolveField(t, pair);
PROTECT(t, field);
initClass(t, fieldClass(t, field));
ACQUIRE_FIELD_FOR_WRITE(t, field);
set(t, classStaticTable(t, fieldClass(t, field)), fieldOffset(t, field),
value);
}
// putfield thunk for lazily-resolved reference instance fields; the
// store goes through set() so the collector sees the new reference.
void
setObjectFieldValueFromReference(MyThread* t, object pair, object instance,
object value)
{
PROTECT(t, instance);
PROTECT(t, value);
object field = resolveField(t, pair);
PROTECT(t, field);
ACQUIRE_FIELD_FOR_WRITE(t, field);
set(t, instance, fieldOffset(t, field), value);
}
// Store a 32-bit-or-smaller primitive into a field of `target`
// according to the field's type code.  Longs, doubles and references
// are handled by the dedicated helpers above, hence the abort for
// their codes.
void
setFieldValue(MyThread* t, object target, object field, uint32_t value)
{
switch (fieldCode(t, field)) {
case ByteField:
case BooleanField:
cast<int8_t>(target, fieldOffset(t, field)) = value;
break;
case CharField:
case ShortField:
cast<int16_t>(target, fieldOffset(t, field)) = value;
break;
case FloatField:
case IntField:
cast<int32_t>(target, fieldOffset(t, field)) = value;
break;
default:
abort(t);
}
}
// putstatic thunk for lazily-resolved <=32-bit primitive fields.
void
setStaticFieldValueFromReference(MyThread* t, object pair, uint32_t value)
{
object field = resolveField(t, pair);
PROTECT(t, field);
initClass(t, fieldClass(t, field));
ACQUIRE_FIELD_FOR_WRITE(t, field);
setFieldValue(t, classStaticTable(t, fieldClass(t, field)), field, value);
}
// putfield thunk for lazily-resolved <=32-bit primitive instance
// fields.
void
setFieldValueFromReference(MyThread* t, object pair, object instance,
uint32_t value)
{
PROTECT(t, instance);
object field = resolveField(t, pair);
PROTECT(t, field);
ACQUIRE_FIELD_FOR_WRITE(t, field);
setFieldValue(t, instance, field, value);
}
// instanceof thunk: widen instanceOf's result to the 64-bit thunk
// return convention.
uint64_t
instanceOf64(Thread* t, object class_, object o)
{
return instanceOf(t, class_, o);
}
// instanceof thunk for lazily-resolved classes.
uint64_t
instanceOfFromReference(Thread* t, object pair, object o)
{
PROTECT(t, o);
object c = resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair)));
return instanceOf64(t, c, o);
}
// new thunk (general case) returning the allocation as a 64-bit value.
uint64_t
makeNewGeneral64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(makeNewGeneral(t, class_));
}
// new thunk (simple case) returning the allocation as a 64-bit value.
uint64_t
makeNew64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(makeNew(t, class_));
}
// new thunk for lazily-resolved classes.
uint64_t
makeNewFromReference(Thread* t, object pair)
{
return makeNewGeneral64
(t, resolveClass
(t, classLoader(t, methodClass(t, pairFirst(t, pair))),
referenceName(t, pairSecond(t, pair))));
}
// ldc-of-a-class thunk: return the class's java.lang.Class instance as
// a 64-bit value.
uint64_t
getJClass64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(getJClass(t, class_));
}
// Thunk: exercise the allocation stress hook and run a minor
// collection if the thread had to fall back to the backup heap.
void
gcIfNecessary(MyThread* t)
{
stress(t);
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
collect(t, Heap::MinorCollection);
}
}
// Size in bytes of a method's return value, keyed by its field type
// code.
unsigned
resultSize(MyThread* t, unsigned code)
{
  switch (code) {
  case VoidField:
    return 0;
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case FloatField:
  case IntField:
    return 4;
  case LongField:
  case DoubleField:
    return 8;
  case ObjectField:
    return TargetBytesPerWord;
  default:
    abort(t);
  }
}
// Push a call's result onto the frame's operand stack with the tag
// appropriate for its field type code.
void
pushReturnValue(MyThread* t, Frame* frame, unsigned code,
Compiler::Operand* result)
{
switch (code) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField:
return frame->pushInt(result);
case ObjectField:
return frame->pushObject(result);
case LongField:
case DoubleField:
return frame->pushLong(result);
default:
abort(t);
}
}
// Pop a value of the given field type code from the frame's operand
// stack (used when compiling putfield/putstatic).
Compiler::Operand*
popField(MyThread* t, Frame* frame, int code)
{
switch (code) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField:
return frame->popInt();
case DoubleField:
case LongField:
return frame->popLong();
case ObjectField:
return frame->popObject();
default: abort(t);
}
}
// Map a field type code to the compiler's operand category.
Compiler::OperandType
operandTypeForFieldCode(Thread* t, unsigned code)
{
  switch (code) {
  case FloatField:
  case DoubleField:
    return Compiler::FloatType;
  case ObjectField:
    return Compiler::ObjectType;
  case VoidField:
    return Compiler::VoidType;
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case IntField:
  case LongField:
    return Compiler::IntegerType;
  default:
    abort(t);
  }
}
// Decide whether a call from anywhere in the JIT code region to
// `target` could exceed the architecture's maximum immediate branch
// displacement and therefore needs the long-jump form.  Conservative:
// the distance is measured from whichever end of the code region is
// farther from the target.
bool
useLongJump(MyThread* t, uintptr_t target)
{
uintptr_t reach = t->arch->maximumImmediateJump();
FixedAllocator* a = codeAllocator(t);
uintptr_t start = reinterpret_cast<uintptr_t>(a->base);
uintptr_t end = reinterpret_cast<uintptr_t>(a->base) + a->capacity;
assert(t, end - start < reach);
return (target > end && (target - start) > reach)
or (target < start && (end - target) > reach);
}
// Emit a direct (statically-bound) call to `target`.  `useThunk`
// forces the call through the compile/native thunk (used while the
// target is unresolved or uncompiled); tail calls to native methods
// always go through a thunk.  `rSize` is the result size in bytes and
// `addressPromise`, when non-null, promises the target's eventual
// address (boot-image compilation).
Compiler::Operand*
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall,
bool useThunk, unsigned rSize, Promise* addressPromise)
{
Compiler* c = frame->c;
unsigned flags = (TailCalls and tailCall ? Compiler::TailJump : 0);
unsigned traceFlags;
// Targets beyond the immediate branch range need the long call form,
// and the trace must record that so the call site can be patched
// correctly later.
if (addressPromise == 0 and useLongJump(t, methodAddress(t, target))) {
flags |= Compiler::LongJumpOrCall;
traceFlags = TraceElement::LongCall;
} else {
traceFlags = 0;
}
if (useThunk
or (TailCalls and tailCall and (methodFlags(t, target) & ACC_NATIVE)))
{
if (frame->context->bootContext == 0) {
flags |= Compiler::Aligned;
}
if (TailCalls and tailCall) {
// Tail call through a thunk: stash this call site's address in
// the thread's tail-address slot, then exit to the appropriate
// thunk, which will locate the call via the trace element.
traceFlags |= TraceElement::TailCall;
TraceElement* trace = frame->trace(target, traceFlags);
Promise* returnAddressPromise = new
(frame->context->zone.allocate(sizeof(TraceElementPromise)))
TraceElementPromise(t->m->system, trace);
Compiler::Operand* result = c->stackCall
(c->promiseConstant(returnAddressPromise, Compiler::AddressType),
flags,
trace,
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
c->store
(TargetBytesPerWord,
frame->absoluteAddressOperand(returnAddressPromise),
TargetBytesPerWord, c->memory
(c->register_(t->arch->thread()), Compiler::AddressType,
TargetThreadTailAddress));
c->exit
(c->constant
((methodFlags(t, target) & ACC_NATIVE)
? nativeThunk(t) : defaultThunk(t),
Compiler::AddressType));
return result;
} else {
// Ordinary call through the compile thunk.
return c->stackCall
(c->constant(defaultThunk(t), Compiler::AddressType),
flags,
frame->trace(target, traceFlags),
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
}
} else {
// Direct call to the (promised or known) target address.
Compiler::Operand* address =
(addressPromise
? c->promiseConstant(addressPromise, Compiler::AddressType)
: c->constant(methodAddress(t, target), Compiler::AddressType));
return c->stackCall
(address,
flags,
tailCall ? 0 : frame->trace
((methodFlags(t, target) & ACC_NATIVE) ? target : 0, 0),
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
}
}
// Compile a direct invocation of target, choosing between a thunked
// call (callee not yet compiled, class needs initialization, or
// bootimage indirection) and a direct call.  Pops the arguments and
// pushes the result.  Returns the tailCall flag actually used (it is
// cleared for empty methods, which are elided entirely).
bool
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall)
{
  unsigned rSize = resultSize(t, methodReturnCode(t, target));

  Compiler::Operand* result = 0;

  if (emptyMethod(t, target)) {
    // a method with an empty body need not be called at all
    tailCall = false;
  } else {
    BootContext* bc = frame->context->bootContext;
    if (bc) {
      // bootimage build: when the call can be bound directly, record
      // a promise so the image builder can patch in the final address
      if ((methodClass(t, target) == methodClass(t, frame->context->method)
           or (not classNeedsInit(t, methodClass(t, target))))
          and (not (TailCalls and tailCall
                    and (methodFlags(t, target) & ACC_NATIVE))))
      {
        Promise* p = new(bc->zone) ListenPromise(t->m->system, bc->zone);

        // makePointer/makeTriple may allocate, hence the PROTECT
        PROTECT(t, target);
        object pointer = makePointer(t, p);
        bc->calls = makeTriple(t, target, pointer, bc->calls);

        result = compileDirectInvoke
          (t, frame, target, tailCall, false, rSize, p);
      } else {
        result = compileDirectInvoke
          (t, frame, target, tailCall, true, rSize, 0);
      }
    } else if (unresolved(t, methodAddress(t, target))
               or classNeedsInit(t, methodClass(t, target)))
    {
      // not yet compiled, or requires class initialization first:
      // go through the thunk
      result = compileDirectInvoke
        (t, frame, target, tailCall, true, rSize, 0);
    } else {
      result = compileDirectInvoke
        (t, frame, target, tailCall, false, rSize, 0);
    }
  }

  frame->pop(methodParameterFootprint(t, target));

  if (rSize) {
    pushReturnValue(t, frame, methodReturnCode(t, target), result);
  }

  return tailCall;
}
unsigned
methodReferenceParameterFootprint(Thread* t, object reference, bool isStatic)
{
  // Footprint, in stack slots, of the parameters described by an
  // unresolved method reference's spec string.
  const char* spec = reinterpret_cast<const char*>
    (&byteArrayBody(t, referenceSpec(t, reference), 0));
  return parameterFootprint(t, spec, isStatic);
}
int
methodReferenceReturnCode(Thread* t, object reference)
{
  // Return-type code encoded in an unresolved method reference's
  // spec string.
  unsigned parameterCount;
  unsigned returnCode;
  const char* spec = reinterpret_cast<const char*>
    (&byteArrayBody(t, referenceSpec(t, reference), 0));
  scanMethodSpec(t, spec, &parameterCount, &returnCode);
  return returnCode;
}
void
compileReferenceInvoke(MyThread* t, Frame* frame, Compiler::Operand* method,
                       object reference, bool isStatic, bool tailCall)
{
  // Emit a call through a runtime-computed address for an unresolved
  // method reference, then fix up the operand stack.
  int returnCode = methodReferenceReturnCode(t, reference);
  unsigned footprint
    = methodReferenceParameterFootprint(t, reference, isStatic);
  unsigned resultFootprint = resultSize(t, returnCode);

  Compiler::Operand* result = frame->c->stackCall
    (method,
     tailCall ? Compiler::TailJump : 0,
     frame->trace(0, 0),
     resultFootprint,
     operandTypeForFieldCode(t, returnCode),
     footprint);

  frame->pop(footprint);

  if (resultFootprint) {
    pushReturnValue(t, frame, returnCode, result);
  }
}
// Invoke an unresolved method reference via a resolution thunk: the
// thunk receives a (calling method, reference) pair, resolves the
// target, and returns its code address, which is then called by
// compileReferenceInvoke.
void
compileDirectReferenceInvoke(MyThread* t, Frame* frame, Thunk thunk,
                             object reference, bool isStatic, bool tailCall)
{
  Compiler* c = frame->c;

  // makePair may allocate, hence the PROTECT
  PROTECT(t, reference);

  object pair = makePair(t, frame->context->method, reference);

  compileReferenceInvoke
    (t, frame, c->call
     (c->constant(getThunk(t, thunk), Compiler::AddressType),
      0,
      frame->trace(0, 0),
      TargetBytesPerWord,
      Compiler::AddressType,
      2, c->register_(t->arch->thread()), frame->append(pair)),
     reference, isStatic, tailCall);
}
void
compileAbstractInvoke(MyThread* t, Frame* frame, Compiler::Operand* method,
                      object target, bool tailCall)
{
  // Emit a call through a runtime-computed address for a target that
  // has no concrete code of its own, then adjust the operand stack.
  int returnCode = methodReturnCode(t, target);
  unsigned footprint = methodParameterFootprint(t, target);
  unsigned resultFootprint = resultSize(t, returnCode);

  Compiler::Operand* result = frame->c->stackCall
    (method,
     tailCall ? Compiler::TailJump : 0,
     frame->trace(0, 0),
     resultFootprint,
     operandTypeForFieldCode(t, returnCode),
     footprint);

  frame->pop(footprint);

  if (resultFootprint) {
    pushReturnValue(t, frame, returnCode, result);
  }
}
// Invoke a method whose implementation must be looked up at runtime:
// call the given thunk with (thread, target) to obtain the code
// address, then call that address via compileAbstractInvoke.
void
compileDirectAbstractInvoke(MyThread* t, Frame* frame, Thunk thunk,
                            object target, bool tailCall)
{
  Compiler* c = frame->c;

  compileAbstractInvoke
    (t, frame, c->call
     (c->constant(getThunk(t, thunk), Compiler::AddressType),
      0,
      frame->trace(0, 0),
      TargetBytesPerWord,
      Compiler::AddressType,
      2, c->register_(t->arch->thread()), frame->append(target)),
     target, tailCall);
}
void
handleMonitorEvent(MyThread* t, Frame* frame, intptr_t function)
{
  // For synchronized methods, emit a call to the given monitor thunk
  // (acquire or release) with the monitor object: the class itself
  // for static methods, the saved receiver for instance methods.
  object method = frame->context->method;

  if ((methodFlags(t, method) & ACC_SYNCHRONIZED) == 0) {
    return;
  }

  Compiler* c = frame->c;

  Compiler::Operand* lock;
  if (methodFlags(t, method) & ACC_STATIC) {
    // frame->append may allocate, hence the PROTECT
    PROTECT(t, method);
    lock = frame->append(methodClass(t, method));
  } else {
    lock = loadLocal(frame->context, 1, savedTargetIndex(t, method));
  }

  c->call(c->constant(function, Compiler::AddressType),
          0,
          frame->trace(0, 0),
          0,
          Compiler::VoidType,
          2, c->register_(t->arch->thread()), lock);
}
void
handleEntrance(MyThread* t, Frame* frame)
{
  // Method prologue bookkeeping: stash the receiver for synchronized
  // instance methods, then acquire the monitor if necessary.
  object method = frame->context->method;

  bool synchronizedInstanceMethod
    = (methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
    == ACC_SYNCHRONIZED;

  if (synchronizedInstanceMethod) {
    // copy 'this' into a dedicated local so monitor release can still
    // find it even if local 0 is overwritten by the method body
    unsigned index = savedTargetIndex(t, method);
    storeLocal(frame->context, 1, loadLocal(frame->context, 1, 0), index);
    frame->set(index, Frame::Object);
  }

  handleMonitorEvent
    (t, frame, getThunk(t, acquireMonitorForObjectOnEntranceThunk));
}
void
handleExit(MyThread* t, Frame* frame)
{
  // Release the monitor on return from a synchronized method.
  intptr_t thunk = getThunk(t, releaseMonitorForObjectThunk);
  handleMonitorEvent(t, frame, thunk);
}
bool
inTryBlock(MyThread* t, object code, unsigned ip)
{
  // True if the given bytecode index lies within any exception
  // handler's protected [start, end) range.
  object table = codeExceptionHandlerTable(t, code);
  if (table == 0) {
    return false;
  }

  for (unsigned i = 0, length = exceptionHandlerTableLength(t, table);
       i < length; ++i)
  {
    uint64_t eh = exceptionHandlerTableBody(t, table, i);
    if (exceptionHandlerStart(eh) <= ip and ip < exceptionHandlerEnd(eh)) {
      return true;
    }
  }
  return false;
}
bool
needsReturnBarrier(MyThread* t, object method)
{
  // A constructor of a class with final fields needs a barrier before
  // returning so the final-field writes are visible to other threads.
  if ((methodFlags(t, method) & ConstructorFlag) == 0) {
    return false;
  }
  return (classVmFlags(t, methodClass(t, method)) & HasFinalMemberFlag) != 0;
}
bool
returnsNext(MyThread* t, object code, unsigned ip)
{
  // True if execution starting at ip immediately returns, possibly
  // after following a chain of unconditional gotos.
  unsigned instruction = codeBody(t, code, ip);

  switch (instruction) {
  case return_:
  case areturn:
  case ireturn:
  case freturn:
  case lreturn:
  case dreturn:
    return true;

  case goto_: {
    // codeReadInt16 advances ip past the two offset bytes; the
    // branch target is relative to the goto opcode itself
    uint32_t offset = codeReadInt16(t, code, ++ip);
    uint32_t newIp = (ip - 3) + offset;
    assert(t, newIp < codeLength(t, code));

    return returnsNext(t, code, newIp);
  }

  case goto_w: {
    uint32_t offset = codeReadInt32(t, code, ++ip);
    uint32_t newIp = (ip - 5) + offset;
    assert(t, newIp < codeLength(t, code));

    return returnsNext(t, code, newIp);
  }

  default:
    return false;
  }
}
bool
isTailCall(MyThread* t, object code, unsigned ip, object caller,
           int calleeReturnCode)
{
  // A tail call is only legal when the caller has no work left after
  // the callee returns and the return types are compatible.
  if (not TailCalls) return false;

  if (methodFlags(t, caller) & ACC_SYNCHRONIZED) return false; // monitor release
  if (inTryBlock(t, code, ip - 1)) return false;               // handler in scope
  if (needsReturnBarrier(t, caller)) return false;             // final-field barrier

  int callerReturnCode = methodReturnCode(t, caller);
  if (callerReturnCode != VoidField and callerReturnCode != calleeReturnCode) {
    return false;
  }

  return returnsNext(t, code, ip);
}
bool
isTailCall(MyThread* t, object code, unsigned ip, object caller, object callee)
{
  // Overload for resolved callees: compare against the callee's
  // actual return-type code.
  int calleeReturnCode = methodReturnCode(t, callee);
  return isTailCall(t, code, ip, caller, calleeReturnCode);
}
bool
isReferenceTailCall(MyThread* t, object code, unsigned ip, object caller,
                    object calleeReference)
{
  // Overload for unresolved callees: take the return code from the
  // reference's method spec.
  int calleeReturnCode = methodReferenceReturnCode(t, calleeReference);
  return isTailCall(t, code, ip, caller, calleeReturnCode);
}
// Translate bytecode into machine code starting at ip, recursing at
// branches; defined later in this file, forward-declared here for use
// by saveStateAndCompile and the branch-fusion helpers.
void
compile(MyThread* t, Frame* initialFrame, unsigned ip,
        int exceptionHandlerStart = -1);
void
saveStateAndCompile(MyThread* t, Frame* initialFrame, unsigned ip)
{
  // Compile the code reachable from ip without disturbing the current
  // path's compiler state: snapshot, recurse, restore.
  Compiler* c = initialFrame->c;
  Compiler::State* snapshot = c->saveState();
  compile(t, initialFrame, ip);
  c->restoreState(snapshot);
}
// Fuse an integer comparison with the conditional branch that follows
// it.  If the next instruction is one of if<cond>, emit the fused
// compare-and-branch, compile the taken path, and return true (ip is
// left after the branch instruction).  Otherwise restore ip and
// return false so the caller can emit a plain comparison.
bool
integerBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
              unsigned size, Compiler::Operand* a, Compiler::Operand* b)
{
  if (ip + 3 > codeLength(t, code)) {
    // not enough bytes left for an if<cond> opcode plus 16-bit offset
    return false;
  }

  Compiler* c = frame->c;
  unsigned instruction = codeBody(t, code, ip++);
  // codeReadInt16 advances ip past the two offset bytes; the branch
  // target is relative to the if<cond> opcode itself, hence ip - 3
  uint32_t offset = codeReadInt16(t, code, ip);
  uint32_t newIp = (ip - 3) + offset;
  assert(t, newIp < codeLength(t, code));

  Compiler::Operand* target = frame->machineIp(newIp);

  switch (instruction) {
  case ifeq:
    c->jumpIfEqual(size, a, b, target);
    break;

  case ifne:
    c->jumpIfNotEqual(size, a, b, target);
    break;

  case ifgt:
    c->jumpIfGreater(size, a, b, target);
    break;

  case ifge:
    c->jumpIfGreaterOrEqual(size, a, b, target);
    break;

  case iflt:
    c->jumpIfLess(size, a, b, target);
    break;

  case ifle:
    c->jumpIfLessOrEqual(size, a, b, target);
    break;

  default:
    // not a conditional branch: undo the opcode and offset reads
    ip -= 3;
    return false;
  }

  // compile the taken branch; the caller continues at ip (fall-through)
  saveStateAndCompile(t, frame, newIp);
  return true;
}
// Float analogue of integerBranch: fuse a floating-point comparison
// with the conditional branch that follows.  lessIfUnordered reflects
// how the preceding compare bytecode ranks NaN operands (fcmpl/dcmpl
// push -1 on unordered, fcmpg/dcmpg push +1); when unordered counts
// as "greater", branches that would have fired on a positive compare
// result must also fire on unordered, hence the ...OrUnordered forms.
bool
floatBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
            unsigned size, bool lessIfUnordered, Compiler::Operand* a,
            Compiler::Operand* b)
{
  if (ip + 3 > codeLength(t, code)) {
    // not enough bytes left for an if<cond> opcode plus 16-bit offset
    return false;
  }

  Compiler* c = frame->c;
  unsigned instruction = codeBody(t, code, ip++);
  // codeReadInt16 advances ip past the two offset bytes; the branch
  // target is relative to the if<cond> opcode itself, hence ip - 3
  uint32_t offset = codeReadInt16(t, code, ip);
  uint32_t newIp = (ip - 3) + offset;
  assert(t, newIp < codeLength(t, code));

  Compiler::Operand* target = frame->machineIp(newIp);

  switch (instruction) {
  case ifeq:
    c->jumpIfFloatEqual(size, a, b, target);
    break;

  case ifne:
    c->jumpIfFloatNotEqual(size, a, b, target);
    break;

  case ifgt:
    if (lessIfUnordered) {
      c->jumpIfFloatGreater(size, a, b, target);
    } else {
      c->jumpIfFloatGreaterOrUnordered(size, a, b, target);
    }
    break;

  case ifge:
    if (lessIfUnordered) {
      c->jumpIfFloatGreaterOrEqual(size, a, b, target);
    } else {
      c->jumpIfFloatGreaterOrEqualOrUnordered(size, a, b, target);
    }
    break;

  case iflt:
    if (lessIfUnordered) {
      c->jumpIfFloatLessOrUnordered(size, a, b, target);
    } else {
      c->jumpIfFloatLess(size, a, b, target);
    }
    break;

  case ifle:
    if (lessIfUnordered) {
      c->jumpIfFloatLessOrEqualOrUnordered(size, a, b, target);
    } else {
      c->jumpIfFloatLessOrEqual(size, a, b, target);
    }
    break;

  default:
    // not a conditional branch: undo the opcode and offset reads
    ip -= 3;
    return false;
  }

  // compile the taken branch; the caller continues at ip (fall-through)
  saveStateAndCompile(t, frame, newIp);
  return true;
}
Compiler::Operand*
popLongAddress(Frame* frame)
{
  // Pop a Java long and narrow it to a native pointer-sized operand
  // (a no-op on 64-bit targets).
  Compiler::Operand* value = frame->popLong();
  if (TargetBytesPerWord == 8) {
    return value;
  }
  return frame->c->load(8, 8, value, TargetBytesPerWord);
}
bool
intrinsic(MyThread* t, Frame* frame, object target)
{
// Try to compile a call to a well-known method inline.  On success
// the arguments have been popped and any result pushed, and true is
// returned; false means the caller must emit an ordinary invocation.
#define MATCH(name, constant) \
  (byteArrayLength(t, name) == sizeof(constant) \
   and ::strcmp(reinterpret_cast<char*>(&byteArrayBody(t, name, 0)), \
                constant) == 0)

  object className = vm::className(t, methodClass(t, target));
  if (UNLIKELY(MATCH(className, "java/lang/Math"))) {
    Compiler* c = frame->c;
    if (MATCH(methodName(t, target), "sqrt")
        and MATCH(methodSpec(t, target), "(D)D"))
    {
      frame->pushLong(c->fsqrt(8, frame->popLong()));
      return true;
    } else if (MATCH(methodName(t, target), "abs")) {
      if (MATCH(methodSpec(t, target), "(I)I")) {
        frame->pushInt(c->abs(4, frame->popInt()));
        return true;
      } else if (MATCH(methodSpec(t, target), "(J)J")) {
        frame->pushLong(c->abs(8, frame->popLong()));
        return true;
      } else if (MATCH(methodSpec(t, target), "(F)F")) {
        frame->pushInt(c->fabs(4, frame->popInt()));
        return true;
      }
    }
  } else if (UNLIKELY(MATCH(className, "sun/misc/Unsafe"))) {
    // raw memory accessors: the long argument is an absolute address;
    // the Unsafe receiver object is popped and ignored
    Compiler* c = frame->c;
    if (MATCH(methodName(t, target), "getByte")
        and MATCH(methodSpec(t, target), "(J)B"))
    {
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushInt
        (c->load
         (1, 1, c->memory(address, Compiler::IntegerType, 0, 0, 1),
          TargetBytesPerWord));
      return true;
    } else if (MATCH(methodName(t, target), "putByte")
               and MATCH(methodSpec(t, target), "(JB)V"))
    {
      Compiler::Operand* value = frame->popInt();
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      c->store
        (TargetBytesPerWord, value, 1, c->memory
         (address, Compiler::IntegerType, 0, 0, 1));
      return true;
    } else if (MATCH(methodName(t, target), "getShort")
               and MATCH(methodSpec(t, target), "(J)S"))
    {
      // shorts are signed: sign-extending load
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushInt
        (c->load
         (2, 2, c->memory(address, Compiler::IntegerType, 0, 0, 1),
          TargetBytesPerWord));
      return true;
    } else if (MATCH(methodName(t, target), "getChar")
               and MATCH(methodSpec(t, target), "(J)C"))
    {
      // chars are unsigned 16-bit values, so zero-extend (loadz) as
      // the caload case does; a sign-extending load would push
      // negative ints for chars >= 0x8000
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushInt
        (c->loadz
         (2, 2, c->memory(address, Compiler::IntegerType, 0, 0, 1),
          TargetBytesPerWord));
      return true;
    } else if ((MATCH(methodName(t, target), "putShort")
                and MATCH(methodSpec(t, target), "(JS)V"))
               or (MATCH(methodName(t, target), "putChar")
                   and MATCH(methodSpec(t, target), "(JC)V")))
    {
      // stores truncate identically for short and char
      Compiler::Operand* value = frame->popInt();
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      c->store
        (TargetBytesPerWord, value, 2, c->memory
         (address, Compiler::IntegerType, 0, 0, 1));
      return true;
    } else if ((MATCH(methodName(t, target), "getInt")
                and MATCH(methodSpec(t, target), "(J)I"))
               or (MATCH(methodName(t, target), "getFloat")
                   and MATCH(methodSpec(t, target), "(J)F")))
    {
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushInt
        (c->load
         (4, 4, c->memory
          (address, MATCH(methodName(t, target), "getInt")
           ? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1),
          TargetBytesPerWord));
      return true;
    } else if ((MATCH(methodName(t, target), "putInt")
                and MATCH(methodSpec(t, target), "(JI)V"))
               or (MATCH(methodName(t, target), "putFloat")
                   and MATCH(methodSpec(t, target), "(JF)V")))
    {
      Compiler::Operand* value = frame->popInt();
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      c->store
        (TargetBytesPerWord, value, 4, c->memory
         (address, MATCH(methodName(t, target), "putInt")
          ? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1));
      return true;
    } else if ((MATCH(methodName(t, target), "getLong")
                and MATCH(methodSpec(t, target), "(J)J"))
               or (MATCH(methodName(t, target), "getDouble")
                   and MATCH(methodSpec(t, target), "(J)D")))
    {
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushLong
        (c->load
         (8, 8, c->memory
          (address, MATCH(methodName(t, target), "getLong")
           ? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1),
          8));
      return true;
    } else if ((MATCH(methodName(t, target), "putLong")
                and MATCH(methodSpec(t, target), "(JJ)V"))
               or (MATCH(methodName(t, target), "putDouble")
                   and MATCH(methodSpec(t, target), "(JD)V")))
    {
      Compiler::Operand* value = frame->popLong();
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      c->store
        (8, value, 8, c->memory
         (address, MATCH(methodName(t, target), "putLong")
          ? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1));
      return true;
    } else if (MATCH(methodName(t, target), "getAddress")
               and MATCH(methodSpec(t, target), "(J)J"))
    {
      // reads a native word and widens it to a Java long
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      frame->pushLong
        (c->load
         (TargetBytesPerWord, TargetBytesPerWord,
          c->memory(address, Compiler::AddressType, 0, 0, 1), 8));
      return true;
    } else if (MATCH(methodName(t, target), "putAddress")
               and MATCH(methodSpec(t, target), "(JJ)V"))
    {
      // stores a Java long as a native word (truncating on 32-bit)
      Compiler::Operand* value = frame->popLong();
      Compiler::Operand* address = popLongAddress(frame);
      frame->popObject();
      c->store
        (8, value, TargetBytesPerWord, c->memory
         (address, Compiler::AddressType, 0, 0, 1));
      return true;
    }
  }
  return false;
}
unsigned
targetFieldOffset(Context* context, object field)
{
  // Field offset in the target image: delegate to the bootimage
  // resolver when one is present, otherwise use the live offset.
  BootContext* bc = context->bootContext;
  return bc
    ? bc->resolver->fieldOffset(context->thread, field)
    : fieldOffset(context->thread, field);
}
void
compile(MyThread* t, Frame* initialFrame, unsigned ip,
int exceptionHandlerStart)
{
THREAD_RUNTIME_ARRAY(t, uint8_t, stackMap,
codeMaxStack(t, methodCode(t, initialFrame->context->method)));
Frame myFrame(initialFrame, RUNTIME_ARRAY_BODY(stackMap));
Frame* frame = &myFrame;
Compiler* c = frame->c;
Context* context = frame->context;
object code = methodCode(t, context->method);
PROTECT(t, code);
while (ip < codeLength(t, code)) {
if (context->visitTable[ip] ++) {
// we've already visited this part of the code
frame->visitLogicalIp(ip);
return;
}
frame->startLogicalIp(ip);
if (exceptionHandlerStart >= 0) {
c->initLocalsFromLogicalIp(exceptionHandlerStart);
exceptionHandlerStart = -1;
frame->pushObject();
c->call
(c->constant(getThunk(t, gcIfNecessaryThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
1, c->register_(t->arch->thread()));
}
// fprintf(stderr, "ip: %d map: %ld\n", ip, *(frame->map));
unsigned instruction = codeBody(t, code, ip++);
switch (instruction) {
case aaload:
case baload:
case caload:
case daload:
case faload:
case iaload:
case laload:
case saload: {
Compiler::Operand* index = frame->popInt();
Compiler::Operand* array = frame->popObject();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
if (CheckArrayBounds) {
c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
}
switch (instruction) {
case aaload:
frame->pushObject
(c->load
(TargetBytesPerWord, TargetBytesPerWord, c->memory
(array, Compiler::ObjectType, TargetArrayBody, index,
TargetBytesPerWord),
TargetBytesPerWord));
break;
case faload:
frame->pushInt
(c->load
(4, 4, c->memory
(array, Compiler::FloatType, TargetArrayBody, index, 4),
TargetBytesPerWord));
break;
case iaload:
frame->pushInt
(c->load
(4, 4, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 4),
TargetBytesPerWord));
break;
case baload:
frame->pushInt
(c->load
(1, 1, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 1),
TargetBytesPerWord));
break;
case caload:
frame->pushInt
(c->loadz
(2, 2, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 2),
TargetBytesPerWord));
break;
case daload:
frame->pushLong
(c->load
(8, 8, c->memory
(array, Compiler::FloatType, TargetArrayBody, index, 8), 8));
break;
case laload:
frame->pushLong
(c->load
(8, 8, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 8), 8));
break;
case saload:
frame->pushInt
(c->load
(2, 2, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 2),
TargetBytesPerWord));
break;
}
} break;
case aastore:
case bastore:
case castore:
case dastore:
case fastore:
case iastore:
case lastore:
case sastore: {
Compiler::Operand* value;
if (instruction == dastore or instruction == lastore) {
value = frame->popLong();
} else if (instruction == aastore) {
value = frame->popObject();
} else {
value = frame->popInt();
}
Compiler::Operand* index = frame->popInt();
Compiler::Operand* array = frame->popObject();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
if (CheckArrayBounds) {
c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
}
switch (instruction) {
case aastore: {
c->call
(c->constant(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
4, c->register_(t->arch->thread()), array,
c->add
(4, c->constant(TargetArrayBody, Compiler::IntegerType),
c->shl
(4, c->constant(log(TargetBytesPerWord), Compiler::IntegerType),
index)),
value);
} break;
case fastore:
c->store
(TargetBytesPerWord, value, 4, c->memory
(array, Compiler::FloatType, TargetArrayBody, index, 4));
break;
case iastore:
c->store
(TargetBytesPerWord, value, 4, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 4));
break;
case bastore:
c->store
(TargetBytesPerWord, value, 1, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 1));
break;
case castore:
case sastore:
c->store
(TargetBytesPerWord, value, 2, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 2));
break;
case dastore:
c->store
(8, value, 8, c->memory
(array, Compiler::FloatType, TargetArrayBody, index, 8));
break;
case lastore:
c->store
(8, value, 8, c->memory
(array, Compiler::IntegerType, TargetArrayBody, index, 8));
break;
}
} break;
case aconst_null:
frame->pushObject(c->constant(0, Compiler::ObjectType));
break;
case aload:
frame->loadObject(codeBody(t, code, ip++));
break;
case aload_0:
frame->loadObject(0);
break;
case aload_1:
frame->loadObject(1);
break;
case aload_2:
frame->loadObject(2);
break;
case aload_3:
frame->loadObject(3);
break;
case anewarray: {
uint16_t index = codeReadInt16(t, code, ip);
object reference = singletonObject
(t, codePool(t, methodCode(t, context->method)), index - 1);
PROTECT(t, reference);
object class_ = resolveClassInPool(t, context->method, index - 1, false);
Compiler::Operand* length = frame->popInt();
object argument;
Thunk thunk;
if (LIKELY(class_)) {
argument = class_;
thunk = makeBlankObjectArrayThunk;
} else {
argument = makePair(t, context->method, reference);
thunk = makeBlankObjectArrayFromReferenceThunk;
}
frame->pushObject
(c->call
(c->constant(getThunk(t, thunk), Compiler::AddressType),
0,
frame->trace(0, 0),
TargetBytesPerWord,
Compiler::ObjectType,
3, c->register_(t->arch->thread()), frame->append(argument),
length));
} break;
case areturn: {
handleExit(t, frame);
c->return_(TargetBytesPerWord, frame->popObject());
} return;
case arraylength: {
frame->pushInt
(c->load
(TargetBytesPerWord, TargetBytesPerWord,
c->memory