Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
tree: a8bd364cf9
Fetching contributors…

Cannot retrieve contributors at this time

7546 lines (6435 sloc) 226.12 kB
/*
* mini.c: The new Mono code generator.
*
* Authors:
* Paolo Molaro (lupus@ximian.com)
* Dietmar Maurer (dietmar@ximian.com)
*
* Copyright 2002-2003 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc.
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
*/
#define MONO_LLVM_IN_MINI 1
#include <config.h>
#include <signal.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/io-layer/io-layer.h>
#include "mono/metadata/profiler.h"
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/attach.h>
#include <mono/metadata/runtime.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-logger-internal.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/dtrace.h>
#include "mini.h"
#include "mini-llvm.h"
#include "tasklets.h"
#include <string.h>
#include <ctype.h>
#include "trace.h"
#include "version.h"
#include "jit-icalls.h"
#include "debug-mini.h"
#include "mini-gc.h"
#include "debugger-agent.h"
/* Forward declaration: JIT-compiles METHOD with optimization flags OPT. */
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
/* Default optimization flags applied when no per-method override exists. */
static guint32 default_opt = 0;
/* Set once optimizations have been explicitly chosen (e.g. command line). */
static gboolean default_opt_set = FALSE;
/* TLS key for the per-thread JIT data. */
MonoNativeTlsKey mono_jit_tls_id;
#ifdef MONO_HAVE_FAST_TLS
MONO_FAST_TLS_DECLARE(mono_jit_tls);
#endif
#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#endif
MonoTraceSpec *mono_jit_trace_calls = NULL;
gboolean mono_compile_aot = FALSE;
/* If this is set, no code is generated dynamically, everything is taken from AOT files */
gboolean mono_aot_only = FALSE;
/* Whether to use IMT */
#ifdef MONO_ARCH_HAVE_IMT
gboolean mono_use_imt = TRUE;
#else
gboolean mono_use_imt = FALSE;
#endif
MonoMethodDesc *mono_inject_async_exc_method = NULL;
int mono_inject_async_exc_pos;
MonoMethodDesc *mono_break_at_bb_method = NULL;
int mono_break_at_bb_bb_num;
gboolean mono_do_x86_stack_align = TRUE;
const char *mono_build_date;
gboolean mono_do_signal_chaining;
static gboolean mono_using_xdebug;
static int mini_verbose = 0;
/*
 * This flag controls whether the runtime uses LLVM for JIT compilation, and whether
 * it can load AOT code compiled by LLVM.
 */
gboolean mono_use_llvm = FALSE;
#define mono_jit_lock() EnterCriticalSection (&jit_mutex)
#define mono_jit_unlock() LeaveCriticalSection (&jit_mutex)
static CRITICAL_SECTION jit_mutex;
/* Shared code manager; guarded by the jit lock (see mono_global_codeman_reserve). */
static MonoCodeManager *global_codeman = NULL;
static GHashTable *jit_icall_name_hash = NULL;
static MonoDebugOptions debug_options;
#ifdef VALGRIND_JIT_REGISTER_MAP
static int valgrind_register = 0;
#endif
/*
 * Table written to by the debugger with a 1-based index into the
 * mono_breakpoint_info table, which contains changes made to
 * the JIT instructions by the debugger.
 */
gssize
mono_breakpoint_info_index [MONO_BREAKPOINT_ARRAY_SIZE];
/* Whether to check for pending exceptions in managed-to-native wrappers */
gboolean check_for_pending_exc = TRUE;
/* Whether to disable passing/returning small valuetypes in registers for managed methods */
gboolean disable_vtypes_in_regs = FALSE;
gboolean mono_dont_free_global_codeman;
/*
 * mono_realloc_native_code:
 *
 *   Grow cfg->native_code to cfg->code_size bytes. On Native Client the
 * buffer must stay kNaClAlignment-aligned, so extra slack is allocated and
 * the generated code is shifted back onto an alignment boundary.
 * Returns the (possibly moved) code buffer.
 */
gpointer
mono_realloc_native_code (MonoCompile *cfg)
{
#if defined(__default_codegen__)
	return g_realloc (cfg->native_code, cfg->code_size);
#elif defined(__native_client_codegen__)
	guint old_padding;
	gpointer native_code;
	uintptr_t alignment_check;

	/* Save the old alignment offset so we can re-align after the realloc. */
	old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
	cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
	                                    cfg->code_size + kNaClAlignment);

	/*
	 * Align native_code to the next kNaClAlignment boundary. Use uintptr_t,
	 * not guint, so the pointer is not truncated on 64 bit targets.
	 */
	native_code = (gpointer)(((uintptr_t)cfg->native_code_alloc + kNaClAlignment) & ~(uintptr_t)kNaClAlignmentMask);

	/* Shift the data so it is aligned again. */
	memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);

	alignment_check = (uintptr_t)native_code & kNaClAlignmentMask;
	g_assert (alignment_check == 0);
	return native_code;
#else
	g_assert_not_reached ();
	return cfg->native_code;
#endif
}
#ifdef __native_client_codegen__
/* Prevent instructions from straddling a 32-byte alignment boundary. */
/* Instructions longer than 32 bytes must be aligned internally. */
/* IN: pcode, instlen */
/* OUT: pcode */
/* Keep a single instruction from straddling a kNaClAlignment boundary. */
/* Instructions of kNaClAlignment bytes or longer cannot be handled here. */
/* IN: pcode, instlen */
/* OUT: pcode */
void mono_nacl_align_inst(guint8 **pcode, int instlen) {
	int remaining = kNaClAlignment - ((uintptr_t)(*pcode) & kNaClAlignmentMask);

	/* A single instruction must always fit inside one bundle. */
	if (G_UNLIKELY (instlen >= kNaClAlignment))
		g_assert_not_reached();

	/* Pad to the next bundle when the instruction would not fit in this one. */
	if (instlen > remaining)
		*pcode = mono_arch_nacl_pad(*pcode, remaining);
}
/* Move emitted call sequence to the end of a kNaClAlignment-byte block. */
/* IN: start pointer to start of call sequence */
/* IN: pcode pointer to end of call sequence (current "IP") */
/* OUT: start pointer to the start of the call sequence after padding */
/* OUT: pcode pointer to the end of the call sequence after padding */
void mono_nacl_align_call(guint8 **start, guint8 **pcode) {
	const size_t MAX_NACL_CALL_LENGTH = kNaClAlignment;
	guint8 copy_of_call[MAX_NACL_CALL_LENGTH];
	guint8 *temp;
	/* Length of the already-emitted call sequence. */
	const size_t length = (size_t)((*pcode)-(*start));
	g_assert(length < MAX_NACL_CALL_LENGTH);
	/* Save the emitted bytes, insert padding at the old position, then
	 * re-emit the saved bytes after the padding so the call sequence ends
	 * exactly at a kNaClAlignment boundary. */
	memcpy(copy_of_call, *start, length);
	temp = mono_nacl_pad_call(*start, (guint8)length);
	memcpy(temp, copy_of_call, length);
	(*start) = temp;
	(*pcode) = temp + length;
}
/* mono_nacl_pad_call(): Insert padding for Native Client call instructions */
/* code pointer to buffer for emitting code */
/* ilength length of call instruction */
/* mono_nacl_pad_call(): insert padding so a Native Client call sequence of
 * ILENGTH bytes ends exactly at a kNaClAlignment boundary. */
/* code    pointer to buffer for emitting code */
/* ilength length of call instruction */
guint8 *mono_nacl_pad_call(guint8 *code, guint8 ilength) {
	int remaining = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);
	int pad_bytes = remaining - ilength;

	if (pad_bytes < 0) {
		/* The call cannot end inside the current bundle: fill it up
		 * completely and start over in a fresh one. */
		code = mono_arch_nacl_pad(code, remaining);
		pad_bytes = kNaClAlignment - ilength;
	}

	g_assert(ilength > 0);
	g_assert(pad_bytes >= 0);
	g_assert(pad_bytes < kNaClAlignment);

	return pad_bytes ? mono_arch_nacl_pad(code, pad_bytes) : code;
}
/* Advance CODE to the next kNaClAlignment boundary, emitting padding if it
 * is not already aligned. */
guint8 *mono_nacl_align(guint8 *code) {
	int pad = kNaClAlignment - ((uintptr_t)code & kNaClAlignmentMask);

	if (pad == kNaClAlignment)
		return code;
	return mono_arch_nacl_pad(code, pad);
}
/* After NaCl alignment nops have been inserted, re-point every patch site
 * past those nops so patches land on real instructions. */
void mono_nacl_fix_patches(const guint8 *code, MonoJumpInfo *ji)
{
	MonoJumpInfo *info;

	for (info = ji; info != NULL; info = info->next) {
		unsigned char *target = info->ip.i + code;

		target = mono_arch_nacl_skip_nops(target);
		info->ip.i = target - code;
	}
}
#endif /* __native_client_codegen__ */
/*
 * mono_running_on_valgrind:
 *
 *   Return TRUE when the process is running under valgrind, recording the
 * fact in valgrind_register when JIT map registration support is built in.
 */
gboolean
mono_running_on_valgrind (void)
{
	if (!RUNNING_ON_VALGRIND)
		return FALSE;
#ifdef VALGRIND_JIT_REGISTER_MAP
	valgrind_register = TRUE;
#endif
	return TRUE;
}
/* A range excluded ("hole") from a protected try region. */
typedef struct {
	MonoExceptionClause *clause;	/* the clause the hole belongs to */
	MonoBasicBlock *basic_block;	/* the basic block covered by the hole */
	int start_offset;		/* offset where the hole starts */
} TryBlockHole;
/* Closure for find_tramp (): the IP searched for and the method found, if any. */
typedef struct {
	void *ip;
	MonoMethod *method;
} FindTrampUserData;
/*
 * find_tramp:
 *
 *   GHashTable foreach callback: when VALUE matches the IP recorded in
 * USER_DATA, store the corresponding method (the hash key) there too.
 */
static void
find_tramp (gpointer key, gpointer value, gpointer user_data)
{
	FindTrampUserData *ud = user_data;

	if (ud->ip == value)
		ud->method = key;
}
/*
 * get_method_from_ip:
 *
 *   Debug helper: return a newly-allocated description of the managed method
 * (or JIT trampoline) containing IP, or NULL when nothing matches. The
 * caller must g_free the result.
 */
G_GNUC_UNUSED static char*
get_method_from_ip (void *ip)
{
	MonoJitInfo *ji;
	char *method;
	char *res;
	MonoDomain *domain = mono_domain_get ();
	MonoDebugSourceLocation *location;
	FindTrampUserData user_data;
	if (!domain)
		domain = mono_get_root_domain ();
	ji = mono_jit_info_table_find (domain, ip);
	if (!ji) {
		/* Not JITted code: see whether IP is a JIT trampoline instead. */
		user_data.ip = ip;
		user_data.method = NULL;
		mono_domain_lock (domain);
		g_hash_table_foreach (domain_jit_info (domain)->jit_trampoline_hash, find_tramp, &user_data);
		mono_domain_unlock (domain);
		if (user_data.method) {
			char *mname = mono_method_full_name (user_data.method, TRUE);
			res = g_strdup_printf ("<%p - JIT trampoline for %s>", ip, mname);
			g_free (mname);
			return res;
		}
		else
			return NULL;
	}
	method = mono_method_full_name (ji->method, TRUE);
	/* FIXME: unused ? */
	location = mono_debug_lookup_source_location (ji->method, (guint32)((guint8*)ip - (guint8*)ji->code_start), domain);
	res = g_strdup_printf (" %s + 0x%x (%p %p) [%p - %s]", method, (int)((char*)ip - (char*)ji->code_start), ji->code_start, (char*)ji->code_start + ji->code_size, domain, domain->friendly_name);
	mono_debug_free_source_location (location);
	g_free (method);
	return res;
}
/**
 * mono_pmip:
 * @ip: an instruction pointer address
 *
 * This method is used from a debugger to get the name of the
 * method at address @ip. This routine is typically invoked from
 * a debugger like this:
 *
 * (gdb) print mono_pmip ($pc)
 *
 * Returns: the name of the method at address @ip, newly allocated
 * (caller frees), or NULL if no method is found.
 */
G_GNUC_UNUSED char *
mono_pmip (void *ip)
{
	return get_method_from_ip (ip);
}
/**
 * mono_print_method_from_ip
 * @ip: an instruction pointer address
 *
 * This method is used from a debugger to get the name of the
 * method at address @ip.
 *
 * This prints the name of the method at address @ip in the standard
 * output. Unlike mono_pmip which returns a string, this routine
 * prints the value on the standard output.
 */
#ifdef __GNUC__
/* Prevent the linker from optimizing this away in embedding setups to help debugging */
__attribute__((used))
#endif
void
mono_print_method_from_ip (void *ip)
{
	MonoJitInfo *ji;
	char *method;
	MonoDebugSourceLocation *source;
	MonoDomain *domain = mono_domain_get ();
	/* Overwritten by mini_jit_info_table_find () with the domain that
	 * actually owns the JIT info. */
	MonoDomain *target_domain = mono_domain_get ();
	FindTrampUserData user_data;
	MonoGenericSharingContext*gsctx;
	const char *shared_type;
	ji = mini_jit_info_table_find (domain, ip, &target_domain);
	if (!ji) {
		/* Not JITted code: see whether IP is a JIT trampoline instead. */
		user_data.ip = ip;
		user_data.method = NULL;
		mono_domain_lock (domain);
		g_hash_table_foreach (domain_jit_info (domain)->jit_trampoline_hash, find_tramp, &user_data);
		mono_domain_unlock (domain);
		if (user_data.method) {
			char *mname = mono_method_full_name (user_data.method, TRUE);
			printf ("IP %p is a JIT trampoline for %s\n", ip, mname);
			g_free (mname);
		}
		else
			g_print ("No method at %p\n", ip);
		fflush (stdout);
		return;
	}
	method = mono_method_full_name (ji->method, TRUE);
	source = mono_debug_lookup_source_location (ji->method, (guint32)((guint8*)ip - (guint8*)ji->code_start), target_domain);
	/* Annotate generic-shared methods so the output distinguishes them. */
	gsctx = mono_jit_info_get_generic_sharing_context (ji);
	shared_type = "";
	if (gsctx) {
		if (gsctx->var_is_vt || gsctx->mvar_is_vt)
			shared_type = "gsharedvt ";
		else
			shared_type = "gshared ";
	}
	g_print ("IP %p at offset 0x%x of %smethod %s (%p %p)[domain %p - %s]\n", ip, (int)((char*)ip - (char*)ji->code_start), shared_type, method, ji->code_start, (char*)ji->code_start + ji->code_size, target_domain, target_domain->friendly_name);
	if (source)
		g_print ("%s:%d\n", source->source_file, source->row);
	fflush (stdout);
	mono_debug_free_source_location (source);
	g_free (method);
}
/*
* mono_method_same_domain:
*
* Determine whenever two compiled methods are in the same domain, thus
* the address of the callee can be embedded in the caller.
*/
gboolean mono_method_same_domain (MonoJitInfo *caller, MonoJitInfo *callee)
{
if (!caller || !callee)
return FALSE;
/*
* If the call was made from domain-neutral to domain-specific
* code, we can't patch the call site.
*/
if (caller->domain_neutral && !callee->domain_neutral)
return FALSE;
if ((caller->method->klass == mono_defaults.appdomain_class) &&
(strstr (caller->method->name, "InvokeInDomain"))) {
/* The InvokeInDomain methods change the current appdomain */
return FALSE;
}
return TRUE;
}
/*
 * mono_global_codeman_reserve:
 *
 * Allocate SIZE bytes of code memory from the global code manager.
 * LOCKING: takes the jit lock on the normal path.
 * NOTE(review): the lazy creation of global_codeman is done without the
 * lock — presumably only reachable single-threaded during startup; confirm.
 */
void *mono_global_codeman_reserve (int size)
{
	void *ptr;
	if (mono_aot_only)
		g_error ("Attempting to allocate from the global code manager while running with --aot-only.\n");
	if (!global_codeman) {
		/* This can happen during startup */
		global_codeman = mono_code_manager_new ();
		return mono_code_manager_reserve (global_codeman, size);
	}
	else {
		mono_jit_lock ();
		ptr = mono_code_manager_reserve (global_codeman, size);
		mono_jit_unlock ();
		return ptr;
	}
}
#if defined(__native_client_codegen__) && defined(__native_client__)
/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
 * which we are generating code, return a pointer to the destination in the
 * dynamic code segment into which the code will be copied when
 * mono_global_codeman_commit is called.
 * LOCKING: Acquires the jit lock.
 */
void*
nacl_global_codeman_get_dest (void *data)
{
	void *result;

	mono_jit_lock ();
	result = nacl_code_manager_get_code_dest (global_codeman, data);
	mono_jit_unlock ();

	return result;
}
/*
 * mono_global_codeman_commit:
 *
 *   Forward DATA/SIZE/NEWSIZE to mono_code_manager_commit on the global
 * code manager. LOCKING: Acquires the jit lock.
 */
void
mono_global_codeman_commit (void *data, int size, int newsize)
{
	mono_jit_lock ();
	mono_code_manager_commit (global_codeman, data, size, newsize);
	mono_jit_unlock ();
}
/*
 * Convenience wrapper around mono_global_codeman_commit which validates and
 * copies the code. On entry *buf_base and buf_size describe the buffer
 * (allocated by mono_global_codeman_reserve) and *code_end points just past
 * the last instruction byte. On return *buf_base points at the start of the
 * copy in the code segment and *code_end just past the end of the copy.
 */
void
nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
{
	guint8 *dest = nacl_global_codeman_get_dest (*buf_base);
	size_t used = *code_end - *buf_base;

	mono_global_codeman_commit (*buf_base, buf_size, used);
	*buf_base = dest;
	*code_end = dest + used;
}
#else
/* no-op versions of Native Client functions */
/* Outside NaCl the generation buffer is the final location; return it as is. */
void*
nacl_global_codeman_get_dest (void *data)
{
	return data;
}
/* Outside NaCl there is nothing to validate or copy. */
void
mono_global_codeman_commit (void *data, int size, int newsize)
{
}
/* Outside NaCl there is no separate code segment; nothing to do. */
void
nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
{
}
#endif /* __native_client__ */
/**
 * mono_create_unwind_op:
 *
 *   Allocate (g_new0) and return an unwind op initialized from the
 * arguments. The caller owns the returned memory.
 */
MonoUnwindOp*
mono_create_unwind_op (int when, int tag, int reg, int val)
{
	MonoUnwindOp *res = g_new0 (MonoUnwindOp, 1);

	res->when = when;
	res->op = tag;
	res->reg = reg;
	res->val = val;

	return res;
}
/**
 * mono_emit_unwind_op:
 *
 *   Append an unwind op with the given parameters to cfg->unwind_ops.
 * The op lives in the compile mempool, so it needs no explicit free.
 */
void
mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
{
	MonoUnwindOp *uwop = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));

	uwop->when = when;
	uwop->op = tag;
	uwop->reg = reg;
	uwop->val = val;

	cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, uwop);
}
/*
 * mono_jump_info_token_new2:
 *
 *   Create a mempool-allocated MonoJumpInfoToken for TOKEN in IMAGE,
 * capturing a copy of CONTEXT when one is supplied.
 */
MonoJumpInfoToken *
mono_jump_info_token_new2 (MonoMemPool *mp, MonoImage *image, guint32 token, MonoGenericContext *context)
{
	MonoJumpInfoToken *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoToken));

	res->image = image;
	res->token = token;
	res->has_context = (context != NULL);
	if (res->has_context)
		memcpy (&res->context, context, sizeof (*context));

	return res;
}
MonoJumpInfoToken *
mono_jump_info_token_new (MonoMemPool *mp, MonoImage *image, guint32 token)
{
	/* Convenience wrapper: a token with no generic context. */
	return mono_jump_info_token_new2 (mp, image, token, NULL);
}
/*
 * mono_tramp_info_create:
 *
 *   Create a MonoTrampInfo structure from the arguments. This function
 * assumes ownership of NAME, JI, and UNWIND_OPS (freed by
 * mono_tramp_info_free).
 */
MonoTrampInfo*
mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops)
{
	MonoTrampInfo *res = g_new0 (MonoTrampInfo, 1);

	/* Ownership of NAME transfers; the const is dropped deliberately. */
	res->name = (char*)name;
	res->code = code;
	res->code_size = code_size;
	res->ji = ji;
	res->unwind_ops = unwind_ops;

	return res;
}
/*
 * mono_tramp_info_free:
 *
 *   Release INFO together with its name and unwind op list.
 */
void
mono_tramp_info_free (MonoTrampInfo *info)
{
	GSList *it;

	g_free (info->name);
	// FIXME: the patch info (info->ji) is not freed here
	for (it = info->unwind_ops; it != NULL; it = it->next)
		g_free (it->data);
	g_slist_free (info->unwind_ops);
	g_free (info);
}
/*
 * break_count:
 *
 *   Intentionally empty: exists only as a place to set a debugger
 * breakpoint; called by mono_debug_count () when the COUNT limit is hit.
 */
G_GNUC_UNUSED static void
break_count (void)
{
}
/*
 * mono_debug_count:
 *
 *   Runtime debugging tool, use if (debug_count ()) <x> else <y> to do <x>
 * the first COUNT times, then do <y> afterwards. Set a breakpoint in
 * break_count () to break the last time <x> is done. When the COUNT
 * environment variable is unset, always returns TRUE.
 */
G_GNUC_UNUSED gboolean
mono_debug_count (void)
{
	static int count = 0;
	const char *limit_str;
	int limit;

	count ++;

	/* Read and parse COUNT once per call (the original re-read it up to
	 * three times). */
	limit_str = getenv ("COUNT");
	if (!limit_str)
		return TRUE;

	limit = atoi (limit_str);
	if (count == limit)
		break_count ();
	return count <= limit;
}
/*
 * MONO_INIT_VARINFO:
 *
 *   Initialize variable info VI to its default state: first-use position
 * sentinel 0xffff, no register assigned (-1), and variable index ID.
 */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
/**
 * mono_unlink_bblock:
 *
 *   Remove the CFG edge FROM -> TO: drop TO from FROM's successor list and
 * FROM from TO's predecessor list. Either side may already be absent.
 */
void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	int i, kept;

	/* Compact FROM's out list, dropping TO if present. */
	kept = 0;
	for (i = 0; i < from->out_count; ++i) {
		if (from->out_bb [i] != to)
			from->out_bb [kept ++] = from->out_bb [i];
	}
	if (kept != from->out_count) {
		/* TO must have appeared exactly once. */
		g_assert (kept == from->out_count - 1);
		from->out_count = kept;
	}

	/* Compact TO's in list, dropping FROM if present. */
	kept = 0;
	for (i = 0; i < to->in_count; ++i) {
		if (to->in_bb [i] != from)
			to->in_bb [kept ++] = to->in_bb [i];
	}
	if (kept != to->in_count) {
		/* FROM must have appeared exactly once. */
		g_assert (kept == to->in_count - 1);
		to->in_count = kept;
	}
}
/*
 * mono_bblocks_linked:
 *
 *   Return whether there is a CFG edge from BB1 to BB2.
 */
gboolean
mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
{
	int i = 0;

	while (i < bb1->out_count) {
		if (bb2 == bb1->out_bb [i])
			return TRUE;
		++i;
	}

	return FALSE;
}
/*
 * mono_find_block_region_notry:
 *
 *   Return the region encoding ((clause_index + 1) << 8 | region kind |
 * clause flags) of the innermost filter or handler containing OFFSET,
 * ignoring try blocks, or -1 when OFFSET lies in none.
 */
static int
mono_find_block_region_notry (MonoCompile *cfg, int offset)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* A filter region runs from filter_offset up to its handler. */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
			else
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
		}
	}
	return -1;
}
/*
 * mono_get_block_region_notry:
 *
 *   Return the region corresponding to REGION, ignoring try clauses nested
 * inside finally clauses.
 */
int
mono_get_block_region_notry (MonoCompile *cfg, int region)
{
	if ((region & (0xf << 4)) != MONO_REGION_TRY)
		return region;

	{
		/*
		 * A try clause nested inside a finally clause: resolve to the
		 * region of the enclosing clause instead.
		 */
		MonoMethodHeader *header = cfg->header;
		int clause_index = (region >> 8) - 1;

		g_assert (clause_index >= 0 && clause_index < header->num_clauses);
		return mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
	}
}
/*
 * mono_find_spvar_for_region:
 *
 *   Look up the stack-pointer variable registered for REGION in
 * cfg->spvars, resolving nested-try regions first.
 */
MonoInst *
mono_find_spvar_for_region (MonoCompile *cfg, int region)
{
	int resolved = mono_get_block_region_notry (cfg, region);

	return g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (resolved));
}
/*
 * df_visit:
 *
 *   Recursive depth-first walk of the CFG from START: assigns increasing
 * depth-first numbers via *DFN, records each block in ARRAY at its dfn,
 * and sets df_parent links. A non-zero dfn marks a block as visited.
 */
static void
df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
{
	int i;
	array [*dfn] = start;
	/* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
	for (i = 0; i < start->out_count; ++i) {
		if (start->out_bb [i]->dfn)
			continue;
		(*dfn)++;
		start->out_bb [i]->dfn = *dfn;
		start->out_bb [i]->df_parent = start;
		array [*dfn] = start->out_bb [i];
		df_visit (start->out_bb [i], dfn, array);
	}
}
/*
 * mono_reverse_branch_op:
 *
 *   Return the branch opcode with the inverted condition (e.g. BEQ ->
 * BNE_UN). Relies on each opcode family (CEE_B*, OP_FB*, OP_LB*, OP_IB*)
 * occupying a contiguous range starting at its *EQ member, mirrored by the
 * lookup tables below. Asserts on any non-branch opcode.
 */
guint32
mono_reverse_branch_op (guint32 opcode)
{
	static const int reverse_map [] = {
		CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
		CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
	};
	static const int reverse_fmap [] = {
		OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
		OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
	};
	static const int reverse_lmap [] = {
		OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
		OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
	};
	static const int reverse_imap [] = {
		OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
		OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
	};
	if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
		opcode = reverse_map [opcode - CEE_BEQ];
	} else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
		opcode = reverse_fmap [opcode - OP_FBEQ];
	} else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
		opcode = reverse_lmap [opcode - OP_LBEQ];
	} else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
		opcode = reverse_imap [opcode - OP_IBEQ];
	} else
		g_assert_not_reached ();
	return opcode;
}
/*
 * mono_type_to_store_membase:
 *
 *   Map TYPE to the OP_STORE*_MEMBASE_REG opcode used to store a value of
 * that type to memory. Errors out on unknown types.
 */
guint
mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_STORE_MEMBASE_REG;
handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		return OP_STOREI1_MEMBASE_REG;
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		return OP_STOREI2_MEMBASE_REG;
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return OP_STOREI4_MEMBASE_REG;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return OP_STORE_MEMBASE_REG;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_STORE_MEMBASE_REG;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return OP_STOREI8_MEMBASE_REG;
	case MONO_TYPE_R4:
		return OP_STORER4_MEMBASE_REG;
	case MONO_TYPE_R8:
		return OP_STORER8_MEMBASE_REG;
	case MONO_TYPE_VALUETYPE:
		/* Enums store as their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		}
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
			return OP_STOREX_MEMBASE;
		return OP_STOREV_MEMBASE;
	case MONO_TYPE_TYPEDBYREF:
		return OP_STOREV_MEMBASE;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic type definition. */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		if (mini_type_var_is_vt (cfg, type))
			return OP_STOREV_MEMBASE;
		else
			return OP_STORE_MEMBASE_REG;
	default:
		g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
	}
	return -1;
}
/*
 * mono_type_to_load_membase:
 *
 *   Map TYPE to the OP_LOAD*_MEMBASE opcode used to load a value of that
 * type from memory. Errors out on unknown types.
 */
guint
mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_LOAD_MEMBASE;

	type = mono_type_get_underlying_type (type);

	switch (type->type) {
	case MONO_TYPE_I1:
		return OP_LOADI1_MEMBASE;
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		return OP_LOADU1_MEMBASE;
	case MONO_TYPE_I2:
		return OP_LOADI2_MEMBASE;
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		return OP_LOADU2_MEMBASE;
	case MONO_TYPE_I4:
		return OP_LOADI4_MEMBASE;
	case MONO_TYPE_U4:
		return OP_LOADU4_MEMBASE;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return OP_LOAD_MEMBASE;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_LOAD_MEMBASE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return OP_LOADI8_MEMBASE;
	case MONO_TYPE_R4:
		return OP_LOADR4_MEMBASE;
	case MONO_TYPE_R8:
		return OP_LOADR8_MEMBASE;
	case MONO_TYPE_VALUETYPE:
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
			return OP_LOADX_MEMBASE;
		/* Fall through: non-SIMD valuetypes load like typedbyref. */
	case MONO_TYPE_TYPEDBYREF:
		return OP_LOADV_MEMBASE;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (type))
			return OP_LOADV_MEMBASE;
		else
			return OP_LOAD_MEMBASE;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->generic_sharing_context);
		if (mini_type_var_is_vt (cfg, type))
			return OP_LOADV_MEMBASE;
		else
			return OP_LOAD_MEMBASE;
	default:
		g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
	}
	return -1;
}
/*
 * mini_type_to_ldind:
 *
 *   Like mono_type_to_ldind (), but under generic sharing maps open type
 * variables to LDOBJ (gsharedvt valuetypes) or LDIND_REF.
 */
static guint
mini_type_to_ldind (MonoCompile* cfg, MonoType *type)
{
	gboolean open_var = type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR;

	if (cfg->generic_sharing_context && !type->byref && open_var)
		return mini_type_var_is_vt (cfg, type) ? CEE_LDOBJ : CEE_LDIND_REF;

	return mono_type_to_ldind (type);
}
/*
 * mini_type_to_stind:
 *
 *   Like mono_type_to_stind (), but under generic sharing maps open type
 * variables to STOBJ (gsharedvt valuetypes) or STIND_REF.
 */
guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
	gboolean open_var = type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR;

	if (cfg->generic_sharing_context && !type->byref && open_var)
		return mini_type_var_is_vt (cfg, type) ? CEE_STOBJ : CEE_STIND_REF;

	return mono_type_to_stind (type);
}
#ifndef DISABLE_JIT
/*
 * mono_op_imm_to_op:
 *
 *   Map an OP_.._IMM opcode to its register-operand variant. The untyped
 * variants (OP_ADD_IMM etc.) resolve to the int or long form depending on
 * the native register size. Asserts (after printing the opcode name) on
 * anything without a register variant.
 */
int
mono_op_imm_to_op (int opcode)
{
	switch (opcode) {
	case OP_ADD_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IADD;
#else
		return OP_LADD;
#endif
	case OP_IADD_IMM:
		return OP_IADD;
	case OP_LADD_IMM:
		return OP_LADD;
	case OP_ISUB_IMM:
		return OP_ISUB;
	case OP_LSUB_IMM:
		return OP_LSUB;
	case OP_IMUL_IMM:
		return OP_IMUL;
	case OP_AND_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IAND;
#else
		return OP_LAND;
#endif
	case OP_OR_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IOR;
#else
		return OP_LOR;
#endif
	case OP_XOR_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IXOR;
#else
		return OP_LXOR;
#endif
	case OP_IAND_IMM:
		return OP_IAND;
	case OP_LAND_IMM:
		return OP_LAND;
	case OP_IOR_IMM:
		return OP_IOR;
	case OP_LOR_IMM:
		return OP_LOR;
	case OP_IXOR_IMM:
		return OP_IXOR;
	case OP_LXOR_IMM:
		return OP_LXOR;
	case OP_ISHL_IMM:
		return OP_ISHL;
	case OP_LSHL_IMM:
		return OP_LSHL;
	case OP_ISHR_IMM:
		return OP_ISHR;
	case OP_LSHR_IMM:
		return OP_LSHR;
	case OP_ISHR_UN_IMM:
		return OP_ISHR_UN;
	case OP_LSHR_UN_IMM:
		return OP_LSHR_UN;
	case OP_IDIV_IMM:
		return OP_IDIV;
	case OP_IDIV_UN_IMM:
		return OP_IDIV_UN;
	case OP_IREM_UN_IMM:
		return OP_IREM_UN;
	case OP_IREM_IMM:
		return OP_IREM;
	case OP_DIV_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IDIV;
#else
		return OP_LDIV;
#endif
	case OP_REM_IMM:
#if SIZEOF_REGISTER == 4
		return OP_IREM;
#else
		return OP_LREM;
#endif
	case OP_ADDCC_IMM:
		return OP_ADDCC;
	case OP_ADC_IMM:
		return OP_ADC;
	case OP_SUBCC_IMM:
		return OP_SUBCC;
	case OP_SBB_IMM:
		return OP_SBB;
	case OP_IADC_IMM:
		return OP_IADC;
	case OP_ISBB_IMM:
		return OP_ISBB;
	case OP_COMPARE_IMM:
		return OP_COMPARE;
	case OP_ICOMPARE_IMM:
		return OP_ICOMPARE;
	case OP_LOCALLOC_IMM:
		return OP_LOCALLOC;
	default:
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return -1;
	}
}
/*
 * mono_decompose_op_imm:
 *
 *   Replace the OP_.._IMM INS with its non IMM variant: materialize the
 * immediate in a fresh vreg via OP_ICONST and feed that vreg to INS.
 */
void
mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
	MonoInst *iconst;

	/* Load the immediate into its own vreg just before INS. */
	MONO_INST_NEW (cfg, iconst, OP_ICONST);
	iconst->inst_c0 = ins->inst_imm;
	iconst->dreg = mono_alloc_ireg (cfg);
	mono_bblock_insert_before_ins (bb, ins, iconst);

	ins->opcode = mono_op_imm_to_op (ins->opcode);
	/* OP_LOCALLOC takes its single operand in sreg1. */
	if (ins->opcode == OP_LOCALLOC)
		ins->sreg1 = iconst->dreg;
	else
		ins->sreg2 = iconst->dreg;

	bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
}
#endif
/*
 * set_vreg_to_inst:
 *
 *   Record INST as the variable backing VREG, growing the mempool-allocated
 * vreg_to_inst table (doubling, starting at 32 entries) as needed.
 */
static void
set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
{
	if (vreg >= cfg->vreg_to_inst_len) {
		MonoInst **old_table = cfg->vreg_to_inst;
		int old_len = cfg->vreg_to_inst_len;

		while (vreg >= cfg->vreg_to_inst_len)
			cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
		cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
		if (old_len)
			memcpy (cfg->vreg_to_inst, old_table, old_len * sizeof (MonoInst*));
	}
	cfg->vreg_to_inst [vreg] = inst;
}
/* TRUE when TYPE (not byref) is underlyingly I8/U8. NOTE: evaluates TYPE
 * several times, so only pass side-effect-free expressions. */
#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
/* TRUE when TYPE (not byref) is R4 or R8. Same multiple-evaluation caveat. */
#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
#ifdef DISABLE_JIT
/* JIT disabled: variable creation is a no-op stub. */
MonoInst*
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
	return NULL;
}
#else
/*
 * mono_compile_create_var_for_vreg:
 *
 *   Create a variable of TYPE backed by virtual register VREG, register it
 * in cfg->varinfo/cfg->vars (growing both tables as needed) and return it.
 * On 32 bit targets, long (and with soft float, floating point) variables
 * additionally get two dummy component variables for VREG+1/VREG+2.
 */
MonoInst*
mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
	MonoInst *inst;
	int num = cfg->num_varinfo;
	gboolean regpair;
	/* Grow the varinfo/vars tables (doubling, starting at 64). */
	if ((num + 1) >= cfg->varinfo_count) {
		int orig_count = cfg->varinfo_count;
		cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 64;
		cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
		cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
		memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
	}
	cfg->stat_allocate_var++;
	MONO_INST_NEW (cfg, inst, opcode);
	inst->inst_c0 = num;
	inst->inst_vtype = type;
	inst->klass = mono_class_from_mono_type (type);
	type_to_eval_stack_type (cfg, type, inst);
	/* if set to 1 the variable is native */
	inst->backend.is_pinvoke = 0;
	inst->dreg = vreg;
	if (inst->klass->exception_type)
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
	/* Track managed pointers / references for precise GC maps. */
	if (cfg->compute_gc_maps) {
		if (type->byref) {
			mono_mark_vreg_as_mp (cfg, vreg);
		} else {
			MonoType *t = mini_type_get_underlying_type (NULL, type);
			if ((MONO_TYPE_ISSTRUCT (t) && inst->klass->has_references) || mini_type_is_reference (cfg, t)) {
				inst->flags |= MONO_INST_GC_TRACK;
				mono_mark_vreg_as_ref (cfg, vreg);
			}
		}
	}
	cfg->varinfo [num] = inst;
	MONO_INIT_VARINFO (&cfg->vars [num], num);
	MONO_VARINFO (cfg, num)->vreg = vreg;
	if (vreg != -1)
		set_vreg_to_inst (cfg, vreg, inst);
#if SIZEOF_REGISTER == 4
#ifdef MONO_ARCH_SOFT_FLOAT
	regpair = mono_type_is_long (type) || mono_type_is_float (type);
#else
	regpair = mono_type_is_long (type);
#endif
#else
	regpair = FALSE;
#endif
	if (regpair) {
		MonoInst *tree;
		/*
		 * These two cannot be allocated using create_var_for_vreg since that would
		 * put it into the cfg->varinfo array, confusing many parts of the JIT.
		 */
		/*
		 * Set flags to VOLATILE so SSA skips it.
		 */
		if (cfg->verbose_level >= 4) {
			printf ("  Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2);
		}
#ifdef MONO_ARCH_SOFT_FLOAT
		if (cfg->opt & MONO_OPT_SSA) {
			if (mono_type_is_float (type))
				inst->flags = MONO_INST_VOLATILE;
		}
#endif
		/* Allocate a dummy MonoInst for the first vreg */
		MONO_INST_NEW (cfg, tree, OP_LOCAL);
		tree->dreg = inst->dreg + 1;
		if (cfg->opt & MONO_OPT_SSA)
			tree->flags = MONO_INST_VOLATILE;
		tree->inst_c0 = num;
		tree->type = STACK_I4;
		tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
		tree->klass = mono_class_from_mono_type (tree->inst_vtype);
		set_vreg_to_inst (cfg, inst->dreg + 1, tree);
		/* Allocate a dummy MonoInst for the second vreg */
		MONO_INST_NEW (cfg, tree, OP_LOCAL);
		tree->dreg = inst->dreg + 2;
		if (cfg->opt & MONO_OPT_SSA)
			tree->flags = MONO_INST_VOLATILE;
		tree->inst_c0 = num;
		tree->type = STACK_I4;
		tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
		tree->klass = mono_class_from_mono_type (tree->inst_vtype);
		set_vreg_to_inst (cfg, inst->dreg + 2, tree);
	}
	cfg->num_varinfo++;
	if (cfg->verbose_level > 2)
		g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
	return inst;
}
/*
 * mono_compile_create_var:
 *
 *   Create a variable of TYPE, allocating a fresh virtual register of the
 * appropriate kind for it.
 */
MonoInst*
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
	int vreg;

	if (mono_type_is_long (type)) {
		vreg = mono_alloc_dreg (cfg, STACK_I8);
	}
#ifdef MONO_ARCH_SOFT_FLOAT
	else if (mono_type_is_float (type)) {
		vreg = mono_alloc_dreg (cfg, STACK_R8);
	}
#endif
	else {
		/* All the others are unified */
		vreg = mono_alloc_preg (cfg);
	}
	return mono_compile_create_var_for_vreg (cfg, type, opcode, vreg);
}
/*
 * mono_compile_make_var_load:
 *
 *   Turn DEST into a load of the variable stored at VAR_INDEX in
 * cfg->varinfo.
 */
void
mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index) {
	MonoInst *var;

	memset (dest, 0, sizeof (MonoInst));
	var = cfg->varinfo [var_index];
	dest->inst_i0 = var;
	dest->opcode = mini_type_to_ldind (cfg, var->inst_vtype);
	type_to_eval_stack_type (cfg, var->inst_vtype, dest);
	dest->klass = var->klass;
}
#endif
/*
 * mono_mark_vreg_as_ref:
 *
 *   Record that VREG holds a GC reference, growing the vreg_is_ref map
 * on demand (doubling, starting from 32 entries).
 */
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
	if (vreg >= cfg->vreg_is_ref_len) {
		gboolean *old_map = cfg->vreg_is_ref;
		int old_len = cfg->vreg_is_ref_len;
		int new_len = old_len ? old_len : 32;

		/* Double until VREG fits */
		while (vreg >= new_len)
			new_len *= 2;

		cfg->vreg_is_ref_len = new_len;
		cfg->vreg_is_ref = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * new_len);
		if (old_len)
			memcpy (cfg->vreg_is_ref, old_map, old_len * sizeof (gboolean));
	}
	cfg->vreg_is_ref [vreg] = TRUE;
}
/*
 * mono_mark_vreg_as_mp:
 *
 *   Record that VREG holds a managed pointer, growing the vreg_is_mp map
 * on demand (doubling, starting from 32 entries).
 */
void
mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
{
	if (vreg >= cfg->vreg_is_mp_len) {
		gboolean *old_map = cfg->vreg_is_mp;
		int old_len = cfg->vreg_is_mp_len;
		int new_len = old_len ? old_len : 32;

		/* Double until VREG fits */
		while (vreg >= new_len)
			new_len *= 2;

		cfg->vreg_is_mp_len = new_len;
		cfg->vreg_is_mp = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * new_len);
		if (old_len)
			memcpy (cfg->vreg_is_mp, old_map, old_len * sizeof (gboolean));
	}
	cfg->vreg_is_mp [vreg] = TRUE;
}
/*
 * Map the evaluation stack type of INS back to the corresponding MonoType.
 */
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4:
		return &mono_defaults.int32_class->byval_arg;
	case STACK_I8:
		return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR:
		return &mono_defaults.int_class->byval_arg;
	case STACK_R8:
		return &mono_defaults.double_class->byval_arg;
	case STACK_MP:
		/*
		 * This check used to be commented out without a specific reason,
		 * but commenting it breaks #80235.
		 */
		return ins->klass ? &ins->klass->this_arg : &mono_defaults.object_class->this_arg;
	case STACK_OBJ:
		/*
		 * ins->klass may not be set for ldnull. Also, a boxed valuetype
		 * must yield an object class, not the valuetype class.
		 */
		if (ins->klass && !ins->klass->valuetype)
			return &ins->klass->byval_arg;
		return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE:
		return &ins->klass->byval_arg;
	default:
		g_error ("stack type %d to montype not handled\n", ins->type);
	}
	return NULL;
}
/* Public wrapper around type_from_stack_type (). */
MonoType*
mono_type_from_stack_type (MonoInst *ins) {
	return type_from_stack_type (ins);
}
/*
* mono_add_ins_to_end:
*
* Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
*/
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
int opcode;
if (!bb->code) {
MONO_ADD_INS (bb, inst);
return;
}
switch (bb->last_ins->opcode) {
case OP_BR:
case OP_BR_REG:
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
case OP_SWITCH:
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
break;
default:
if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
/* Need to insert the ins before the compare */
if (bb->code == bb->last_ins) {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
return;
}
if (bb->code->next == bb->last_ins) {
/* Only two instructions */
opcode = bb->code->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->code, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
} else {
opcode = bb->last_ins->prev->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
}
}
else
MONO_ADD_INS (bb, inst);
break;
}
}
/*
 * mono_create_jump_table:
 *
 *   Register a MONO_PATCH_INFO_SWITCH patch for LABEL covering the
 * NUM_BLOCKS basic blocks in BBS.
 */
void
mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
{
	MonoJumpInfoBBTable *table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
	MonoJumpInfo *ji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));

	table->table = bbs;
	table->table_size = num_blocks;

	ji->type = MONO_PATCH_INFO_SWITCH;
	ji->ip.label = label;
	ji->data.table = table;

	/* Prepend to the method's patch list */
	ji->next = cfg->patch_info;
	cfg->patch_info = ji;
}
/*
 * mono_get_array_new_va_signature:
 *
 *   Return the (cached) signature of the array-new vararg icall taking
 * ARITY rank arguments: (intptr, ARITY x intptr) -> object.
 */
static MonoMethodSignature *
mono_get_array_new_va_signature (int arity)
{
	static GHashTable *sighash = NULL;
	MonoMethodSignature *sig;
	int i;

	mono_jit_lock ();
	if (sighash) {
		sig = g_hash_table_lookup (sighash, GINT_TO_POINTER (arity));
		if (sig) {
			mono_jit_unlock ();
			return sig;
		}
	} else {
		sighash = g_hash_table_new (NULL, NULL);
	}

	sig = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
	sig->pinvoke = 1;
#ifdef MONO_ARCH_VARARG_ICALLS
	/* Only set this only some archs since not all backends can handle varargs+pinvoke */
	sig->call_convention = MONO_CALL_VARARG;
#endif
#ifdef TARGET_WIN32
	sig->call_convention = MONO_CALL_C;
#endif
	sig->params [0] = &mono_defaults.int_class->byval_arg;
	for (i = 0; i < arity; i++)
		sig->params [i + 1] = &mono_defaults.int_class->byval_arg;
	sig->ret = &mono_defaults.object_class->byval_arg;

	g_hash_table_insert (sighash, GINT_TO_POINTER (arity), sig);
	mono_jit_unlock ();

	return sig;
}
/*
 * mono_get_array_new_va_icall:
 *
 *   Return (registering it on first use) the JIT icall info for the
 * ves_array_new_va_<RANK> helper used to allocate arrays of rank RANK.
 */
MonoJitICallInfo *
mono_get_array_new_va_icall (int rank)
{
	MonoMethodSignature *esig;
	char icall_name [256];
	char *name;
	MonoJitICallInfo *info;

	/* Need to register the icall so it gets an icall wrapper */
	/* Bounded formatting: never overflow the fixed-size name buffer */
	g_snprintf (icall_name, sizeof (icall_name), "ves_array_new_va_%d", rank);

	mono_jit_lock ();
	info = mono_find_jit_icall_by_name (icall_name);
	if (info == NULL) {
		esig = mono_get_array_new_va_signature (rank);
		name = g_strdup (icall_name);
		info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);

		/* The hash owns NAME; it must stay alive as long as the icall */
		g_hash_table_insert (jit_icall_name_hash, name, name);
	}
	mono_jit_unlock ();

	return info;
}
/* Whether KLASS is a direct subclass of System.Array (i.e. an array type). */
gboolean
mini_class_is_system_array (MonoClass *klass)
{
	return klass->parent == mono_defaults.array_class;
}
/*
 * mini_assembly_can_skip_verification:
 *
 *   Whether METHOD's assembly carries the SkipVerification permission and
 * may therefore bypass IL verification.
 */
gboolean
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
	MonoAssembly *assembly = method->klass->image->assembly;

	/* Only non-wrapper methods and dynamic methods may skip */
	if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		return FALSE;
	/* Never skip for GAC assemblies, corlib, or when a security mode is active */
	if (assembly->in_gac || assembly->image == mono_defaults.corlib || mono_security_get_mode () != MONO_SECURITY_MODE_NONE)
		return FALSE;
	return mono_assembly_has_skip_verification (assembly);
}
/*
 * mini_method_verify:
 *
 * Verify the method using the new verifier.
 *
 * Returns true if the method is invalid.
 */
static gboolean
mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
	GSList *tmp, *res;
	gboolean is_fulltrust;
	MonoLoaderError *error;

	/* Already verified successfully on a previous compilation */
	if (method->verification_success)
		return FALSE;

	if (!mono_verifier_is_enabled_for_method (method))
		return FALSE;

	/*skip verification implies the assembly must be */
	is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);

	res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);

	/* A loader error during verification always fails */
	if ((error = mono_loader_get_last_error ())) {
		if (fail_compile)
			cfg->exception_type = error->exception_type;
		else
			mono_loader_clear_error ();
		if (res)
			mono_free_verify_list (res);
		return TRUE;
	}

	if (res) {
		for (tmp = res; tmp; tmp = tmp->next) {
			MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
			gboolean is_error = info->info.status == MONO_VERIFY_ERROR;
			/* Non-verifiable code only fails outside full trust, except for
			 * method/field access violations which always fail. */
			gboolean is_fatal_unverifiable = info->info.status == MONO_VERIFY_NOT_VERIFIABLE &&
				(!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS);

			if (is_error || is_fatal_unverifiable) {
				if (fail_compile) {
					char *method_name = mono_method_full_name (method, TRUE);

					cfg->exception_type = info->exception_type;
					cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
					g_free (method_name);
				}
				mono_free_verify_list (res);
				return TRUE;
			}
		}
		mono_free_verify_list (res);
	}
	method->verification_success = 1;
	return FALSE;
}
/*Returns true if something went wrong*/
gboolean
mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
{
	MonoMethod *definition = method;
	gboolean dont_verify = method->klass->image->assembly->corlib_internal;

	/* Walk up to the non-inflated generic definition */
	while (definition->is_inflated)
		definition = ((MonoMethodInflated *) definition)->declaring;

	return !dont_verify && mini_method_verify (cfg, definition, fail_compile);
}
/*
 * mono_icall_get_wrapper_full:
 *
 *   Return the wrapper/trampoline for CALLINFO, creating it on first use.
 * With DO_COMPILE the wrapper is compiled to native code, otherwise only a
 * JIT trampoline is created. The result is cached in callinfo->trampoline.
 */
static gconstpointer
mono_icall_get_wrapper_full (MonoJitICallInfo* callinfo, gboolean do_compile)
{
	char *name;
	MonoMethod *wrapper;
	gconstpointer trampoline;
	MonoDomain *domain = mono_get_root_domain ();

	/* Fast paths: already created by an earlier call */
	if (callinfo->wrapper) {
		return callinfo->wrapper;
	}

	if (callinfo->trampoline)
		return callinfo->trampoline;

	/*
	 * We use the lock on the root domain instead of the JIT lock to protect
	 * callinfo->trampoline, since we do a lot of stuff inside the critical section.
	 */
	mono_loader_lock (); /*FIXME mono_compile_method requires the loader lock, by large.*/
	mono_domain_lock (domain);

	/* Re-check under the lock: another thread may have won the race */
	if (callinfo->trampoline) {
		mono_domain_unlock (domain);
		mono_loader_unlock ();
		return callinfo->trampoline;
	}

	name = g_strdup_printf ("__icall_wrapper_%s", callinfo->name);
	wrapper = mono_marshal_get_icall_wrapper (callinfo->sig, name, callinfo->func, check_for_pending_exc);
	g_free (name);

	if (do_compile)
		trampoline = mono_compile_method (wrapper);
	else
		trampoline = mono_create_ftnptr (domain, mono_create_jit_trampoline_in_domain (domain, wrapper));
	mono_register_jit_icall_wrapper (callinfo, trampoline);

	callinfo->trampoline = trampoline;

	mono_domain_unlock (domain);
	mono_loader_unlock ();

	return callinfo->trampoline;
}
/* Return the wrapper for CALLINFO without forcing native compilation. */
gconstpointer
mono_icall_get_wrapper (MonoJitICallInfo* callinfo)
{
	return mono_icall_get_wrapper_full (callinfo, FALSE);
}
/* Register JI as the dynamic-method info for METHOD in DOMAIN's table. */
static void
mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
{
	/* Lazily create the per-domain dynamic method table */
	if (domain_jit_info (domain)->dynamic_code_hash == NULL)
		domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
}
/* Look up the dynamic-method info for METHOD in DOMAIN, or NULL. */
static MonoJitDynamicMethodInfo*
mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
{
	if (!domain_jit_info (domain)->dynamic_code_hash)
		return NULL;
	return g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
}
/*
 * Bookkeeping for one class of stack slots during linear-scan stack slot
 * allocation.
 */
typedef struct {
	/* Valuetype class sharing these slots, or NULL for scalar slot classes */
	MonoClass *vtype;
	/* Variable intervals currently overlapping / not overlapping the scan point */
	GList *active, *inactive;
	/* Offsets of freed slots available for reuse */
	GSList *slots;
} StackSlotInfo;
/*
 * Order MonoMethodVars by the start position of their live interval;
 * variables without a range sort after those with one.
 */
static gint
compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
{
	MonoMethodVar *v1 = (MonoMethodVar*)a;
	MonoMethodVar *v2 = (MonoMethodVar*)b;
	gboolean has_range1, has_range2;

	if (v1 == v2)
		return 0;

	has_range1 = v1->interval->range != NULL;
	has_range2 = v2->interval->range != NULL;
	if (has_range1 && has_range2)
		return v1->interval->range->from - v2->interval->range->from;
	if (has_range1)
		return -1;
	return 1;
}
#ifndef DISABLE_JIT
#if 0
#define LSCAN_DEBUG(a) do { a; } while (0)
#else
#define LSCAN_DEBUG(a)
#endif
/*
 * mono_allocate_stack_slots2:
 *
 *   Linear-scan stack slot allocator used when precise live intervals
 * (MonoMethodVar->interval) are available. Variables whose intervals do not
 * overlap may share a slot; slots are grouped per scalar type or per
 * valuetype class. Returns an array indexed by varinfo index holding the
 * assigned offsets (-1 for unassigned), and sets *stack_size/*stack_align.
 */
static gint32*
mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	int i, slot, offset, size;
	guint32 align;
	MonoMethodVar *vmv;
	MonoInst *inst;
	gint32 *offsets;
	GList *vars = NULL, *l, *unhandled;
	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
	MonoType *t;
	int nvtypes;
	gboolean reuse_slot;

	LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));

	/* One slot class per scalar MonoType kind; vtype classes allocated lazily */
	scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
	vtype_stack_slots = NULL;
	nvtypes = 0;

	/* -1 == no slot assigned */
	offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
	for (i = 0; i < cfg->num_varinfo; ++i)
		offsets [i] = -1;

	/* Collect the variables which still need a stack slot */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		inst = cfg->varinfo [i];
		vmv = MONO_VARINFO (cfg, i);

		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
			continue;

		vars = g_list_prepend (vars, vmv);
	}

	/* Process variables in order of interval start position */
	vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);

	/* Sanity check */
	/*
	i = 0;
	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
		MonoMethodVar *current = unhandled->data;

		if (current->interval->range) {
			g_assert (current->interval->range->from >= i);
			i = current->interval->range->from;
		}
	}
	*/

	offset = 0;
	*stack_align = 0;
	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
		MonoMethodVar *current = unhandled->data;

		vmv = current;
		inst = cfg->varinfo [vmv->idx];

		t = mono_type_get_underlying_type (inst->inst_vtype);
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
			t = mini_get_gsharedvt_alloc_type_for_type (cfg, t);

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (t), &align);
		}
		else {
			int ialign;

			size = mono_type_size (t, &ialign);
			align = ialign;

			/* SIMD types require 16 byte alignment */
			if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
				align = 16;
		}

		reuse_slot = TRUE;
		if (cfg->disable_reuse_stack_slots)
			reuse_slot = FALSE;

		/* Pick the slot class this variable shares slots with */
		switch (t->type) {
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				slot_info = &scalar_stack_slots [t->type];
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
			if (!vtype_stack_slots)
				vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
			/* One slot class per distinct valuetype class */
			for (i = 0; i < nvtypes; ++i)
				if (t->data.klass == vtype_stack_slots [i].vtype)
					break;
			if (i < nvtypes)
				slot_info = &vtype_stack_slots [i];
			else {
				g_assert (nvtypes < 256);
				vtype_stack_slots [nvtypes].vtype = t->data.klass;
				slot_info = &vtype_stack_slots [nvtypes];
				nvtypes ++;
			}
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;
		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
		case MONO_TYPE_I4:
#else
		case MONO_TYPE_I8:
#endif
			if (cfg->disable_ref_noref_stack_slot_share) {
				slot_info = &scalar_stack_slots [MONO_TYPE_I];
				break;
			}
			/* Fall through */
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_STRING:
			/* Share non-float stack slots of the same size */
			slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;
		default:
			slot_info = &scalar_stack_slots [t->type];
		}

		/* 0xffffff is the sentinel for "no reusable slot found yet" */
		slot = 0xffffff;
		if (cfg->comp_done & MONO_COMP_LIVENESS) {
			int pos;
			gboolean changed;

			//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);

			if (!current->interval->range) {
				if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
					pos = ~0;
				else {
					/* Dead */
					inst->flags |= MONO_INST_IS_DEAD;
					continue;
				}
			}
			else
				pos = current->interval->range->from;

			LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
			if (current->interval->range)
				LSCAN_DEBUG (mono_linterval_print (current->interval));
			LSCAN_DEBUG (printf ("\n"));

			/* Check for intervals in active which expired or inactive */
			changed = TRUE;
			/* FIXME: Optimize this */
			while (changed) {
				changed = FALSE;
				for (l = slot_info->active; l != NULL; l = l->next) {
					MonoMethodVar *v = (MonoMethodVar*)l->data;

					if (v->interval->last_range->to < pos) {
						/* Expired: its slot becomes reusable */
						slot_info->active = g_list_delete_link (slot_info->active, l);
						slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
						changed = TRUE;
						break;
					}
					else if (!mono_linterval_covers (v->interval, pos)) {
						/* In a lifetime hole: move to inactive */
						slot_info->inactive = g_list_append (slot_info->inactive, v);
						slot_info->active = g_list_delete_link (slot_info->active, l);
						LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
						changed = TRUE;
						break;
					}
				}
			}

			/* Check for intervals in inactive which expired or active */
			changed = TRUE;
			/* FIXME: Optimize this */
			while (changed) {
				changed = FALSE;
				for (l = slot_info->inactive; l != NULL; l = l->next) {
					MonoMethodVar *v = (MonoMethodVar*)l->data;

					if (v->interval->last_range->to < pos) {
						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
						// FIXME: Enabling this seems to cause impossible to debug crashes
						//slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
						changed = TRUE;
						break;
					}
					else if (mono_linterval_covers (v->interval, pos)) {
						slot_info->active = g_list_append (slot_info->active, v);
						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
						LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
						changed = TRUE;
						break;
					}
				}
			}

			/*
			 * This also handles the case when the variable is used in an
			 * exception region, as liveness info is not computed there.
			 */
			/*
			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
			 * opcodes.
			 */
			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
				if (slot_info->slots) {
					slot = GPOINTER_TO_INT (slot_info->slots->data);

					slot_info->slots = slot_info->slots->next;
				}

				/* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */

				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
			}
		}

#if 0
		{
			static int count = 0;
			count ++;

			if (count == atoi (getenv ("COUNT3")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (getenv ("COUNT3")))
				slot = 0xffffff;
			else {
				mono_print_ins (inst);
				}
		}
#endif

		LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));

		if (inst->flags & MONO_INST_LMF) {
			/* MonoLMF has no CLR type; hard-code its size/alignment */
			size = sizeof (MonoLMF);
			align = sizeof (mgreg_t);
			reuse_slot = FALSE;
		}

		if (!reuse_slot)
			slot = 0xffffff;

		if (slot == 0xffffff) {
			/*
			 * Allways allocate valuetypes to sizeof (gpointer) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignores alignment).
			 */
			if (MONO_TYPE_ISSTRUCT (t)) {
				align = MAX (align, sizeof (gpointer));
				align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
			}

			if (backward) {
				offset += size;
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
			}
			else {
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
				offset += size;
			}

			/* NOTE(review): unlike mono_allocate_stack_slots (), only the FIRST
			 * allocated slot's alignment is recorded here — confirm this is
			 * intentional for the interval-based allocator. */
			if (*stack_align == 0)
				*stack_align = align;
		}

		offsets [vmv->idx] = slot;
	}
	g_list_free (vars);
	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
		if (scalar_stack_slots [i].active)
			g_list_free (scalar_stack_slots [i].active);
	}
	for (i = 0; i < nvtypes; ++i) {
		if (vtype_stack_slots [i].active)
			g_list_free (vtype_stack_slots [i].active);
	}
	cfg->stat_locals_stack_size += offset;

	*stack_size = offset;
	return offsets;
}
/*
 * mono_allocate_stack_slots:
 *
 *  Allocate stack slots for all non register allocated variables using a
 * linear scan algorithm.
 * Returns: an array of stack offsets (-1 for variables without a slot).
 * STACK_SIZE is set to the amount of stack space needed.
 * STACK_ALIGN is set to the alignment needed by the locals area.
 */
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	int i, slot, offset, size;
	guint32 align;
	MonoMethodVar *vmv;
	MonoInst *inst;
	gint32 *offsets;
	GList *vars = NULL, *l;
	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
	MonoType *t;
	int nvtypes;
	gboolean reuse_slot;

	/* Delegate to the interval-based allocator when live intervals exist */
	if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
		return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);

	/* One slot class per scalar MonoType kind; vtype classes allocated lazily */
	scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
	vtype_stack_slots = NULL;
	nvtypes = 0;

	/* -1 == no slot assigned */
	offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
	for (i = 0; i < cfg->num_varinfo; ++i)
		offsets [i] = -1;

	/* Collect the variables which still need a stack slot */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		inst = cfg->varinfo [i];
		vmv = MONO_VARINFO (cfg, i);

		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
			continue;

		vars = g_list_prepend (vars, vmv);
	}

	vars = mono_varlist_sort (cfg, vars, 0);
	offset = 0;
	*stack_align = sizeof(mgreg_t);
	for (l = vars; l; l = l->next) {
		vmv = l->data;
		inst = cfg->varinfo [vmv->idx];

		t = mono_type_get_underlying_type (inst->inst_vtype);
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
			t = mini_get_gsharedvt_alloc_type_for_type (cfg, t);

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (t), &align);
		} else {
			int ialign;

			size = mono_type_size (t, &ialign);
			align = ialign;

			/* A broken class cannot be laid out; fail the compilation */
			if (mono_class_from_mono_type (t)->exception_type)
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);

			/* SIMD types require 16 byte alignment */
			if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
				align = 16;
		}

		reuse_slot = TRUE;
		if (cfg->disable_reuse_stack_slots)
			reuse_slot = FALSE;

		/* Pick the slot class this variable shares slots with */
		if (t->byref) {
			slot_info = &scalar_stack_slots [MONO_TYPE_I];
		} else {
			switch (t->type) {
			case MONO_TYPE_GENERICINST:
				if (!mono_type_generic_inst_is_valuetype (t)) {
					slot_info = &scalar_stack_slots [t->type];
					break;
				}
				/* Fall through */
			case MONO_TYPE_VALUETYPE:
				if (!vtype_stack_slots)
					vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
				/* One slot class per distinct valuetype class */
				for (i = 0; i < nvtypes; ++i)
					if (t->data.klass == vtype_stack_slots [i].vtype)
						break;
				if (i < nvtypes)
					slot_info = &vtype_stack_slots [i];
				else {
					g_assert (nvtypes < 256);
					vtype_stack_slots [nvtypes].vtype = t->data.klass;
					slot_info = &vtype_stack_slots [nvtypes];
					nvtypes ++;
				}
				if (cfg->disable_reuse_ref_stack_slots)
					reuse_slot = FALSE;
				break;
			case MONO_TYPE_PTR:
			case MONO_TYPE_I:
			case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
			case MONO_TYPE_I4:
#else
			case MONO_TYPE_I8:
#endif
				if (cfg->disable_ref_noref_stack_slot_share) {
					slot_info = &scalar_stack_slots [MONO_TYPE_I];
					break;
				}
				/* Fall through */
			case MONO_TYPE_CLASS:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_ARRAY:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_STRING:
				/* Share non-float stack slots of the same size */
				slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
				if (cfg->disable_reuse_ref_stack_slots)
					reuse_slot = FALSE;
				break;
			case MONO_TYPE_VAR:
			case MONO_TYPE_MVAR:
				slot_info = &scalar_stack_slots [t->type];
				break;
			default:
				slot_info = &scalar_stack_slots [t->type];
				break;
			}
		}

		/* 0xffffff is the sentinel for "no reusable slot found yet" */
		slot = 0xffffff;
		if (cfg->comp_done & MONO_COMP_LIVENESS) {
			//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);

			/* expire old intervals in active */
			while (slot_info->active) {
				MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;

				if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
					break;

				//printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);

				slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
				slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
			}

			/*
			 * This also handles the case when the variable is used in an
			 * exception region, as liveness info is not computed there.
			 */
			/*
			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
			 * opcodes.
			 */
			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
				if (slot_info->slots) {
					slot = GPOINTER_TO_INT (slot_info->slots->data);

					slot_info->slots = slot_info->slots->next;
				}

				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
			}
		}

		/* Disabled debugging aid (bisection via the COUNT env var) */
		{
			static int count = 0;
			count ++;

			/*
			if (count == atoi (getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (getenv ("COUNT")))
				slot = 0xffffff;
			else {
				mono_print_ins (inst);
				}
			*/
		}

		if (inst->flags & MONO_INST_LMF) {
			/*
			 * This variable represents a MonoLMF structure, which has no corresponding
			 * CLR type, so hard-code its size/alignment.
			 */
			size = sizeof (MonoLMF);
			align = sizeof (mgreg_t);
			reuse_slot = FALSE;
		}

		if (!reuse_slot)
			slot = 0xffffff;

		if (slot == 0xffffff) {
			/*
			 * Allways allocate valuetypes to sizeof (gpointer) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignores alignment).
			 */
			if (MONO_TYPE_ISSTRUCT (t)) {
				align = MAX (align, sizeof (gpointer));
				align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
				/*
				 * Align the size too so the code generated for passing vtypes in
				 * registers doesn't overwrite random locals.
				 */
				size = (size + (align - 1)) & ~(align -1);
			}

			if (backward) {
				offset += size;
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
			}
			else {
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
				offset += size;
			}

			*stack_align = MAX (*stack_align, align);
		}

		offsets [vmv->idx] = slot;
	}
	g_list_free (vars);
	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
		if (scalar_stack_slots [i].active)
			g_list_free (scalar_stack_slots [i].active);
	}
	for (i = 0; i < nvtypes; ++i) {
		if (vtype_stack_slots [i].active)
			g_list_free (vtype_stack_slots [i].active);
	}
	cfg->stat_locals_stack_size += offset;

	*stack_size = offset;
	return offsets;
}
#else
/* Stub used when the JIT is disabled: must never be reached. */
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache: one bit per opcode telling whether an emulation
 * was registered for it. The byte index is opcode >> EMUL_HIT_SHIFT and the
 * bit index is the low EMUL_HIT_SHIFT bits, matching the array size below. */
static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
static short emul_opcode_num = 0;
static short emul_opcode_alloced = 0;
static short *emul_opcode_opcodes = NULL;
static MonoJitICallInfo **emul_opcode_map = NULL;

/*
 * mono_find_jit_opcode_emulation:
 *
 *   Return the icall info registered as the emulation for OPCODE, or NULL.
 */
MonoJitICallInfo *
mono_find_jit_opcode_emulation (int opcode)
{
	g_assert (opcode >= 0 && opcode <= OP_LAST);
	/* The previous indexing (opcode >> (EMUL_HIT_SHIFT + 3)) aliased 64
	 * unrelated opcodes onto each cache byte, causing needless linear scans
	 * on false positives; index per-opcode bits instead. */
	if (emul_opcode_hit_cache [opcode >> EMUL_HIT_SHIFT] & (1 << (opcode & EMUL_HIT_MASK))) {
		int i;
		for (i = 0; i < emul_opcode_num; ++i) {
			if (emul_opcode_opcodes [i] == opcode)
				return emul_opcode_map [i];
		}
	}
	return NULL;
}

/*
 * mono_register_opcode_emulation:
 *
 *   Register FUNC (with signature SIGSTR) as the software emulation for
 * OPCODE and record it in the hit cache.
 */
void
mono_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, gboolean no_throw)
{
	MonoJitICallInfo *info;
	MonoMethodSignature *sig = mono_create_icall_signature (sigstr);

	g_assert (!sig->hasthis);
	g_assert (sig->param_count < 3);

	info = mono_register_jit_icall (func, name, sig, no_throw);

	/* Grow the parallel opcode/info arrays by 50% (initially 16 entries) */
	if (emul_opcode_num >= emul_opcode_alloced) {
		int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
		emul_opcode_alloced += incr;
		emul_opcode_map = g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
		emul_opcode_opcodes = g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
	}
	emul_opcode_map [emul_opcode_num] = info;
	emul_opcode_opcodes [emul_opcode_num] = opcode;
	emul_opcode_num++;
	/* Must mirror the lookup indexing in mono_find_jit_opcode_emulation () */
	emul_opcode_hit_cache [opcode >> EMUL_HIT_SHIFT] |= (1 << (opcode & EMUL_HIT_MASK));
}
/* Convenience helper to register a JIT icall; SIGSTR may be NULL when no
 * signature is needed. */
static void
register_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
{
	MonoMethodSignature *sig = sigstr ? mono_create_icall_signature (sigstr) : NULL;

	mono_register_jit_icall (func, name, sig, save);
}
/*
 * print_dfn:
 *
 *   Debugging aid: dump the IR of every basic block of CFG together with
 * predecessor/successor lists and dominator information.
 */
static void
print_dfn (MonoCompile *cfg) {
	int i, j;
	char *code;
	MonoBasicBlock *bb;
	MonoInst *c;

	{
		char *method_name = mono_method_full_name (cfg->method, TRUE);
		g_print ("IR code for method %s\n", method_name);
		g_free (method_name);
	}

	for (i = 0; i < cfg->num_bblocks; ++i) {
		bb = cfg->bblocks [i];
		/*if (bb->cil_code) {
			char* code1, *code2;
			code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
			if (bb->last_ins->cil_code)
				code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
			else
				code2 = g_strdup ("");

			code1 [strlen (code1) - 1] = 0;
			code = g_strdup_printf ("%s -> %s", code1, code2);
			g_free (code1);
			g_free (code2);
		} else*/
		code = g_strdup ("\n");
		g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
		MONO_BB_FOR_EACH_INS (bb, c) {
			mono_print_ins_index (-1, c);
		}

		/* Predecessor and successor block lists */
		g_print ("\tprev:");
		for (j = 0; j < bb->in_count; ++j) {
			g_print (" BB%d", bb->in_bb [j]->block_num);
		}
		g_print ("\t\tsucc:");
		for (j = 0; j < bb->out_count; ++j) {
			g_print (" BB%d", bb->out_bb [j]->block_num);
		}
		g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);

		/* The immediate dominator must be in the dominator set */
		if (bb->idom)
			g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));

		if (bb->dominators)
			mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
		if (bb->dfrontier)
			mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);

		g_free (code);
	}

	g_print ("\n");
}
/* Append INST at the end of BB (function form of MONO_ADD_INS). */
void
mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
{
	MONO_ADD_INS (bb, inst);
}
/*
 * mono_bblock_insert_after_ins:
 *
 *   Insert INS_TO_INSERT into BB immediately after INS; a NULL INS means
 * insert at the head of the block.
 */
void
mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
	if (!ins) {
		/* Insert at the head */
		MonoInst *old_head = bb->code;

		bb->code = ins_to_insert;
		ins_to_insert->next = old_head;
		if (old_head)
			old_head->prev = ins_to_insert;
		if (!bb->last_ins)
			bb->last_ins = ins_to_insert;
	} else {
		MonoInst *after = ins->next;

		/* Link with next */
		ins_to_insert->next = after;
		if (after)
			after->prev = ins_to_insert;
		/* Link with previous */
		ins->next = ins_to_insert;
		ins_to_insert->prev = ins;
		if (bb->last_ins == ins)
			bb->last_ins = ins_to_insert;
	}
}
/*
 * mono_bblock_insert_before_ins:
 *
 *   Insert INS_TO_INSERT into BB immediately before INS; a NULL INS means
 * insert at the head of the block.
 */
void
mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
	if (!ins) {
		/* Insert at the head */
		MonoInst *old_head = bb->code;

		if (old_head)
			old_head->prev = ins_to_insert;
		bb->code = ins_to_insert;
		ins_to_insert->next = old_head;
		if (!bb->last_ins)
			bb->last_ins = ins_to_insert;
	} else {
		MonoInst *before = ins->prev;

		/* Link with previous */
		if (before)
			before->next = ins_to_insert;
		ins_to_insert->prev = before;
		/* Link with next */
		ins->prev = ins_to_insert;
		ins_to_insert->next = ins;
		/* Update the head if INS was the first instruction */
		if (bb->code == ins)
			bb->code = ins_to_insert;
	}
}
/*
 * mono_verify_bblock:
 *
 * Verify that the next and prev pointers are consistent inside the instructions in BB.
 */
void
mono_verify_bblock (MonoBasicBlock *bb)
{
	MonoInst *cur, *expected_prev = NULL;

	for (cur = bb->code; cur; cur = cur->next) {
		g_assert (cur->prev == expected_prev);
		expected_prev = cur;
	}
	/* The last instruction must terminate the list */
	if (bb->last_ins)
		g_assert (!bb->last_ins->next);
}
/*
 * mono_verify_cfg:
 *
 * Perform consistency checks on the JIT data structures and the IR
 */
void
mono_verify_cfg (MonoCompile *cfg)
{
	MonoBasicBlock *bb = cfg->bb_entry;

	while (bb) {
		mono_verify_bblock (bb);
		bb = bb->next_bb;
	}
}
/*
 * mono_destroy_compile:
 *
 *   Free all resources owned by CFG, including its mempool, and CFG itself.
 */
void
mono_destroy_compile (MonoCompile *cfg)
{
	GSList *l;

	if (cfg->header)
		mono_metadata_free_mh (cfg->header);
	//mono_mempool_stats (cfg->mempool);
	mono_free_loop_info (cfg);
	if (cfg->rs)
		mono_regstate_free (cfg->rs);
	if (cfg->spvars)
		g_hash_table_destroy (cfg->spvars);
	if (cfg->exvars)
		g_hash_table_destroy (cfg->exvars);
	/* Method headers accumulated during inlining */
	for (l = cfg->headers_to_free; l; l = l->next)
		mono_metadata_free_mh (l->data);
	g_list_free (cfg->ldstr_list);
	g_hash_table_destroy (cfg->token_info_hash);
	if (cfg->abs_patches)
		g_hash_table_destroy (cfg->abs_patches);
	/* The mempool owns most per-compile allocations; destroy it last
	 * among them, then the malloc'ed arrays and CFG itself */
	mono_mempool_destroy (cfg->mempool);
	mono_debug_free_method (cfg);
	g_free (cfg->varinfo);
	g_free (cfg->vars);
	g_free (cfg->exception_message);
	g_free (cfg);
}
#ifdef MONO_HAVE_FAST_TLS
/* Fast TLS slot holding the address of the current thread's LMF */
MONO_FAST_TLS_DECLARE(mono_lmf_addr);
#ifdef MONO_ARCH_ENABLE_MONO_LMF_VAR
/*
 * When this is defined, the current lmf is stored in this tls variable instead of in
 * jit_tls->lmf.
 */
MONO_FAST_TLS_DECLARE(mono_lmf);
#endif
#endif
/* Return the native TLS key under which the per-thread MonoJitTlsData is stored. */
MonoNativeTlsKey
mono_get_jit_tls_key (void)
{
	return mono_jit_tls_id;
}
/* Return the TLS offset of the jit_tls variable (arch-specific macro). */
gint32
mono_get_jit_tls_offset (void)
{
	int offset;
	/* Filled in by the arch-specific MONO_THREAD_VAR_OFFSET macro */
	MONO_THREAD_VAR_OFFSET (mono_jit_tls, offset);
	return offset;
}
/* Return the TLS offset of the fast lmf variable, or -1 when unavailable. */
gint32
mono_get_lmf_tls_offset (void)
{
#if defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	int offset;
	MONO_THREAD_VAR_OFFSET(mono_lmf,offset);
	return offset;
#else
	return -1;
#endif
}
/* Return the TLS offset of the mono_lmf_addr variable. */
gint32
mono_get_lmf_addr_tls_offset (void)
{
	int offset;
	MONO_THREAD_VAR_OFFSET(mono_lmf_addr,offset);
	return offset;
}
/*
 * mono_get_lmf:
 *
 * Return the current thread's Last Managed Frame, either from the fast
 * TLS variable (when the arch enables it) or from jit_tls->lmf.
 * Returns NULL for threads that have no jit_tls yet.
 */
MonoLMF *
mono_get_lmf (void)
{
#if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	return MONO_FAST_TLS_GET (mono_lmf);
#else
	MonoJitTlsData *jit_tls;

	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return jit_tls->lmf;
	/*
	 * We do not assert here because this function can be called from
	 * mini-gc.c on a thread that has not executed any managed code, yet
	 * (the thread object allocation can trigger a collection).
	 */
	return NULL;
#endif
}
/*
 * mono_get_lmf_addr:
 *
 * Return the address of the current thread's LMF pointer, attaching the
 * thread to the runtime if needed. Never returns NULL (asserts instead).
 */
MonoLMF **
mono_get_lmf_addr (void)
{
#ifdef MONO_HAVE_FAST_TLS
	return MONO_FAST_TLS_GET (mono_lmf_addr);
#else
	MonoJitTlsData *jit_tls;

	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return &jit_tls->lmf;
	/*
	 * When resolving the call to mono_jit_thread_attach full-aot will look
	 * in the plt, which causes a call into the generic trampoline, which in turn
	 * tries to resolve the lmf_addr creating a cyclic dependency. We cannot
	 * call mono_jit_thread_attach from the native-to-managed wrapper, without
	 * mono_get_lmf_addr, and mono_get_lmf_addr requires the thread to be attached.
	 */
	mono_jit_thread_attach (NULL);
	/* Attaching should have created the jit_tls; try again */
	if ((jit_tls = mono_native_tls_get_value (mono_jit_tls_id)))
		return &jit_tls->lmf;
	g_assert_not_reached ();
	return NULL;
#endif
}
/*
 * mono_set_lmf:
 *
 * Store LMF as the current thread's Last Managed Frame, updating both
 * the fast TLS variable (when enabled) and the slot behind
 * mono_get_lmf_addr ().
 */
void
mono_set_lmf (MonoLMF *lmf)
{
#if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	MONO_FAST_TLS_SET (mono_lmf, lmf);
#endif
	(*mono_get_lmf_addr ()) = lmf;
}
/*
 * mono_set_jit_tls:
 *
 * Store JIT_TLS in both the native TLS slot and, when available, the
 * fast TLS variable so both lookup paths stay in sync.
 */
static void
mono_set_jit_tls (MonoJitTlsData *jit_tls)
{
	mono_native_tls_set_value (mono_jit_tls_id, jit_tls);
#ifdef MONO_HAVE_FAST_TLS
	MONO_FAST_TLS_SET (mono_jit_tls, jit_tls);
#endif
}
/*
 * mono_set_lmf_addr:
 *
 * Cache LMF_ADDR in the fast TLS variable. A no-op when fast TLS is not
 * available (the address is then derived from jit_tls on demand).
 */
static void
mono_set_lmf_addr (gpointer lmf_addr)
{
#ifdef MONO_HAVE_FAST_TLS
	MONO_FAST_TLS_SET (mono_lmf_addr, lmf_addr);
#endif
}
/*
 * mono_jit_thread_attach:
 *
 * Called by native->managed wrappers. Attaches the current thread to the
 * runtime if it is not attached yet, then switches to DOMAIN (or the root
 * domain when DOMAIN is NULL). Returns the original domain which needs to
 * be restored by the caller, or NULL if no switch was necessary.
 */
MonoDomain*
mono_jit_thread_attach (MonoDomain *domain)
{
	MonoDomain *orig;

	if (!domain)
		/*
		 * Happens when called from AOTed code which is only used in the root
		 * domain.
		 */
		domain = mono_get_root_domain ();

#ifdef MONO_HAVE_FAST_TLS
	/* A NULL lmf_addr means the thread was never attached */
	if (!MONO_FAST_TLS_GET (mono_lmf_addr)) {
		mono_thread_attach (domain);
		// #678164
		mono_thread_set_state (mono_thread_internal_current (), ThreadState_Background);
	}
#else
	if (!mono_native_tls_get_value (mono_jit_tls_id)) {
		mono_thread_attach (domain);
		mono_thread_set_state (mono_thread_internal_current (), ThreadState_Background);
	}
#endif
	orig = mono_domain_get ();
	if (orig != domain)
		mono_domain_set (domain, TRUE);

	/* NULL tells the caller no domain restore is needed */
	return orig != domain ? orig : NULL;
}
/*
 * mono_jit_set_domain:
 *
 * Called by native->managed wrappers. Switch the current thread to
 * DOMAIN; a NULL domain is ignored.
 */
void
mono_jit_set_domain (MonoDomain *domain)
{
	if (!domain)
		return;
	mono_domain_set (domain, TRUE);
}
/**
 * mono_thread_abort:
 * @obj: exception object
 *
 * Abort the current thread. Under the legacy unhandled-exception policy,
 * or when OBJ is a ThreadAbortException, the thread simply exits;
 * otherwise the unhandled exception hook is invoked.
 */
static void
mono_thread_abort (MonoObject *obj)
{
	/* MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id); */
	/* handle_remove should be eventually called for this thread, too
	g_free (jit_tls);*/
	if ((mono_runtime_unhandled_exception_policy_get () == MONO_UNHANDLED_POLICY_LEGACY) ||
			(obj->vtable->klass == mono_defaults.threadabortexception_class)) {
		mono_thread_exit ();
	} else {
		mono_invoke_unhandled_exception_hook (obj);
	}
}
/*
 * setup_jit_tls_data:
 *
 * Create and register the per-thread MonoJitTlsData, allocate the initial
 * LMF and wire up the LMF address, then set up the signal altstack.
 * Idempotent: returns the existing jit_tls if the thread already has one.
 * ABORT_FUNC is stored as the thread's abort callback.
 */
static void*
setup_jit_tls_data (gpointer stack_start, gpointer abort_func)
{
	MonoJitTlsData *jit_tls;
	MonoLMF *lmf;

	jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	if (jit_tls)
		return jit_tls;

	jit_tls = g_new0 (MonoJitTlsData, 1);
	jit_tls->abort_func = abort_func;
	jit_tls->end_of_stack = stack_start;
	/* Publish before wiring the LMF so TLS lookups see the new data */
	mono_set_jit_tls (jit_tls);

	lmf = g_new0 (MonoLMF, 1);
	MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf);
	jit_tls->first_lmf = lmf;

#if defined(MONO_HAVE_FAST_TLS) && defined(MONO_ARCH_ENABLE_MONO_LMF_VAR)
	/* jit_tls->lmf is unused */
	MONO_FAST_TLS_SET (mono_lmf, lmf);
	mono_set_lmf_addr (MONO_FAST_TLS_ADDR (mono_lmf));
#else
	mono_set_lmf_addr (&jit_tls->lmf);
	jit_tls->lmf = lmf;
#endif
	mono_setup_altstack (jit_tls);
	return jit_tls;
}
/*
 * free_jit_tls_data:
 *
 * Tear down JIT_TLS: release arch specific state and the signal
 * altstack, then free the preallocated first LMF and the structure
 * itself.
 */
static void
free_jit_tls_data (MonoJitTlsData *jit_tls)
{
	mono_arch_free_jit_tls_data (jit_tls);
	mono_free_altstack (jit_tls);

	g_free (jit_tls->first_lmf);
	g_free (jit_tls);
}
/*
 * mono_thread_start_cb:
 *
 * Thread start callback: set up the jit_tls data for the new thread,
 * notify the debugger, and initialize per-thread CPU state.
 */
static void
mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func)
{
	MonoInternalThread *thread;
	void *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort);

	thread = mono_thread_internal_current ();
	/*
	 * FIX: the original dereferenced THREAD (thread->root_domain_thread)
	 * before the NULL check below, crashing when no internal thread
	 * object exists yet. Guard the dereference instead.
	 */
	mono_debugger_thread_created (tid, thread ? thread->root_domain_thread : NULL, jit_tls, func);
	if (thread)
		thread->jit_data = jit_tls;
	mono_arch_cpu_init ();
}
void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL;
/*
 * mono_thread_abort_dummy:
 *
 * Abort callback for attached threads: dispatch to the registered
 * attach-abort hook when one is set, otherwise fall back to
 * mono_thread_abort ().
 */
static void
mono_thread_abort_dummy (MonoObject *obj)
{
	void (*cb) (MonoObject *) = mono_thread_attach_aborted_cb;

	if (cb)
		cb (obj);
	else
		mono_thread_abort (obj);
}
/*
 * mono_thread_attach_cb:
 *
 * Callback run when an existing native thread attaches to the runtime:
 * set up jit_tls, notify the debugger, enable the statistical profiler
 * when requested, and initialize per-thread CPU state.
 */
static void
mono_thread_attach_cb (intptr_t tid, gpointer stack_start)
{
	MonoInternalThread *thread;
	void *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort_dummy);

	thread = mono_thread_internal_current ();
	/*
	 * FIX: the original dereferenced THREAD (thread->root_domain_thread)
	 * before the NULL check below, crashing when no internal thread
	 * object exists yet. Guard the dereference instead.
	 */
	mono_debugger_thread_created (tid, thread ? thread->root_domain_thread : NULL, (MonoJitTlsData *) jit_tls, NULL);
	if (thread)
		thread->jit_data = jit_tls;
	if (mono_profiler_get_events () & MONO_PROFILE_STATISTICAL)
		mono_runtime_setup_stat_profiler ();
	mono_arch_cpu_init ();
}
/*
 * mini_thread_cleanup:
 *
 * Release the JIT thread-local state attached to THREAD: notify the
 * debugger, clear the TLS slots (only when running on the thread
 * itself), reset the LMF and free the jit_tls data.
 */
static void
mini_thread_cleanup (MonoInternalThread *thread)
{
	MonoJitTlsData *jit_tls = thread->jit_data;

	if (jit_tls) {
		mono_debugger_thread_cleanup (jit_tls);

		/* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff
		 * It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's
		 * not a trivial thing.
		 *
		 * The current offender is mono_thread_manage which cleanup threads from the outside.
		 */
		if (thread == mono_thread_internal_current ())
			mono_set_jit_tls (NULL);

		/* If we attach a thread but never call into managed land, we might never get an lmf.*/
		if (mono_get_lmf ()) {
			mono_set_lmf (NULL);
			mono_set_lmf_addr (NULL);
		}

		free_jit_tls_data (jit_tls);

		thread->jit_data = NULL;
	}
}
/*
 * mono_create_tls_get:
 *
 * Create an OP_TLS_GET instruction loading the TLS slot at OFFSET into a
 * fresh pseudo register. Returns NULL when the architecture has no TLS
 * getter support or OFFSET is -1 (slot unavailable).
 */
static MonoInst*
mono_create_tls_get (MonoCompile *cfg, int offset)
{
#ifdef MONO_ARCH_HAVE_TLS_GET
	if (MONO_ARCH_HAVE_TLS_GET) {
		MonoInst* ins;

		if (offset == -1)
			return NULL;

		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
		ins->dreg = mono_alloc_preg (cfg);
		ins->inst_offset = offset;
		return ins;
	}
#endif
	return NULL;
}
/*
 * mono_get_jit_tls_intrinsic:
 *
 * Return an OP_TLS_GET instruction loading the jit_tls slot, or NULL
 * when TLS getters are unavailable.
 */
MonoInst*
mono_get_jit_tls_intrinsic (MonoCompile *cfg)
{
	int offset = mono_get_jit_tls_offset ();

	return mono_create_tls_get (cfg, offset);
}
/*
 * mono_get_domain_intrinsic:
 *
 * Return an OP_TLS_GET instruction loading the current-domain slot, or
 * NULL when TLS getters are unavailable.
 */
MonoInst*
mono_get_domain_intrinsic (MonoCompile* cfg)
{
	int offset = mono_domain_get_tls_offset ();

	return mono_create_tls_get (cfg, offset);
}
/*
 * mono_get_thread_intrinsic:
 *
 * Return an OP_TLS_GET instruction loading the current-thread slot, or
 * NULL when TLS getters are unavailable.
 */
MonoInst*
mono_get_thread_intrinsic (MonoCompile* cfg)
{
	int offset = mono_thread_get_tls_offset ();

	return mono_create_tls_get (cfg, offset);
}
/*
 * mono_get_lmf_intrinsic:
 *
 * Return an OP_TLS_GET instruction loading the LMF slot, or NULL when
 * TLS getters are unavailable or the arch has no LMF TLS variable.
 */
MonoInst*
mono_get_lmf_intrinsic (MonoCompile* cfg)
{
	int offset = mono_get_lmf_tls_offset ();

	return mono_create_tls_get (cfg, offset);
}
/*
 * mono_add_patch_info:
 *
 * Allocate a MonoJumpInfo from the cfg mempool describing a patch of
 * TYPE at native offset IP with the given TARGET, and prepend it to
 * cfg->patch_info.
 */
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
	MonoJumpInfo *info;

	info = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
	info->ip.i = ip;
	info->type = type;
	info->data.target = target;

	/* push onto the per-compile patch list */
	info->next = cfg->patch_info;
	cfg->patch_info = info;
}
/*
 * mono_patch_info_list_prepend:
 *
 * Allocate a new MonoJumpInfo on the heap describing a patch of TYPE at
 * IP with TARGET, prepend it to LIST and return the new list head.
 */
MonoJumpInfo *
mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target)
{
	MonoJumpInfo *head;

	head = g_new0 (MonoJumpInfo, 1);
	head->ip.i = ip;
	head->type = type;
	head->data.target = target;
	head->next = list;

	return head;
}
/*
 * mono_remove_patch_info:
 *
 * Remove every patch whose ip equals IP from cfg->patch_info. Uses a
 * pointer-to-pointer walk so list heads and interior nodes are unlinked
 * uniformly; the nodes themselves live in the mempool and are not freed.
 */
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
	MonoJumpInfo **ji = &cfg->patch_info;

	while (*ji) {
		if ((*ji)->ip.i == ip)
			/* unlink; do not advance, the new *ji may match too */
			*ji = (*ji)->next;
		else
			ji = &((*ji)->next);
	}
}
/**
 * mono_patch_info_dup_mp:
 *
 * Make a copy of PATCH_INFO, allocating memory from the mempool MP.
 * The data union is deep-copied for the patch kinds that point to
 * auxiliary structures (tokens, switch tables, rgctx entries,
 * gsharedvt call info); all other kinds share the target pointer.
 */
MonoJumpInfo*
mono_patch_info_dup_mp (MonoMemPool *mp, MonoJumpInfo *patch_info)
{
	MonoJumpInfo *res = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	memcpy (res, patch_info, sizeof (MonoJumpInfo));

	switch (patch_info->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		res->data.token = mono_mempool_alloc (mp, sizeof (MonoJumpInfoToken));
		memcpy (res->data.token, patch_info->data.token, sizeof (MonoJumpInfoToken));
		break;
	case MONO_PATCH_INFO_SWITCH:
		/* copy the table header and the bblock pointer array it owns */
		res->data.table = mono_mempool_alloc (mp, sizeof (MonoJumpInfoBBTable));
		memcpy (res->data.table, patch_info->data.table, sizeof (MonoJumpInfoBBTable));
		res->data.table->table = mono_mempool_alloc (mp, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size);
		memcpy (res->data.table->table, patch_info->data.table->table, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size);
		break;
	case MONO_PATCH_INFO_RGCTX_FETCH:
		res->data.rgctx_entry = mono_mempool_alloc (mp, sizeof (MonoJumpInfoRgctxEntry));
		memcpy (res->data.rgctx_entry, patch_info->data.rgctx_entry, sizeof (MonoJumpInfoRgctxEntry));
		/* the entry embeds another patch info: duplicate recursively */
		res->data.rgctx_entry->data = mono_patch_info_dup_mp (mp, res->data.rgctx_entry->data);
		break;
	case MONO_PATCH_INFO_GSHAREDVT_CALL:
		res->data.gsharedvt = mono_mempool_alloc (mp, sizeof (MonoJumpInfoGSharedVtCall));
		memcpy (res->data.gsharedvt, patch_info->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall));
		break;
	default:
		/* shallow copy is sufficient for the remaining kinds */
		break;
	}

	return res;
}
/*
 * mono_patch_info_hash:
 *
 * Hash function for MonoJumpInfo, suitable for GHashTable. The patch
 * type is always folded into the high bits; the payload contribution
 * depends on the kind (token, interned name, raw target pointer, ...).
 */
guint
mono_patch_info_hash (gconstpointer data)
{
	const MonoJumpInfo *ji = (MonoJumpInfo*)data;

	switch (ji->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		return (ji->type << 8) | ji->data.token->token;
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		/* mix in the generic context when present */
		return (ji->type << 8) | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
	case MONO_PATCH_INFO_INTERNAL_METHOD:
		return (ji->type << 8) | g_str_hash (ji->data.name);
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_ADJUSTED_IID:
	case MONO_PATCH_INFO_CLASS_INIT:
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_METHOD:
	case MONO_PATCH_INFO_METHOD_JUMP:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_SEQ_POINT_INFO:
		/* these are distinguished by their target pointer */
		return (ji->type << 8) | (gssize)ji->data.target;
	case MONO_PATCH_INFO_GSHAREDVT_CALL:
		return (ji->type << 8) | (gssize)ji->data.gsharedvt->method;
	default:
		/* fall back to hashing the type only */
		return (ji->type << 8);
	}
}
/*
 * mono_patch_info_equal:
 *
 * Equality predicate for MonoJumpInfo, paired with mono_patch_info_hash.
 * Returns 1 when the two patches resolve to the same target.
 *
 * This might fail to recognize equivalent patches, i.e. floats, so its only
 * usable in those cases where this is not a problem, i.e. sharing GOT slots
 * in AOT.
 */
gint
mono_patch_info_equal (gconstpointer ka, gconstpointer kb)
{
	const MonoJumpInfo *ji1 = (MonoJumpInfo*)ka;
	const MonoJumpInfo *ji2 = (MonoJumpInfo*)kb;

	if (ji1->type != ji2->type)
		return 0;

	switch (ji1->type) {
	case MONO_PATCH_INFO_RVA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_DECLSEC:
		/* tokens compare by image, token value and full generic context */
		if ((ji1->data.token->image != ji2->data.token->image) ||
			(ji1->data.token->token != ji2->data.token->token) ||
			(ji1->data.token->has_context != ji2->data.token->has_context) ||
			(ji1->data.token->context.class_inst != ji2->data.token->context.class_inst) ||
			(ji1->data.token->context.method_inst != ji2->data.token->context.method_inst))
			return 0;
		break;
	case MONO_PATCH_INFO_INTERNAL_METHOD:
		return g_str_equal (ji1->data.name, ji2->data.name);
	case MONO_PATCH_INFO_RGCTX_FETCH: {
		MonoJumpInfoRgctxEntry *e1 = ji1->data.rgctx_entry;
		MonoJumpInfoRgctxEntry *e2 = ji2->data.rgctx_entry;

		/* recursive comparison of the embedded patch info */
		return e1->method == e2->method && e1->in_mrgctx == e2->in_mrgctx && e1->info_type == e2->info_type && mono_patch_info_equal (e1->data, e2->data);
	}
	case MONO_PATCH_INFO_GSHAREDVT_CALL: {
		MonoJumpInfoGSharedVtCall *c1 = ji1->data.gsharedvt;
		MonoJumpInfoGSharedVtCall *c2 = ji2->data.gsharedvt;

		return c1->sig == c2->sig && c1->method == c2->method;
	}
	default:
		/* default: raw pointer identity of the target */
		if (ji1->data.target != ji2->data.target)
			return 0;
		break;
	}

	return 1;
}
/*
 * mono_resolve_patch_target:
 *
 * Compute the final runtime address/value a patch of PATCH_INFO->type
 * resolves to, for code emitted at CODE in DOMAIN. METHOD is the method
 * being compiled (used e.g. for self-references and cctor decisions).
 * RUN_CCTORS is FALSE when compiling AOT; in that mode some targets
 * (pinvoke addresses, seq point info) resolve to NULL instead.
 */
gpointer
mono_resolve_patch_target (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors)
{
	unsigned char *ip = patch_info->ip.i + code;
	gconstpointer target = NULL;

	switch (patch_info->type) {
	case MONO_PATCH_INFO_BB:
		/*
		 * FIXME: This could be hit for methods without a prolog. Should use -1
		 * but too much code depends on a 0 initial value.
		 */
		//g_assert (patch_info->data.bb->native_offset);
		target = patch_info->data.bb->native_offset + code;
		break;
	case MONO_PATCH_INFO_ABS:
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_LABEL:
		target = patch_info->data.inst->inst_c0 + code;
		break;
	case MONO_PATCH_INFO_IP:
#if defined(__native_client__) && defined(__native_client_codegen__)
		/* Need to transform to the destination address, it's */
		/* emitted as an immediate in the code. */
		target = nacl_inverse_modify_patch_target(ip);
#else
		target = ip;
#endif
		break;
	case MONO_PATCH_INFO_METHOD_REL:
		target = code + patch_info->data.offset;
		break;
	case MONO_PATCH_INFO_INTERNAL_METHOD: {
		MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
		if (!mi) {
			g_warning ("unknown MONO_PATCH_INFO_INTERNAL_METHOD %s", patch_info->data.name);
			g_assert_not_reached ();
		}
		target = mono_icall_get_wrapper (mi);
		break;
	}
	case MONO_PATCH_INFO_METHOD_JUMP:
		target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
#if defined(__native_client__) && defined(__native_client_codegen__)
#if defined(TARGET_AMD64)
		/* This target is an absolute address, not relative to the */
		/* current code being emitted on AMD64. */
		target = nacl_inverse_modify_patch_target(target);
#endif
#endif
		break;
	case MONO_PATCH_INFO_METHOD:
		if (patch_info->data.method == method) {
			/* self-call: point back at the start of this method */
			target = code;
		} else {
			/* get the trampoline to the method from the domain */
			target = mono_create_jit_trampoline_in_domain (domain, patch_info->data.method);
		}
		break;
	case MONO_PATCH_INFO_SWITCH: {
		gpointer *jump_table;
		int i;
#if defined(__native_client__) && defined(__native_client_codegen__)
		/* This memory will leak, but we don't care if we're */
		/* not deleting JIT'd methods anyway */
		jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
#else
		if (method && method->dynamic) {
			jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
		} else {
			if (mono_aot_only) {
				jump_table = mono_domain_alloc (domain, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}
		}
#endif
		/* entries were stored as native offsets; turn them into addresses */
		for (i = 0; i < patch_info->data.table->table_size; i++) {
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* 'code' is relative to the current code blob, we */
			/* need to do this transform on it to make the */
			/* pointers in this table absolute */
			jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
#else
			jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
#endif
		}
#if defined(__native_client__) && defined(__native_client_codegen__)
		/* jump_table is in the data section, we need to transform */
		/* it here so when it gets modified in amd64_patch it will */
		/* then point back to the absolute data address */
		target = nacl_inverse_modify_patch_target (jump_table);
#else
		target = jump_table;
#endif
		break;
	}
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_SIGNATURE:
		/* target is the metadata item itself */
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_IID:
		mono_class_init (patch_info->data.klass);
		target = GINT_TO_POINTER ((int)patch_info->data.klass->interface_id);
		break;
	case MONO_PATCH_INFO_ADJUSTED_IID:
		mono_class_init (patch_info->data.klass);
		target = GINT_TO_POINTER ((int)(-((patch_info->data.klass->interface_id + 1) * SIZEOF_VOID_P)));
		break;
	case MONO_PATCH_INFO_VTABLE:
		target = mono_class_vtable (domain, patch_info->data.klass);
		g_assert (target);
		break;
	case MONO_PATCH_INFO_CLASS_INIT: {
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.klass);

		g_assert (vtable);
		target = mono_create_class_init_trampoline (vtable);
		break;
	}
	case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE:
		target = mono_create_delegate_trampoline (domain, patch_info->data.klass);
		break;
	case MONO_PATCH_INFO_SFLDA: {
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.field->parent);

		if (mono_class_field_is_special_static (patch_info->data.field)) {
			/* special static (thread/context static): look up the
			 * per-domain registered address instead */
			gpointer addr = NULL;

			mono_domain_lock (domain);
			if (domain->special_static_fields)
				addr = g_hash_table_lookup (domain->special_static_fields, patch_info->data.field);
			mono_domain_unlock (domain);
			g_assert (addr);
			return addr;
		}

		g_assert (vtable);
		if (!vtable->initialized && !(vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) && (method && mono_class_needs_cctor_run (vtable->klass, method)))
			/* Done by the generated code */
			;
		else {
			if (run_cctors)
				mono_runtime_class_init (vtable);
		}
		target = (char*)mono_vtable_get_static_field_data (vtable) + patch_info->data.field->offset;
		break;
	}
	case MONO_PATCH_INFO_RVA: {
		guint32 field_index = mono_metadata_token_index (patch_info->data.token->token);
		guint32 rva;

		mono_metadata_field_info (patch_info->data.token->image, field_index - 1, NULL, &rva, NULL);
		target = mono_image_rva_map (patch_info->data.token->image, rva);
		break;
	}
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		target = patch_info->data.target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		target = patch_info->data.name;
		break;
	case MONO_PATCH_INFO_LDSTR:
		target =
			mono_ldstr (domain, patch_info->data.token->image,
						mono_metadata_token_index (patch_info->data.token->token));
		break;
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE: {
		gpointer handle;
		MonoClass *handle_class;

		handle = mono_ldtoken (patch_info->data.token->image,
							   patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? &patch_info->data.token->context : NULL);
		mono_class_init (handle_class);
		mono_class_init (mono_class_from_mono_type (handle));

		target =
			mono_type_get_object (domain, handle);
		break;
	}
	case MONO_PATCH_INFO_LDTOKEN: {
		gpointer handle;
		MonoClass *handle_class;

		handle = mono_ldtoken (patch_info->data.token->image,
							   patch_info->data.token->token, &handle_class, NULL);
		mono_class_init (handle_class);

		target = handle;
		break;
	}
	case MONO_PATCH_INFO_DECLSEC:
		/* skip the 2-byte blob-length prefix */
		target = (mono_metadata_blob_heap (patch_info->data.token->image, patch_info->data.token->token) + 2);
		break;
	case MONO_PATCH_INFO_ICALL_ADDR:
		/* run_cctors == 0 -> AOT */
		if (patch_info->data.method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
			const char *exc_class;
			const char *exc_arg;

			if (run_cctors) {
				target = mono_lookup_pinvoke_call (patch_info->data.method, &exc_class, &exc_arg);
				if (!target) {
					if (mono_aot_only)
						mono_raise_exception (mono_exception_from_name_msg (mono_defaults.corlib, "System", exc_class, exc_arg));
					g_error ("Unable to resolve pinvoke method '%s' Re-run with MONO_LOG_LEVEL=debug for more information.\n", mono_method_full_name (patch_info->data.method, TRUE));
				}
			} else {
				target = NULL;
			}
		} else {
			target = mono_lookup_internal_call (patch_info->data.method);

			if (!target && run_cctors)
				g_error ("Unregistered icall '%s'\n", mono_method_full_name (patch_info->data.method, TRUE));
		}
		break;
	case MONO_PATCH_INFO_JIT_ICALL_ADDR: {
		MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
		if (!mi) {
			g_warning ("unknown MONO_PATCH_INFO_JIT_ICALL_ADDR %s", patch_info->data.name);
			g_assert_not_reached ();
		}
		target = mi->func;
		break;
	}
	case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
		target = mono_thread_interruption_request_flag ();
		break;
	case MONO_PATCH_INFO_METHOD_RGCTX: {
		MonoVTable *vtable = mono_class_vtable (domain, patch_info->data.method->klass);
		g_assert (vtable);

		target = mono_method_lookup_rgctx (vtable, mini_method_get_context (patch_info->data.method)->method_inst);
		break;
	}
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
	case MONO_PATCH_INFO_GOT_OFFSET:
	case MONO_PATCH_INFO_NONE:
		/* no target to compute */
		break;
	case MONO_PATCH_INFO_RGCTX_FETCH: {
		MonoJumpInfoRgctxEntry *entry = patch_info->data.rgctx_entry;
		guint32 slot = -1;

		/* register the requested info in the (m)rgctx and get its slot */
		switch (entry->data->type) {
		case MONO_PATCH_INFO_CLASS:
			slot = mono_method_lookup_or_register_info (entry->method, entry->in_mrgctx, &entry->data->data.klass->byval_arg, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_METHOD:
		case MONO_PATCH_INFO_METHODCONST:
			slot = mono_method_lookup_or_register_info (entry->method, entry->in_mrgctx, entry->data->data.method, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_FIELD:
			slot = mono_method_lookup_or_register_info (entry->method, entry->in_mrgctx, entry->data->data.field, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_SIGNATURE:
			slot = mono_method_lookup_or_register_info (entry->method, entry->in_mrgctx, entry->data->data.sig, entry->info_type, mono_method_get_context (entry->method));
			break;
		case MONO_PATCH_INFO_GSHAREDVT_CALL: {
			MonoJumpInfoGSharedVtCall *call_info = mono_domain_alloc0 (domain, sizeof (MonoJumpInfoGSharedVtCall));

			memcpy (call_info, entry->data->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall));
			slot = mono_method_lookup_or_register_info (entry->method, entry->in_mrgctx, call_info, entry->info_type, mono_method_get_context (entry->method));
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}

		target = mono_create_rgctx_lazy_fetch_trampoline (slot);
		break;
	}
	case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
		target = mono_create_generic_class_init_trampoline ();
		break;
	case MONO_PATCH_INFO_MONITOR_ENTER:
		target = mono_create_monitor_enter_trampoline ();
		break;
	case MONO_PATCH_INFO_MONITOR_EXIT:
		target = mono_create_monitor_exit_trampoline ();
		break;
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
	case MONO_PATCH_INFO_SEQ_POINT_INFO:
		if (!run_cctors)
			/* AOT, not needed */
			target = NULL;
		else
			target = mono_arch_get_seq_point_info (domain, code);
		break;
#endif
	case MONO_PATCH_INFO_LLVM_IMT_TRAMPOLINE:
#ifdef MONO_ARCH_LLVM_SUPPORTED
		g_assert (mono_use_llvm);
		target = mono_create_llvm_imt_trampoline (domain, patch_info->data.imt_tramp->method, patch_info->data.imt_tramp->vt_offset);
#else
		g_assert_not_reached ();
#endif
		break;
	case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
		int card_table_shift_bits;
		gpointer card_table_mask;

		/* only the table address is used here; shift/mask are discarded */
		target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
		break;
	}
	case MONO_PATCH_INFO_CASTCLASS_CACHE: {
		/* a fresh pointer-sized, zeroed cache slot in the domain */
		target = mono_domain_alloc0 (domain, sizeof (gpointer));
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return (gpointer)target;
}
/*
 * mono_add_seq_point:
 *
 * Record INS as a sequence point at NATIVE_OFFSET: append it to the
 * method-wide seq_points array, prepend it to BB's per-bblock list and
 * remember it as the bblock's last sequence point.
 */
void
mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
{
	ins->inst_offset = native_offset;
	g_ptr_array_add (cfg->seq_points, ins);
	bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
	bb->last_seq_point = ins;
}
/*
 * mono_add_var_location:
 *
 * Record a DWARF location-list entry for VAR: either register REG (with
 * OFFSET required to be 0) or a stack offset, valid over the native code
 * range [FROM, TO). Only locations of the 'this' argument and the rgctx
 * variable are tracked; other vars are ignored.
 */
void
mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
{
	MonoDwarfLocListEntry *entry = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));

	if (is_reg)
		g_assert (offset == 0);
	entry->is_reg = is_reg;
	entry->reg = reg;
	entry->offset = offset;
	entry->from = from;
	entry->to = to;

	if (var == cfg->args [0])
		cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
	else if (var == cfg->rgctx_var)
		cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}
#ifndef DISABLE_JIT
/*
 * mono_compile_create_vars:
 *
 * Create the IR variables for the method being compiled: the return
 * value (when non-void), the 'this' argument and the parameters
 * (cfg->args), and the local variables (cfg->locals). Finally let the
 * architecture backend create any arch specific vars.
 */
static void
mono_compile_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	if (!MONO_TYPE_IS_VOID (sig->ret)) {
		cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
		/* Inhibit optimizations */
		cfg->ret->flags |= MONO_INST_VOLATILE;
	}
	if (cfg->verbose_level > 2)
		g_print ("creating vars\n");

	/* args [0] is 'this' when hasthis, followed by the declared parameters */
	cfg->args = mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));

	if (sig->hasthis)
		cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);

	for (i = 0; i < sig->param_count; ++i) {
		cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
	}

	if (cfg->verbose_level > 2) {
		if (cfg->ret) {
			printf ("\treturn : ");
			mono_print_ins (cfg->ret);
		}

		if (sig->hasthis) {
			printf ("\tthis: ");
			mono_print_ins (cfg->args [0]);
		}

		for (i = 0; i < sig->param_count; ++i) {
			printf ("\targ [%d]: ", i);
			mono_print_ins (cfg->args [i + sig->hasthis]);
		}
	}

	cfg->locals_start = cfg->num_varinfo;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));

	if (cfg->verbose_level > 2)
		g_print ("creating locals\n");

	for (i = 0; i < header->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);

	if (cfg->verbose_level > 2)
		g_print ("locals done\n");

	mono_arch_create_vars (cfg);
}
/*
 * mono_print_code:
 *
 * Dump the IR of every basic block in CFG, prefixing the output
 * with MSG.
 */
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
	MonoBasicBlock *block;

	for (block = cfg->bb_entry; block; block = block->next_bb)
		mono_print_bb (block, msg);
}
/*
 * mono_postprocess_patches:
 *
 * Post-process cfg->patch_info after code emission: rewrite ABS patches
 * into more descriptive kinds when the address is a known icall or has a
 * registered abs patch; turn SWITCH bblock tables into native-offset
 * tables; and register METHOD_JUMP sites in the domain's jump target
 * hash so they can be patched when the target method is compiled.
 */
static void
mono_postprocess_patches (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_ABS: {
			MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);

			/*
			 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
			 * absolute address.
			 */
			if (info) {
				//printf ("TEST %s %p\n", info->name, patch_info->data.target);
				// FIXME: CLEAN UP THIS MESS.
				if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
					strstr (cfg->method->name, info->name)) {
					/*
					 * This is an icall wrapper, and this is a call to the
					 * wrapped function.
					 */
					if (cfg->compile_aot) {
						patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ADDR;
						patch_info->data.name = info->name;
					}
				} else {
					/* for these array methods we currently register the same function pointer
					 * since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
					 * will return the incorrect one depending on the order they are registered.
					 * See tests/test-arr.cs
					 */
					if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
						patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
						patch_info->data.name = info->name;
					}
				}
			}

			if (patch_info->type == MONO_PATCH_INFO_ABS) {
				/* still an ABS patch: try the per-compile abs patch table */
				if (cfg->abs_patches) {
					MonoJumpInfo *abs_ji = g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
					if (abs_ji) {
						patch_info->type = abs_ji->type;
						patch_info->data.target = abs_ji->data.target;
					}
				}
			}

			break;
		}
		case MONO_PATCH_INFO_SWITCH: {
			gpointer *table;
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* This memory will leak. */
			/* TODO: can we free this when */
			/* making the final jump table? */
			table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
#else
			if (cfg->method->dynamic) {
				table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}
#endif

			/* replace bblock pointers with their native offsets */
			for (i = 0; i < patch_info->data.table->table_size; i++) {
				/* Might be NULL if the switch is eliminated */
				if (patch_info->data.table->table [i]) {
					g_assert (patch_info->data.table->table [i]->native_offset);
					table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
				} else {
					table [i] = NULL;
				}
			}
			patch_info->data.table->table = (MonoBasicBlock**)table;
			break;
		}
		case MONO_PATCH_INFO_METHOD_JUMP: {
			MonoJumpList *jlist;
			MonoDomain *domain = cfg->domain;
			unsigned char *ip = cfg->native_code + patch_info->ip.i;
#if defined(__native_client__) && defined(__native_client_codegen__)
			/* When this jump target gets evaluated, the method */
			/* will be installed in the dynamic code section, */
			/* not at the location of cfg->native_code. */
			ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
#endif

			mono_domain_lock (domain);
			jlist = g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
			if (!jlist) {
				jlist = mono_domain_alloc0 (domain, sizeof (MonoJumpList));
				g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
			}
			jlist->list = g_slist_prepend (jlist->list, ip);
			mono_domain_unlock (domain);
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}
}
/*
 * collect_pred_seq_points:
 *
 * For sequence point INS in BB, add the index of INS to the 'next' list
 * of the last sequence point of each predecessor bblock. Predecessors
 * without a sequence point are searched recursively, up to a fixed
 * depth limit to bound the traversal.
 */
static void
collect_pred_seq_points (MonoBasicBlock *bb, MonoInst *ins, GSList **next, int depth)
{
	int i;
	MonoBasicBlock *in_bb;
	GSList *l;

	for (i = 0; i < bb->in_count; ++i) {
		in_bb = bb->in_bb [i];

		if (in_bb->last_seq_point) {
			/* backend.size holds the seq point's index (set by the caller) */
			int src_index = in_bb->last_seq_point->backend.size;
			int dst_index = ins->backend.size;

			/* bb->in_bb might contain duplicates */
			for (l = next [src_index]; l; l = l->next)
				if (GPOINTER_TO_UINT (l->data) == dst_index)
					break;
			if (!l)
				next [src_index] = g_slist_append (next [src_index], GUINT_TO_POINTER (dst_index));
		} else {
			/* Have to look at its predecessors */
			if (depth < 5)
				collect_pred_seq_points (in_bb, ins, next, depth + 1);
		}
	}
}
/*
 * mono_save_seq_point_info:
 *
 * Build the MonoSeqPointInfo for the compiled method from
 * cfg->seq_points: record each point's IL and native offset, then
 * compute for every sequence point the set of points that can
 * immediately follow it (needed by the debugger agent to implement
 * 'step over'). The result is stored in cfg->seq_point_info and, for
 * non-AOT compiles, registered in the domain's seq_points hash.
 */
static void
mono_save_seq_point_info (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	GSList *bb_seq_points, *l;
	MonoInst *last;
	MonoDomain *domain = cfg->domain;
	int i;
	MonoSeqPointInfo *info;
	GSList **next;

	if (!cfg->seq_points)
		return;

	info = g_malloc0 (sizeof (MonoSeqPointInfo) + (cfg->seq_points->len - MONO_ZERO_LEN_ARRAY) * sizeof (SeqPoint));
	info->len = cfg->seq_points->len;
	for (i = 0; i < cfg->seq_points->len; ++i) {
		SeqPoint *sp = &info->seq_points [i];
		MonoInst *ins = g_ptr_array_index (cfg->seq_points, i);

		sp->il_offset = ins->inst_imm;
		sp->native_offset = ins->inst_offset;

		/* Used below */
		ins->backend.size = i;
	}

	/*
	 * For each sequence point, compute the list of sequence points immediately
	 * following it, this is needed to implement 'step over' in the debugger agent.
	 */
	next = g_new0 (GSList*, cfg->seq_points->len);
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		/* per-bblock lists were built by prepending; restore emission order */
		bb_seq_points = g_slist_reverse (bb->seq_points);
		last = NULL;
		for (l = bb_seq_points; l; l = l->next) {
			MonoInst *ins = l->data;

			if (ins->inst_imm == METHOD_ENTRY_IL_OFFSET || ins->inst_imm == METHOD_EXIT_IL_OFFSET)
				/* Used to implement method entry/exit events */
				continue;

			if (last != NULL) {
				/* Link with the previous seq point in the same bb */
				next [last->backend.size] = g_slist_append (next [last->backend.size], GUINT_TO_POINTER (ins->backend.size));
			} else {
				/* Link with the last bb in the previous bblocks */
				collect_pred_seq_points (bb, ins, next, 0);
			}

			last = ins;
		}

		if (bb->last_ins && bb->last_ins->opcode == OP_ENDFINALLY) {
			MonoBasicBlock *bb2;
			MonoInst *endfinally_seq_point = NULL;

			/*
			 * The ENDFINALLY branches are not represented in the cfg, so link it with all seq points starting bbs.
			 */
			l = g_slist_last (bb->seq_points);
			g_assert (l);
			endfinally_seq_point = l->data;

			for (bb2 = cfg->bb_entry; bb2; bb2 = bb2->next_bb) {
				GSList *l = g_slist_last (bb2->seq_points);

				if (l) {
					MonoInst *ins = l->data;

					if (!(ins->inst_imm == METHOD_ENTRY_IL_OFFSET || ins->inst_imm == METHOD_EXIT_IL_OFFSET) && ins != endfinally_seq_point)
						next [endfinally_seq_point->backend.size] = g_slist_append (next [endfinally_seq_point->backend.size], GUINT_TO_POINTER (ins->backend.size));
				}
			}
		}
	}

	if (cfg->verbose_level > 2) {
		printf ("\nSEQ POINT MAP: \n");
	}

	/* flatten the temporary GSList adjacency lists into the SeqPoint arrays */
	for (i = 0; i < cfg->seq_points->len; ++i) {
		SeqPoint *sp = &info->seq_points [i];
		GSList *l;
		int j, next_index;

		sp->next_len = g_slist_length (next [i]);
		sp->next = g_new (int, sp->next_len);
		j = 0;
		if (cfg->verbose_level > 2 && next [i]) {
			printf ("\tIL0x%x ->", sp->il_offset);
			for (l = next [i]; l; l = l->next) {
				next_index = GPOINTER_TO_UINT (l->data);
				printf (" IL0x%x", info->seq_points [next_index].il_offset);
			}
			printf ("\n");
		}
		for (l = next [i]; l; l = l->next) {
			next_index = GPOINTER_TO_UINT (l->data);
			sp->next [j ++] = next_index;
		}
		g_slist_free (next [i]);
	}
	g_free (next);

	cfg->seq_point_info = info;

	// FIXME: dynamic methods
	if (!cfg->compile_aot) {
		mono_domain_lock (domain);
		// FIXME: How can the lookup succeed ?
		if (!g_hash_table_lookup (domain_jit_info (domain)->seq_points, cfg->method_to_register))
			g_hash_table_insert (domain_jit_info (domain)->seq_points, cfg->method_to_register, info);
		mono_domain_unlock (domain);
	}

	g_ptr_array_free (cfg->seq_points, TRUE);
	cfg->seq_points = NULL;
}
void
mono_codegen (MonoCompile *cfg)
{
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
MonoDomain *code_domain;
if (mono_using_xdebug)
/*
* Recent gdb versions have trouble processing symbol files containing
* overlapping address ranges, so allocate all code from the code manager
* of the root domain. (#666152).
*/
code_domain = mono_get_root_domain ();
else
code_domain = cfg->domain;
#if defined(__native_client_codegen__) && defined(__native_client__)
void *code_dest;
/* This keeps patch targets from being transformed during
* ordinary method compilation, for local branches and jumps.
*/
nacl_allow_target_modification (FALSE);
#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
if (!cfg->globalra)
mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
}
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, cfg->num_bblocks);
code = mono_arch_emit_prolog (cfg);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
code = mono_arch_instrument_prolog (cfg, mono_profiler_method_enter, code, FALSE);
cfg->code_len = code - cfg->native_code;
cfg->prolog_end = cfg->code_len;
mono_debug_open_method (cfg);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
code = cfg->native_code + cfg->code_len;
code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
}
mono_arch_emit_epilog (cfg);
}
}
#ifdef __native_client_codegen__
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
/* we always allocate code in cfg->domain->code_mp to increase locality */
cfg->code_size = cfg->code_len + max_epilog_size;
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
if (cfg->method->dynamic) {
guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo,