/**
* \file
* Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro (lupus@ximian.com)
* Dietmar Maurer (dietmar@ximian.com)
*
* (C) 2002 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"
#ifndef DISABLE_JIT
#include <signal.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#include <string.h>
#include <ctype.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/icall-decl.h>
#include "trace.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/* Used for the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
static const gboolean debug_tailcall = FALSE; // logging
static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret
gboolean
mono_tailcall_print_enabled (void)
{
return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
}
void
mono_tailcall_print (const char *format, ...)
{
if (!mono_tailcall_print_enabled ())
return;
va_list args;
va_start (args, format);
g_printv (format, args);
va_end (args);
}
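/*
 * Illustrative (hypothetical) call site:
 *
 *   mono_tailcall_print ("tailcall rejected in %s: %s\n", method_name, reason);
 *
 * The message is only printed when tailcall tracing is enabled, so callers
 * can log unconditionally.
 */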
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
} \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
if (cfg->gshared) { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
mono_error_set_out_of_memory (&cfg->error, ""); \
goto exception_exit; \
} while (0)
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
} while (0)
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
} while (0)
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
LOAD_ERROR; \
} while (0)
#define CHECK_CFG_ERROR do {\
if (!mono_error_ok (&cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
} \
} while (0)
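/*
 * Usage sketch for the failure macros above (assuming, as in method_to_ir (),
 * that 'cfg' is in scope and an 'exception_exit' label exists):
 *
 *   if (!klass)
 *       TYPE_LOAD_ERROR (klass); // records the failing class, jumps to exception_exit
 */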
static int stind_to_store_membase (int opcode);
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always);
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);
/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_jit_thread_attach;
static MonoMethodSignature *helper_sig_get_tls_tramp;
static MonoMethodSignature *helper_sig_set_tls_tramp;
/* type loading helpers */
static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
/*
* Instruction metadata
*/
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
mini_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
* This should contain the index of the last sreg + 1. This is not the same
* as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
*/
const gint8 mini_ins_sreg_counts[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
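/*
 * Each MINI_OP contributes four chars to mini_ins_info (dest, src1, src2,
 * src3-or-space). A lookup sketch, assuming the usual (opcode - OP_START - 1)
 * indexing used by the accessor macros in mini.h:
 *
 *   char dest = mini_ins_info [(opcode - OP_START - 1) * 4];
 *   int nsregs = mini_ins_sreg_counts [opcode - OP_START - 1];
 */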
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
return alloc_ireg (cfg);
}
guint32
mono_alloc_lreg (MonoCompile *cfg)
{
return alloc_lreg (cfg);
}
guint32
mono_alloc_freg (MonoCompile *cfg)
{
return alloc_freg (cfg);
}
guint32
mono_alloc_preg (MonoCompile *cfg)
{
return alloc_preg (cfg);
}
guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
return alloc_dreg (cfg, stack_type);
}
/*
* mono_alloc_ireg_ref:
*
* Allocate an IREG, and mark it as holding a GC ref.
*/
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
return alloc_ireg_ref (cfg);
}
/*
* mono_alloc_ireg_mp:
*
* Allocate an IREG, and mark it as holding a managed pointer.
*/
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
return alloc_ireg_mp (cfg);
}
/*
* mono_alloc_ireg_copy:
*
* Allocate an IREG with the same GC type as VREG.
*/
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
else
return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
if (type->byref)
return OP_MOVE;
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_MOVE;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_MOVE;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_MOVE;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_MOVE;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_MOVE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
return OP_MOVE;
#else
return OP_LMOVE;
#endif
case MONO_TYPE_R4:
return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_R8:
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
return OP_VMOVE;
case MONO_TYPE_TYPEDBYREF:
return OP_VMOVE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (type))
return OP_VMOVE;
else
return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
default:
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
}
return -1;
}
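/*
 * For example, a MONO_TYPE_I8 value moves with OP_MOVE when registers are
 * 64 bits wide but needs OP_LMOVE on 32-bit registers, and an enum is first
 * lowered to its underlying integral type via the handle_enum loop above.
 */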
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
int i;
MonoInst *tree;
GString *str = g_string_new ("");
g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
g_string_append_printf (str, ", OUT: ");
for (i = 0; i < bb->out_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
g_string_append_printf (str, " ]\n");
g_print ("%s", str->str);
g_string_free (str, TRUE);
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
}
void
mono_create_helper_signatures (void)
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
}
static MONO_NEVER_INLINE gboolean
break_on_unverified (void)
{
if (mini_get_debug_options ()->break_on_unverified) {
G_BREAKPOINT ();
return TRUE;
}
return FALSE;
}
static void
clear_cfg_error (MonoCompile *cfg)
{
mono_error_cleanup (&cfg->error);
error_init (&cfg->error);
}
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
g_free (method_fname);
g_free (field_fname);
}
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
static MONO_NEVER_INLINE void
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
if (cfg->verbose_level > 2)
printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
void
mini_set_inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
/*
* When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
* foo<T> (int i) { ldarg.0; box T; }
*/
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
} \
break_on_unverified (); \
goto unverified; \
} while (0)
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
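/*
 * GET_BBLOCK either fetches the basic block already registered for 'ip' or
 * creates and registers a fresh one; an 'ip' outside [header->code, end)
 * marks the method unverified instead.
 */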
/* Emit conversions so both operands of a binary opcode are of the same type */
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
if (cfg->r4fp &&
((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
MonoInst *conv;
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
ins->sreg1 = dreg;
*arg1_ref = conv;
}
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
ins->sreg2 = dreg;
*arg2_ref = conv;
}
}
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
MonoInst *widen;
int dr = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
}
#endif
}
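/*
 * For example, under r4fp an r4 operand paired with an r8 one is first
 * routed through OP_RCONV_TO_R8, and on 64-bit targets a STACK_I4 operand
 * paired with a STACK_PTR one is sign-extended with OP_SEXT_I4.
 */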
#define ADD_BINOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp -= 2; \
ins->sreg1 = sp [0]->dreg; \
ins->sreg2 = sp [1]->dreg; \
type_from_op (cfg, ins, sp [0], sp [1]); \
CHECK_TYPE (ins); \
/* Have to insert a widening op */ \
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp--; \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
#define ADD_BINCOND(next_block) do { \
MonoInst *cmp; \
sp -= 2; \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, cmp, sp [0], sp [1]); \
CHECK_TYPE (cmp); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, cfg->cbb, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
} else { \
GET_BBLOCK (cfg, tblock, next_ip); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
} \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
} \
MONO_ADD_INS (cfg->cbb, cmp); \
MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
/**
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block, and the 'to' argument is the block
 * that control flow reaches after 'from'.
 */
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
MonoBasicBlock **newa;
int i, found;
#if 0
if (from->cil_code) {
if (to->cil_code)
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
else
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
} else {
if (to->cil_code)
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
else
printf ("edge from entry to exit\n");
}
#endif
found = FALSE;
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
}
newa [i] = to;
from->out_count++;
from->out_bb = newa;
}
found = FALSE;
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
}
newa [i] = from;
to->in_count++;
to->in_bb = newa;
}
}
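/*
 * Note that duplicate edges are ignored, and that the out_bb/in_bb arrays
 * grow by reallocating from the compile mempool, so the stale copies simply
 * remain behind in the mempool.
 */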
void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
link_bblock (cfg, from, to);
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region);
static void
mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
{
MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];
//'start' must exist in cil_offset_to_bb: these are IL offsets used by EH clauses, which get a bblock via GET_BBLOCK early on.
g_assert (bb);
if (cfg->verbose_level > 1)
g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
for (; bb && bb->real_offset < end; bb = bb->next_bb) {
//no one claimed this bb, take it.
if (bb->region == -1) {
bb->region = region;
continue;
}
//current region is already a handler, bail
if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
continue;
}
//current region is a try, only overwrite if new region is a handler
if ((region & (0xf << 4)) != MONO_REGION_TRY) {
bb->region = region;
}
}
if (cfg->spvars)
mono_create_spvar_for_region (cfg, region);
}
static void
compute_bb_regions (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoMethodHeader *header = cfg->header;
int i;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->region = -1;
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);
guint handler_region;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
else
handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
}
if (cfg->verbose_level > 2) {
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
}
}
static gboolean
ip_in_finally_clause (MonoCompile *cfg, int offset)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
continue;
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return TRUE;
}
return FALSE;
}
/* Find clauses between ip and target, from inner to outer */
static GList*
mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
GList *res = NULL;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
leave->index = i;
leave->clause = clause;
res = g_list_append_mempool (cfg->mempool, res, leave);
}
}
return res;
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
if (var)
return;
var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}
MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
if (var)
return var;
var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
return var;
}
/*
* Sets inst->type to the eval stack type used when @type is loaded.
* FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
*/
void
mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
MonoClass *klass;
type = mini_get_underlying_type (type);
inst->klass = klass = mono_class_from_mono_type_internal (type);
if (type->byref) {
inst->type = STACK_MP;
return;
}
handle_enum:
switch (type->type) {
case MONO_TYPE_VOID:
inst->type = STACK_INV;
return;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
inst->type = STACK_I4;
return;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
return;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
return;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
inst->type = STACK_I8;
return;
case MONO_TYPE_R4:
inst->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
inst->type = STACK_R8;
return;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
} else {
inst->klass = klass;
inst->type = STACK_VTYPE;
return;
}
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
return;
case MONO_TYPE_GENERICINST:
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_is_gsharedvt_type (type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
} else {
mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
}
return;
default:
g_error ("unknown type 0x%02x in eval stack type", type->type);
}
}
/*
* The following tables are used to quickly validate the IL code in type_from_op ().
*/
static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};
static const char
neg_table [] = {
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};
/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0},
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};
/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
* Tables to map from the non-specific opcode to the matching
* type-specific opcode.
*/
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};
/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};
/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};
/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
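/*
 * The *_op_map tables store deltas, not opcodes: adding the delta for the
 * operand's stack type to the generic CIL opcode yields the type-specific IR
 * opcode. A worked example:
 *
 *   ins->opcode = MONO_CEE_ADD;
 *   ins->type = STACK_I4;
 *   ins->opcode += binops_op_map [ins->type]; // CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD
 */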
/*
* Sets ins->type (the type on the eval stack) according to the
* type of the opcode and the arguments to it.
* Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
*
* FIXME: this function sets ins->type unconditionally in some cases, but
* it should set it to invalid for some types (a conv.x on an object)
*/
static void
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
switch (ins->opcode) {
/* binops */
case MONO_CEE_ADD:
case MONO_CEE_SUB:
case MONO_CEE_MUL:
case MONO_CEE_DIV:
case MONO_CEE_REM:
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_DIV_UN:
case MONO_CEE_REM_UN:
case MONO_CEE_AND:
case MONO_CEE_OR:
case MONO_CEE_XOR:
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_SHL:
case MONO_CEE_SHR:
case MONO_CEE_SHR_UN:
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case OP_COMPARE:
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
else
ins->opcode = OP_ICOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case MONO_CEE_BEQ:
case MONO_CEE_BGE:
case MONO_CEE_BGT:
case MONO_CEE_BLE:
case MONO_CEE_BLT:
case MONO_CEE_BNE_UN:
case MONO_CEE_BGE_UN:
case MONO_CEE_BGT_UN:
case MONO_CEE_BLE_UN:
case MONO_CEE_BLT_UN:
ins->opcode += beqops_op_map [src1->type];
break;
case OP_CEQ:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
case OP_CGT:
case OP_CGT_UN:
case OP_CLT:
case OP_CLT_UN:
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
/* unops */
case MONO_CEE_NEG:
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_NOT:
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
else
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_CONV_I1:
case MONO_CEE_CONV_I2:
case MONO_CEE_CONV_I4:
case MONO_CEE_CONV_U4:
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R_UN:
ins->type = STACK_R8;
switch (src1->type) {
case STACK_I4:
case STACK_PTR:
ins->opcode = OP_ICONV_TO_R_UN;
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_R_UN;
break;
}
break;
case MONO_CEE_CONV_OVF_I1:
case MONO_CEE_CONV_OVF_U1:
case MONO_CEE_CONV_OVF_I2:
case MONO_CEE_CONV_OVF_U2:
case MONO_CEE_CONV_OVF_I4:
case MONO_CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I_UN:
case MONO_CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I1_UN:
case MONO_CEE_CONV_OVF_I2_UN:
case MONO_CEE_CONV_OVF_I4_UN:
case MONO_CEE_CONV_OVF_U1_UN:
case MONO_CEE_CONV_OVF_U2_UN:
case MONO_CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_U:
ins->type = STACK_PTR;
switch (src1->type) {
case STACK_I4:
ins->opcode = OP_ICONV_TO_U;
break;
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
#if TARGET_SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
#endif
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_U;
break;
case STACK_R8:
ins->opcode = OP_FCONV_TO_U;
break;
case STACK_R4:
if (TARGET_SIZEOF_VOID_P == 8)
ins->opcode = OP_RCONV_TO_U8;
else
ins->opcode = OP_RCONV_TO_U4;
break;
}
break;
case MONO_CEE_CONV_I8:
case MONO_CEE_CONV_U8:
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I8:
case MONO_CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_U8_UN:
case MONO_CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_R4:
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R8:
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
break;
case OP_CKFINITE:
ins->type = STACK_R8;
break;
case MONO_CEE_CONV_U2:
case MONO_CEE_CONV_U1:
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_CONV_I:
case MONO_CEE_CONV_OVF_I:
case MONO_CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_ADD_OVF:
case MONO_CEE_ADD_OVF_UN:
case MONO_CEE_MUL_OVF:
case MONO_CEE_MUL_OVF_UN:
case MONO_CEE_SUB_OVF:
case MONO_CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
if (ins->type == STACK_R8)
ins->type = STACK_INV;
break;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
break;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
break;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
break;
default:
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
break;
}
if (ins->type == STACK_MP) {
if (src1->type == STACK_MP)
ins->klass = src1->klass;
else
ins->klass = mono_defaults.object_class;
}
}
void
mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
type_from_op (cfg, ins, src1, src2);
}
static MonoClass*
ldind_to_type (int op)
{
switch (op) {
case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
case MONO_CEE_LDIND_I: return mono_defaults.int_class;
case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
default: g_error ("Unknown ldind type %d", op);
}
}
#if 0
static const char
param_table [STACK_MAX] [STACK_MAX] = {
{0},
};
static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
int i;
if (sig->hasthis) {
switch (args->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_VTYPE:
case STACK_INV:
return 0;
}
args++;
}
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
case STACK_INV:
return 0;
case STACK_MP:
if (!sig->params [i]->byref)
return 0;
continue;
case STACK_OBJ:
if (sig->params [i]->byref)
return 0;
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
break;
default:
return 0;
}
continue;
case STACK_R8:
if (sig->params [i]->byref)
return 0;
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
return 0;
continue;
case STACK_PTR:
case STACK_I4:
case STACK_I8:
case STACK_VTYPE:
break;
}
/*if (!param_table [args [i].type] [sig->params [i]->type])
return 0;*/
}
return 1;
}
#endif
/*
* When we need a pointer to the current domain many times in a method, we
* call mono_domain_get() once and we store the result in a local variable.
* This function returns the variable that represents the MonoDomain*.
*/
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
{
if (!cfg->domainvar) {
/* Make sure we don't generate references after checking whether to init this */
g_assert (!cfg->domainvar_inited);
cfg->domainvar = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Avoid optimizing it away */
cfg->domainvar->flags |= MONO_INST_VOLATILE;
}
return cfg->domainvar;
}
/*
* The got_var contains the address of the Global Offset Table when AOT
* compiling.
*/
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
}
return cfg->got_var;
}
static void
mono_create_rgctx_var (MonoCompile *cfg)
{
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
}
static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
g_assert (cfg->gshared);
mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case STACK_I4: return mono_get_int32_type ();
case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
case STACK_PTR: return mono_get_int_type ();
case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
case STACK_MP:
return m_class_get_this_arg (ins->klass);
case STACK_OBJ: return mono_get_object_type ();
case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
default:
g_error ("stack type %d to monotype not handled\n", ins->type);
}
return NULL;
}
static G_GNUC_UNUSED int
type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
t = mono_type_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return STACK_I4;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return STACK_OBJ;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return STACK_I8;
case MONO_TYPE_R4:
return cfg->r4_stack_type;
case MONO_TYPE_R8:
return STACK_R8;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
return STACK_VTYPE;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (t))
return STACK_VTYPE;
else
return STACK_OBJ;
break;
default:
g_assert_not_reached ();
}
return -1;
}
static MonoClass*
array_access_to_klass (int opcode)
{
switch (opcode) {
case MONO_CEE_LDELEM_U1:
return mono_defaults.byte_class;
case MONO_CEE_LDELEM_U2:
return mono_defaults.uint16_class;
case MONO_CEE_LDELEM_I:
case MONO_CEE_STELEM_I:
return mono_defaults.int_class;
case MONO_CEE_LDELEM_I1:
case MONO_CEE_STELEM_I1:
return mono_defaults.sbyte_class;
case MONO_CEE_LDELEM_I2:
case MONO_CEE_STELEM_I2:
return mono_defaults.int16_class;
case MONO_CEE_LDELEM_I4:
case MONO_CEE_STELEM_I4:
return mono_defaults.int32_class;
case MONO_CEE_LDELEM_U4:
return mono_defaults.uint32_class;
case MONO_CEE_LDELEM_I8:
case MONO_CEE_STELEM_I8:
return mono_defaults.int64_class;
case MONO_CEE_LDELEM_R4:
case MONO_CEE_STELEM_R4:
return mono_defaults.single_class;
case MONO_CEE_LDELEM_R8:
case MONO_CEE_STELEM_R8:
return mono_defaults.double_class;
case MONO_CEE_LDELEM_REF:
case MONO_CEE_STELEM_REF:
return mono_defaults.object_class;
default:
g_assert_not_reached ();
}
return NULL;
}
/*
* We try to share variables when possible
*/
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
MonoInst *res;
int pos, vnum;
MonoType *type;
type = type_from_stack_type (ins);
/* inlining can result in deeper stacks */
if (cfg->inline_depth || slot >= cfg->header->max_stack)
return mono_compile_create_var (cfg, type, OP_LOCAL);
pos = ins->type - 1 + slot * STACK_MAX;
switch (ins->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
if ((vnum = cfg->intvars [pos]))
return cfg->varinfo [vnum];
res = mono_compile_create_var (cfg, type, OP_LOCAL);
cfg->intvars [pos] = res->inst_c0;
break;
default:
res = mono_compile_create_var (cfg, type, OP_LOCAL);
}
return res;
}
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
/*
* Don't use this if a generic_context is set, since that means AOT can't
* look up the method using just the image+token.
* table == 0 means this is a reference made from a wrapper.
*/
if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
}
}
/*
* This function is called to handle items that are left on the evaluation stack
* at basic block boundaries. What happens is that we save the values to local variables
* and we reload them later when first entering the target basic block (with the
* handle_loaded_temps () function).
* A single join point will use the same variables (stored in the array bb->out_stack or
* bb->in_stack, depending on whether the basic block is before or after the join point).
*
* This function needs to be called _before_ emitting the last instruction of
* the bb (i.e. before emitting a branch).
* If the stack merge fails at a join point, cfg->unverifiable is set.
*/
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
int i, bindex;
MonoBasicBlock *bb = cfg->cbb;
MonoBasicBlock *outb;
MonoInst *inst, **locals;
gboolean found;
if (!count)
return;
if (cfg->verbose_level > 3)
printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
if (!bb->out_scount) {
bb->out_scount = count;
//printf ("bblock %d has out:", bb->block_num);
found = FALSE;
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
//printf (" %d", outb->block_num);
if (outb->in_stack) {
found = TRUE;
bb->out_stack = outb->in_stack;
break;
}
}
//printf ("\n");
if (!found) {
bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
/*
* try to reuse temps already allocated for this purpose, if they occupy the same
* stack slot and if they are of the same type.
* This won't cause conflicts since if 'local' is used to
* store one of the values in the in_stack of a bblock, then
* the same variable will be used for the same outgoing stack
* slot as well.
* This doesn't work when inlining methods, since the bblocks
* in the inlined methods do not inherit their in_stack from
* the bblock they are inlined to. See bug #58863 for an
* example.
*/
bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
}
}
}
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
if (outb->in_scount) {
if (outb->in_scount != bb->out_scount) {
cfg->unverifiable = TRUE;
return;
}
continue; /* check they are the same locals */
}
outb->in_scount = count;
outb->in_stack = bb->out_stack;
}
locals = bb->out_stack;
cfg->cbb = bb;
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
}
/*
* It is possible that the out bblocks already have in_stack assigned, and
* the in_stacks differ. In this case, we will store to all the different
* in_stacks.
*/
found = TRUE;
bindex = 0;
while (found) {
/* Find a bblock which has a different in_stack */
found = FALSE;
while (bindex < bb->out_count) {
outb = bb->out_bb [bindex];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER) {
bindex++;
continue;
}
if (outb->in_stack != locals) {
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
}
locals = outb->in_stack;
found = TRUE;
break;
}
bindex ++;
}
}
}
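/*
 * Sketch of the join point this handles: if two predecessor blocks each
 * leave one int32 on the stack,
 *
 *   B1: ldc.i4.0  br TARGET          B2: ldc.i4.1  br TARGET
 *
 * both store their value into the same shared temp (bb->out_stack aliases
 * TARGET's in_stack), and TARGET reloads it on entry.
 */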
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
MonoInst *ins;
if (cfg->compile_aot) {
MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
MONO_RESTORE_WARNING
} else {
MonoJumpInfo ji;
gpointer target;
ERROR_DECL (error);
ji.type = patch_type;
ji.data.target = data;
target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, error);
mono_error_assert_ok (error);
EMIT_NEW_PCONST (cfg, ins, target);
}
return ins;
}
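/*
 * For example, when AOT compiling the constant becomes a relocatable
 * AOTCONST resolved at load time, while when JITting the patch is resolved
 * immediately and folded into a PCONST.
 */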
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
int tls_offset = mono_tls_get_tls_offset (key);
if (cfg->compile_aot)
return NULL;
if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->dreg = mono_alloc_preg (cfg);
ins->inst_offset = tls_offset;
return ins;
}
return NULL;
}
static MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
MonoInst *fast_tls = NULL;
if (!mini_get_debug_options ()->use_fallback_tls)
fast_tls = mono_create_fast_tls_getter (cfg, key);
if (fast_tls) {
MONO_ADD_INS (cfg->cbb, fast_tls);
return fast_tls;
}
if (cfg->compile_aot) {
MonoInst *addr;
/*
* tls getters are critical pieces of code and we don't want to resolve them
* through the standard plt/tramp mechanism since we might expose ourselves
* to crashes and infinite recursions.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, GUINT_TO_POINTER(key));
return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
} else {
gpointer getter = mono_tls_get_tls_getter (key, FALSE);
return mono_emit_jit_icall (cfg, getter, NULL);
}
}
/*
* emit_push_lmf:
*
* Emit IR to push the current LMF onto the LMF stack.
*/
static void
emit_push_lmf (MonoCompile *cfg)
{
/*
* Emit IR to push the LMF:
* lmf_addr = <lmf_addr from tls>
* lmf->lmf_addr = lmf_addr
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
*/
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
int lmf_reg, prev_lmf_reg;
/*
* Store lmf_addr in a variable, so it can be allocated to a global register.
*/
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
#ifdef HOST_WIN32
ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
g_assert (ins);
int jit_tls_dreg = ins->dreg;
lmf_reg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
#else
lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
g_assert (lmf_ins);
#endif
lmf_ins->dreg = cfg->lmf_addr_var->dreg;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
prev_lmf_reg = alloc_preg (cfg);
/* Save previous_lmf */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
* emit_pop_lmf:
*
* Emit IR to pop the current LMF from the LMF stack.
*/
static void
emit_pop_lmf (MonoCompile *cfg)
{
int lmf_reg, lmf_addr_reg;
MonoInst *ins;
if (!cfg->lmf_ir)
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
int prev_lmf_reg;
/*
* Emit IR to pop the LMF:
* *(lmf->lmf_addr) = lmf->prev_lmf
*/
/* This could be called before emit_push_lmf () */
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_addr_reg = cfg->lmf_addr_var->dreg;
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
/*
* target_type_is_incompatible:
* @cfg: MonoCompile context
*
* Check that the item @arg on the evaluation stack can be stored
* in the target type (can be a local, or field, etc).
* The cfg arg can be used to check if we need verification or just
* validity checks.
*
* Returns: non-0 value if arg can't be stored on a target.
*/
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
MonoType *simple_type;
MonoClass *klass;
if (target->byref) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP) {
/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
/* if the target is native int& or X* or same type */
if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
return 0;
/* Both are primitive type byrefs and the source points to a larger type than the destination */
if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
return 0;
return 1;
}
if (arg->type == STACK_PTR)
return 0;
return 1;
}
simple_type = mini_get_underlying_type (target);
switch (simple_type->type) {
case MONO_TYPE_VOID:
return 1;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (arg->type != STACK_I4 && arg->type != STACK_PTR)
return 1;
return 0;
case MONO_TYPE_PTR:
/* STACK_MP is needed when setting pinned locals */
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
return 1;
return 0;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_FNPTR:
/*
* Some opcodes like ldloca return 'transient pointers', which can be stored
* in native int. (#688008).
*/
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
return 1;
return 0;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (arg->type != STACK_I8)
return 1;
return 0;
case MONO_TYPE_R4:
if (arg->type != cfg->r4_stack_type)
return 1;
return 0;
case MONO_TYPE_R8:
if (arg->type != STACK_R8)
return 1;
return 0;
case MONO_TYPE_VALUETYPE:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_TYPEDBYREF:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (simple_type)) {
MonoClass *target_class;
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
target_class = mono_class_from_mono_type_internal (target);
/* The last two checks are needed when doing partial sharing */
if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
return 1;
return 0;
} else {
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (simple_type)) {
if (arg->type != STACK_VTYPE)
return 1;
} else {
if (arg->type != STACK_OBJ)
return 1;
}
return 0;
default:
g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
}
return 1;
}
/*
* convert_value:
*
* Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
*/
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
{
if (!cfg->r4fp)
return ins;
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_R4:
if (ins->type == STACK_R8) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
conv->type = STACK_R4;
return conv;
}
break;
case MONO_TYPE_R8:
if (ins->type == STACK_R4) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
conv->type = STACK_R8;
return conv;
}
break;
default:
break;
}
return ins;
}
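/*
 * For example, with r4fp enabled an R8 value assigned to an R4 destination
 * is narrowed with OP_FCONV_TO_R4, and an R4 value assigned to an R8
 * destination is widened with OP_RCONV_TO_R8.
 */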
/*
* Prepare arguments for passing to a function call.
* Return a non-zero value if the arguments can't be passed to the given
* signature.
* The type checks are not yet complete and some conversions may need
* casts on 32 or 64 bit architectures.
*
* FIXME: implement this using target_type_is_incompatible ()
*/
static gboolean
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
{
MonoType *simple_type;
int i;
if (sig->hasthis) {
if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
return TRUE;
args++;
}
for (i = 0; i < sig->param_count; ++i) {
if (sig->params [i]->byref) {
if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
return TRUE;
continue;
}
simple_type = mini_get_underlying_type (sig->params [i]);
handle_enum:
switch (simple_type->type) {
case MONO_TYPE_VOID:
return TRUE;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
return TRUE;
continue;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (args [i]->type != STACK_I8)
return TRUE;
continue;
case MONO_TYPE_R4:
if (args [i]->type != cfg->r4_stack_type)
return TRUE;
continue;
case MONO_TYPE_R8:
if (args [i]->type != STACK_R8)
return TRUE;
continue;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (simple_type->data.klass)) {
simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
goto handle_enum;
}
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_TYPEDBYREF:
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_GENERICINST:
simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
default:
g_error ("unknown type 0x%02x in check_call_signature",
simple_type->type);
}
}
return FALSE;
}
MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->data.target = target;
return ji;
}
int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
if (cfg->gshared)
return mono_class_check_context_used (klass);
else
return 0;
}
int
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
{
if (cfg->gshared)
return mono_method_check_context_used (method);
else
return 0;
}
/*
* check_method_sharing:
*
* Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
*/
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
gboolean pass_vtable = FALSE;
gboolean pass_mrgctx = FALSE;
if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
gboolean sharable = FALSE;
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
sharable = TRUE;
/*
* Pass vtable iff target method might
* be shared, which means that sharing
* is enabled for its class and its
* context is sharable (and it's not a
* generic method).
*/
if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
pass_vtable = TRUE;
}
if (mini_method_needs_mrgctx (cmethod)) {
if (mini_method_is_default_method (cmethod))
pass_vtable = FALSE;
else
g_assert (!pass_vtable);
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
pass_mrgctx = TRUE;
} else {
if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
pass_mrgctx = TRUE;
}
}
if (out_pass_vtable)
*out_pass_vtable = pass_vtable;
if (out_pass_mrgctx)
*out_pass_mrgctx = pass_mrgctx;
}
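/*
 * Illustrative example (hypothetical method names): for a shared static method
 * of a generic class, e.g. Gen<T>.Foo (), the vtable of the instantiation is
 * passed as a hidden argument (pass_vtable); a shared generic method such as
 * Bar<T> () gets its method runtime generic context instead (pass_mrgctx).
 */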
static gboolean
direct_icalls_enabled (MonoCompile *cfg)
{
/* Currently disabled unconditionally; the checks below are dead code kept for reference */
return FALSE;
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
return FALSE;
#endif
if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
return FALSE;
return TRUE;
}
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
/*
* Call the jit icall without a wrapper if possible.
* The wrapper is needed to be able to do stack walks for asynchronously suspended
* threads when debugging.
*/
if (direct_icalls_enabled (cfg)) {
char *name;
int costs;
if (!info->wrapper_method) {
name = g_strdup_printf ("__icall_wrapper_%s", info->name);
info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
g_free (name);
mono_memory_barrier ();
}
/*
* Inline the wrapper method, which is basically a call to the C icall, and
* an exception check.
*/
costs = inline_method (cfg, info->wrapper_method, NULL,
args, NULL, il_offset, TRUE);
g_assert (costs > 0);
g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
return args [0];
} else {
return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
}
}
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
int widen_op = -1;
/*
* Native code might return non-register-sized integers
* without initializing the upper bits.
*/
switch (mono_type_to_load_membase (cfg, fsig->ret)) {
case OP_LOADI1_MEMBASE:
widen_op = OP_ICONV_TO_I1;
break;
case OP_LOADU1_MEMBASE:
widen_op = OP_ICONV_TO_U1;
break;
case OP_LOADI2_MEMBASE:
widen_op = OP_ICONV_TO_I2;
break;
case OP_LOADU2_MEMBASE:
widen_op = OP_ICONV_TO_U2;
break;
default:
break;
}
if (widen_op != -1) {
int dreg = alloc_preg (cfg);
MonoInst *widen;
EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
widen->type = ins->type;
ins = widen;
}
}
}
return ins;
}
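/*
 * Illustrative example: a pinvoke returning an 8-bit integer (loaded via
 * OP_LOADI1_MEMBASE) may leave the upper bits of the return register
 * undefined, so the result is widened with OP_ICONV_TO_I1 before managed
 * code consumes it.
 */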
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static void
emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
MonoInst *args [2];
args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
mono_emit_jit_icall (cfg, mono_throw_method_access, args);
}
static MonoMethod*
get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
{
MonoMethod *method;
ERROR_DECL (error);
method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
mono_error_assert_ok (error);
g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
return method;
}
MonoMethod*
mini_get_memcpy_method (void)
{
static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
if (!memcpy_method)
g_error ("Old corlib found. Install a new one");
}
return memcpy_method;
}
void
mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
target_mgreg_t card_table_mask;
guint8 *card_table;
MonoInst *dummy_use;
int nursery_shift_bits;
size_t nursery_size;
if (!cfg->gen_write_barriers)
return;
//method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
} else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
/*
* We emit a fast, lightweight write barrier. It always marks cards, as in the
* concurrent collector case, so for the serial collector it might slightly slow
* down nursery collections. We also expect the host system and the target
* system to have the same card table configuration, which is the case if they
* have the same pointer size.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
/* We can't use PADD_IMM since the card table might end up at a high address and
* amd64 doesn't support immediates larger than 32 bits.
*/
ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
} else {
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
}
EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
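/*
 * The card table fastpath emitted above is roughly equivalent to this C
 * (illustrative only; the table address, shift and mask come from the GC):
 *
 *   size_t offset = (size_t) ptr >> card_table_shift_bits;
 *   if (card_table_mask)
 *       offset &= card_table_mask;
 *   *(card_table + offset) = 1;
 *
 * i.e. mark the card covering PTR so the collector rescans that range.
 */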
MonoMethod*
mini_get_memset_method (void)
{
static MonoMethod *memset_method = NULL;
if (!memset_method) {
memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
if (!memset_method)
g_error ("Old corlib found. Install a new one");
}
return memset_method;
}
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
int n;
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
MonoInst *bzero_ins = NULL;
static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init_internal (klass);
if (mini_is_gsharedvt_klass (klass)) {
size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
g_assert (bzero_method);
iargs [0] = dest;
iargs [1] = size_ins;
mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
return;
}
klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
n = mono_class_value_size (klass, &align);
if (n <= TARGET_SIZEOF_VOID_P * 8) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
} else {
memset_method = mini_get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
EMIT_NEW_ICONST (cfg, iargs [2], n);
mono_emit_method_call (cfg, memset_method, iargs, NULL);
}
}
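/*
 * Illustrative example: 'initobj' on a 24-byte struct becomes inline stores
 * via mini_emit_memset (), while a struct larger than
 * TARGET_SIZEOF_VOID_P * 8 bytes turns into the equivalent of
 * memset (dest, 0, n) through the managed memset helper.
 */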
static gboolean
context_used_is_mrgctx (MonoCompile *cfg, int context_used)
{
/* gshared DIM (default interface) methods use an mrgctx */
if (mini_method_is_default_method (cfg->method))
return context_used != 0;
return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
}
/*
* emit_get_rgctx:
*
* Emit IR to load the runtime generic context: the mrgctx for shared generic
* (and default interface) methods, the vtable for static or valuetype methods
* of a generic class, and this->vtable for other instance methods.
*/
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, int context_used)
{
MonoInst *this_ins = NULL;
MonoMethod *method = cfg->method;
g_assert (cfg->gshared);
if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
!m_class_is_valuetype (method->klass))
EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
if (context_used_is_mrgctx (cfg, context_used)) {
MonoInst *mrgctx_loc, *mrgctx_var;
if (!mini_method_is_default_method (method)) {
g_assert (!this_ins);
g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
}
mrgctx_loc = mono_get_vtable_var (cfg);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
return mrgctx_var;
} else if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass)) {
MonoInst *vtable_loc, *vtable_var;
g_assert (!this_ins);
vtable_loc = mono_get_vtable_var (cfg);
EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
if (method->is_inflated && mono_method_get_context (method)->method_inst) {
MonoInst *mrgctx_var = vtable_var;
int vtable_reg;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
vtable_var->type = STACK_PTR;
}
return vtable_var;
} else {
MonoInst *ins;
int vtable_reg;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
return ins;
}
}
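/*
 * Summary of the cases above (illustrative):
 * - shared generic (or default interface) method: load the mrgctx variable;
 * - static or valuetype method of a generic class: load the vtable variable,
 *   reading it out of the mrgctx when the method itself is inflated;
 * - other instance methods: load this->vtable.
 */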
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
{
MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
if (in_mrgctx)
res->d.method = method;
else
res->d.klass = method->klass;
res->in_mrgctx = in_mrgctx;
res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
res->data->type = patch_type;
res->data->data.target = patch_data;
res->info_type = info_type;
return res;
}
static inline MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
MonoInst *args [16];
MonoInst *call;
// FIXME: No fastpath since the slot is not a compile time constant
args [0] = rgctx;
EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
if (entry->in_mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
return call;
#if 0
/*
* FIXME: This can be called during decompose, which is a problem since it creates
* new bblocks.
* Also, the fastpath doesn't work since the slot number is dynamically allocated.
*/
int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
gboolean mrgctx;
MonoBasicBlock *is_null_bb, *end_bb;
MonoInst *res, *ins, *call;
MonoInst *args[16];
slot = mini_get_rgctx_entry_slot (entry);
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
for (depth = 0; ; ++depth) {
int size = mono_class_rgctx_get_array_size (depth, mrgctx);
if (index < size - 1)
break;
index -= size - 1;
}
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, is_null_bb);
if (mrgctx) {
rgctx_reg = rgctx->dreg;
} else {
rgctx_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
// FIXME: Avoid this check by allocating the table when the vtable is created etc.
NEW_BBLOCK (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
for (i = 0; i < depth; ++i) {
int array_reg = alloc_preg (cfg);
/* load ptr to next array */
if (mrgctx && i == 0)
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
else
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
rgctx_reg = array_reg;
/* is the ptr null? */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
/* if yes, jump to actual trampoline */
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
/* fetch slot */
val_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * TARGET_SIZEOF_VOID_P);
/* is the slot null? */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
/* if yes, jump to actual trampoline */
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath */
res_reg = alloc_preg (cfg);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = res_reg;
ins->sreg1 = val_reg;
MONO_ADD_INS (cfg->cbb, ins);
res = ins;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath */
MONO_START_BB (cfg, is_null_bb);
args [0] = rgctx;
EMIT_NEW_ICONST (cfg, args [1], index);
if (mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = res_reg;
ins->sreg1 = call->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
#endif
}
/*
* emit_rgctx_fetch:
*
* Emit IR to load the value of the rgctx entry ENTRY from the rgctx
* given by RGCTX.
*/
static MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
if (cfg->llvm_only)
return emit_rgctx_fetch_inline (cfg, rgctx, entry);
else
return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
/*
* mini_emit_get_rgctx_klass:
*
* Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
MonoInst*
mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_KLASS:
EMIT_NEW_CLASSCONST (cfg, ins, klass);
return ins;
default:
g_assert_not_reached ();
}
}
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
static MonoInst*
emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
static MonoInst*
emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoGSharedVtCall *call_info;
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
call_info->sig = sig;
call_info->method = cmethod;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
* emit_get_rgctx_virt_method:
*
* Return data for method VIRT_METHOD for a receiver of type KLASS.
*/
static MonoInst*
emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoVirtMethod *info;
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
info->klass = klass;
info->method = virt_method;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
{
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
* emit_get_rgctx_method:
*
* Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
if (context_used == -1)
context_used = mono_method_check_context_used (cmethod);
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_METHOD:
EMIT_NEW_METHODCONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_RGCTX:
EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_FTNDESC:
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
return ins;
default:
g_assert_not_reached ();
}
} else {
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
}
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
MonoInst*
mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
}
static int
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
MonoRuntimeGenericContextInfoTemplate *template_;
int i, idx;
g_assert (info);
for (i = 0; i < info->num_entries; ++i) {
MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
return i;
}
if (info->num_entries == info->count_entries) {
MonoRuntimeGenericContextInfoTemplate *new_entries;
int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
info->entries = new_entries;
info->count_entries = new_count_entries;
}
idx = info->num_entries;
template_ = &info->entries [idx];
template_->info_type = rgctx_type;
template_->data = data;
info->num_entries ++;
return idx;
}
/*
* emit_get_gsharedvt_info:
*
* This is similar to the emit_get_rgctx_* functions, but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
*/
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoInst *ins;
int idx, dreg;
idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
/* Load info->entries [idx] */
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
return ins;
}
MonoInst*
mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
}
/*
* On return the caller must check @klass for load errors.
*/
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
MonoInst *vtable_arg;
int context_used;
context_used = mini_class_check_context_used (cfg, klass);
if (context_used) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
if (!is_ok (&cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
MonoInst *ins;
/*
* Using an opcode instead of emitting IR here lets the call be hidden inside the opcode,
* so it doesn't have to clobber any regs and it doesn't break basic blocks.
*/
MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
ins->sreg1 = vtable_arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else {
int inited_reg;
MonoBasicBlock *inited_bb;
inited_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
NEW_BBLOCK (cfg, inited_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
MONO_START_BB (cfg, inited_bb);
}
}
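/*
 * The fallback path above is roughly equivalent to this C (illustrative):
 *
 *   if (!vtable->initialized)
 *       mono_generic_class_init (vtable);
 *
 * with the flag check inlined, so already-initialized classes only pay for a
 * byte load and a branch.
 */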
static void
emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
{
MonoInst *ins;
if (cfg->gen_seq_points && cfg->method == method) {
NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
if (nonempty_stack)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
cfg->last_seq_point = ins;
}
}
void
mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
if (mini_get_debug_options ()->better_cast_details) {
int vtable_reg = alloc_preg (cfg);
int klass_reg = alloc_preg (cfg);
MonoBasicBlock *is_null_bb = NULL;
MonoInst *tls_get;
if (null_check) {
NEW_BBLOCK (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
if (!tls_get) {
fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
exit (1);
}
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
if (null_check)
MONO_START_BB (cfg, is_null_bb);
}
}
void
mini_reset_cast_details (MonoCompile *cfg)
{
/* Reset the variables holding the cast details */
if (mini_get_debug_options ()->better_cast_details) {
MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* It is enough to reset the from field */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
}
}
/*
* On return the caller must check @array_class for load errors
*/
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
int vtable_reg = alloc_preg (cfg);
int context_used;
context_used = mini_class_check_context_used (cfg, array_class);
mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (cfg->opt & MONO_OPT_SHARED) {
int class_reg = alloc_preg (cfg);
MonoInst *ins;
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
} else if (context_used) {
MonoInst *vtable_ins;
vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
} else {
if (cfg->compile_aot) {
int vt_reg;
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
} else {
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
}
}
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
mini_reset_cast_details (cfg);
}
/**
* Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
* generic code is generated.
*/
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
MonoMethod* method;
if (m_class_is_enumtype (mono_class_get_nullable_param (klass)))
method = get_method_nofail (klass, "UnboxExact", 1, 0);
else
method = get_method_nofail (klass, "Unbox", 1, 0);
g_assert (method);
if (context_used) {
MonoInst *rgctx, *addr;
/* FIXME: What if the class is shared? We might not
have to get the address of the method from the
RGCTX. */
if (cfg->llvm_only) {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
} else {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
mono_error_assert_ok (&cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
MonoInst *add;
int obj_reg;
int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
int klass_reg = alloc_dreg (cfg ,STACK_PTR);
int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
int rank_reg = alloc_dreg (cfg ,STACK_I4);
obj_reg = sp [0]->dreg;
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
/* FIXME: generics */
g_assert (m_class_get_rank (klass) == 0);
// Check rank == 0
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
if (context_used) {
MonoInst *element_class;
/* This assertion is from the unboxcast insn */
g_assert (m_class_get_rank (klass) == 0);
element_class = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
mini_reset_cast_details (cfg);
}
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
add->type = STACK_MP;
add->klass = klass;
return add;
}
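/*
 * Illustrative: once the vtable/element-class checks pass, unboxing is plain
 * pointer arithmetic; the returned managed pointer is
 *
 *   (guint8 *) obj + MONO_ABI_SIZEOF (MonoObject)
 *
 * i.e. the vtype payload that follows the object header.
 */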
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
MonoInst *addr, *klass_inst, *is_ref, *args[16];
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *ins;
int dreg, addr_reg;
klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* obj */
args [0] = obj;
/* klass */
args [1] = klass_inst;
/* CASTCLASS */
obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* This will contain either the address of the unboxed vtype, or the address of the temporary where the ref is stored */
addr_reg = alloc_dreg (cfg, STACK_MP);
/* Non-ref case */
/* UNBOX */
NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, addr);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* Save the ref to a temporary */
dreg = alloc_ireg (cfg);
EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = m_class_get_byval_arg (klass);
unbox_sig->param_count = 1;
unbox_sig->params [0] = mono_get_object_type ();
if (cfg->llvm_only)
unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
else
unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* End */
MONO_START_BB (cfg, end_bb);
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
return ins;
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
MonoInst *iargs [2];
void *alloc_ftn;
if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
char* full_name = mono_type_get_full_name (klass);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_member_access (&cfg->error, "Cannot create an abstract class: %s", full_name);
g_free (full_name);
return NULL;
}
if (context_used) {
MonoInst *data;
MonoRgctxInfoType rgctx_info;
MonoInst *iargs [2];
gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
if (cfg->opt & MONO_OPT_SHARED)
rgctx_info = MONO_RGCTX_INFO_KLASS;
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
if (cfg->opt & MONO_OPT_SHARED) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
iargs [1] = data;
alloc_ftn = (gpointer)ves_icall_object_new;
} else {
iargs [0] = data;
alloc_ftn = (gpointer)ves_icall_object_new_specific;
}
if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
if (known_instance_size) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_ICONST (cfg, iargs [1], size);
}
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
if (cfg->opt & MONO_OPT_SHARED) {
EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
alloc_ftn = (gpointer)ves_icall_object_new;
} else if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
alloc_ftn = (gpointer)mono_helper_newobj_mscorlib;
} else {
MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
if (!is_ok (&cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
if (managed_alloc) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = (gpointer)ves_icall_object_new_specific;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
MonoInst*
mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
MonoInst *alloc, *ins;
if (G_UNLIKELY (m_class_is_byreflike (klass))) {
mono_error_set_bad_image (&cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
if (mono_class_is_nullable (klass)) {
MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
if (context_used) {
if (cfg->llvm_only && cfg->gsharedvt) {
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
} else {
/* FIXME: What if the class is shared? We might not
have to get the method address from the RGCTX. */
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
mono_error_assert_ok (&cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
if (mini_is_gsharedvt_klass (klass)) {
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
int dreg;
dreg = alloc_ireg (cfg);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Non-ref case */
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
ins->opcode = OP_STOREV_MEMBASE;
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
res->type = STACK_OBJ;
res->klass = klass;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* val is a vtype, so we have to load the value manually */
src_var = get_vreg_to_inst (cfg, val->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
MonoInst *box_call;
MonoMethodSignature *box_sig;
/*
* klass is Nullable<T>; we need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
* construct that method at JIT time, so we have to do things by hand.
*/
box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
box_sig->ret = mono_get_object_type ();
box_sig->param_count = 1;
box_sig->params [0] = m_class_get_byval_arg (klass);
if (cfg->llvm_only)
box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
else
box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
res->type = STACK_OBJ;
res->klass = klass;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
}
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
return alloc;
}
static gboolean
method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
{
if (cmethod->klass == mono_defaults.systemtype_class) {
if (!strcmp (cmethod->name, "GetType"))
return TRUE;
}
return FALSE;
}
G_GNUC_UNUSED MonoInst*
mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
{
MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
gboolean is_i4;
switch (enum_type->type) {
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
case MONO_TYPE_I:
case MONO_TYPE_U:
#endif
is_i4 = FALSE;
break;
default:
is_i4 = TRUE;
break;
}
{
MonoInst *load = NULL, *and_, *cmp, *ceq;
int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int dest_reg = alloc_ireg (cfg);
if (enum_this) {
EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
} else {
g_assert (enum_val_reg != -1);
enum_reg = enum_val_reg;
}
EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
ceq->type = STACK_I4;
if (!is_i4) {
load = load ? mono_decompose_opcode (cfg, load) : NULL;
and_ = mono_decompose_opcode (cfg, and_);
cmp = mono_decompose_opcode (cfg, cmp);
ceq = mono_decompose_opcode (cfg, ceq);
}
return ceq;
}
}
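/*
 * The IR built above computes the usual Enum.HasFlag () test, equivalent to
 * (illustrative):
 *
 *   ((value & flag) == flag) ? 1 : 0
 *
 * using 32-bit or 64-bit ALU opcodes depending on the enum's underlying type.
 */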
static MonoInst*
emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
{
MonoDelegateClassMethodPair *info;
MonoJumpInfoRgctxEntry *entry;
MonoInst *rgctx;
info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
info->klass = klass;
info->method = virt_method;
info->is_virtual = _virtual;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
rgctx = emit_get_rgctx (cfg, context_used);
return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
{
MonoInst *ptr;
int dreg;
gpointer trampoline;
MonoInst *obj, *tramp_ins;
MonoDomain *domain;
guint8 **code_slot;
if (virtual_ && !cfg->llvm_only) {
MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
g_assert (invoke);
//FIXME verify & fix any issue with removing invoke_context_used restriction
if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
return NULL;
}
obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
if (!obj)
return NULL;
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
/* Optimize away setting of NULL target */
if (!MONO_INS_IS_PCONST_NULL (target)) {
if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
mini_emit_write_barrier (cfg, ptr, target);
}
}
/* Set method field */
if (!(target_method_context_used || invoke_context_used) || cfg->llvm_only) {
// If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
}
/*
* To avoid looking up the compiled code belonging to the target method
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *code_slot_ins;
if (target_method_context_used) {
code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
} else {
domain = mono_domain_get ();
mono_domain_lock (domain);
if (!domain_jit_info (domain)->method_code_hash)
domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
if (!code_slot) {
code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
}
mono_domain_unlock (domain);
code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
if (cfg->llvm_only) {
if (virtual_) {
MonoInst *args [ ] = {
obj,
target,
emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
};
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
} else {
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, &obj);
}
return obj;
}
if (target_method_context_used || invoke_context_used) {
tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
// This is emitted as a constant store for the non-shared case.
// We copy from the delegate trampoline info as it's faster than an rgctx fetch.
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
} else if (cfg->compile_aot) {
MonoDelegateClassMethodPair *del_tramp;
del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
del_tramp->klass = klass;
del_tramp->method = method;
del_tramp->is_virtual = virtual_;
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
} else {
if (virtual_)
trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, method);
else
trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, method);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
/* Set invoke_impl field */
if (virtual_) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
} else {
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
}
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
return obj;
}
/*
* handle_constrained_gsharedvt_call:
*
* Handle constrained calls where the receiver is a gsharedvt type.
* Return the instruction representing the call. Set the cfg exception on failure.
*/
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
gboolean *ref_emit_widen)
{
MonoInst *ins = NULL;
gboolean emit_widen = *ref_emit_widen;
gboolean supported;
/*
* Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
* This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
* pack the arguments into an array, and do the rest of the work in an icall.
*/
supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
if (supported)
supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
if (supported) {
if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
supported = TRUE;
} else {
/* Allow scalar parameters and a gsharedvt first parameter */
supported = MONO_TYPE_IS_PRIMITIVE (fsig->params [0]) || MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]);
if (supported) {
for (int i = 1; i < fsig->param_count; ++i) {
if (!(fsig->params [i]->byref || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i])))
supported = FALSE;
}
}
}
}
if (supported) {
MonoInst *args [16];
/*
* This case handles calls to
* - object:ToString()/Equals()/GetHashCode(),
* - System.IComparable<T>:CompareTo()
* - System.IEquatable<T>:Equals ()
* plus some simple interface calls enough to support AsyncTaskMethodBuilder.
*/
args [0] = sp [0];
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
if (fsig->hasthis && fsig->param_count) {
/* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, ins);
args [4] = ins;
/* Only the first argument is allowed to be gsharedvt */
/* args [3] = deref_arg */
if (mini_is_gsharedvt_type (fsig->params [0])) {
int deref_arg_reg;
ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
deref_arg_reg = alloc_preg (cfg);
/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
} else {
EMIT_NEW_ICONST (cfg, args [3], 0);
}
for (int i = 0; i < fsig->param_count; ++i) {
int addr_reg;
if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i])) {
EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [i + 1]->dreg, fsig->params [i]);
addr_reg = ins->dreg;
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
} else {
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
}
}
} else {
EMIT_NEW_ICONST (cfg, args [3], 0);
EMIT_NEW_ICONST (cfg, args [4], 0);
}
ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
emit_widen = FALSE;
if (mini_is_gsharedvt_type (fsig->ret)) {
ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
MonoInst *add;
/* Unbox */
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
/* Load value */
NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
MONO_ADD_INS (cfg->cbb, ins);
/* ins represents the call result */
}
} else {
GSHAREDVT_FAILURE (CEE_CALLVIRT);
}
*ref_emit_widen = emit_widen;
return ins;
exception_exit:
return NULL;
}
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
MonoInst *getaddr, *dummy_use;
if (!cfg->got_var || cfg->got_var_allocated)
return;
MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
getaddr->cil_code = cfg->header->code;
getaddr->dreg = cfg->got_var->dreg;
/* Add it to the start of the first bblock */
if (cfg->bb_entry->code) {
getaddr->next = cfg->bb_entry->code;
cfg->bb_entry->code = getaddr;
}
else
MONO_ADD_INS (cfg->bb_entry, getaddr);
cfg->got_var_allocated = TRUE;
/*
* Add a dummy use to keep the got_var alive, since real uses might
* only be generated by the back ends.
* Add it to end_bblock, so the variable's lifetime covers the whole
* method.
* It would be better to make the usage of the got var explicit in all
* cases when the backend needs it (i.e. calls, throw etc.), so this
* wouldn't be needed.
*/
NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
static int inline_limit, llvm_jit_inline_limit;
static gboolean inline_limit_inited;
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
MonoMethodHeaderSummary header;
MonoVTable *vtable;
int limit;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
MonoMethodSignature *sig = mono_method_signature_internal (method);
int i;
#endif
if (cfg->disable_inline)
return FALSE;
if (cfg->gsharedvt)
return FALSE;
if (cfg->inline_depth > 10)
return FALSE;
if (!mono_method_get_header_summary (method, &header))
return FALSE;
/* runtime, icall and pinvoke are checked by the summary call */
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
(mono_class_is_marshalbyref (method->klass)) ||
header.has_clauses)
return FALSE;
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
if (!inline_limit_inited) {
char *inlinelimit;
if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
inline_limit = atoi (inlinelimit);
llvm_jit_inline_limit = inline_limit;
g_free (inlinelimit);
} else {
inline_limit = INLINE_LENGTH_LIMIT;
llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
}
inline_limit_inited = TRUE;
}
if (COMPILE_LLVM (cfg) && !cfg->compile_aot)
limit = llvm_jit_inline_limit;
else
limit = inline_limit;
if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
return FALSE;
/*
* If we can initialize the class of the method right away, we do.
* Otherwise, if the class still needs initialization, we don't allow
* inlining, since it would mean inserting a call to
* mono_runtime_class_init () inside the inlined code.
*/
if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
return FALSE;
if (!(cfg->opt & MONO_OPT_SHARED)) {
/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
if (m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!cfg->compile_aot) {
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
}
} else if (mono_class_is_before_field_init (method->klass)) {
if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
/* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
if (!m_class_get_runtime_info (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
/* This makes sure that inlining cannot trigger .cctors: */
/* too many apps depend on them running in a specific order... */
if (! vtable->initialized)
return FALSE;
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
ERROR_DECL (error);
if (!m_class_get_runtime_info (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!vtable->initialized)
return FALSE;
}
} else {
/*
* If we're compiling shared code, the cctor will need to be run later,
* for example at AOT method load time, or at the end of the compilation
* of the inlining method.
*/
if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
return FALSE;
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) {
/* FIXME: */
if (sig->ret && sig->ret->type == MONO_TYPE_R4)
return FALSE;
for (i = 0; i < sig->param_count; ++i)
if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
return FALSE;
}
#endif
if (g_list_find (cfg->dont_inline, method))
return FALSE;
if (mono_profiler_get_call_instrumentation_flags (method))
return FALSE;
if (mono_profiler_coverage_instrumentation_enabled (method))
return FALSE;
return TRUE;
}
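/*
 * Usage note (illustrative): the size limit checked above can be overridden
 * through the environment, e.g.
 *
 *   MONO_INLINELIMIT=40 mono app.exe
 *
 * which, per the code above, sets both the JIT and the LLVM-JIT limits to 40
 * bytes of IL.
 */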
static gboolean
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
{
if (!cfg->compile_aot) {
g_assert (vtable);
if (vtable->initialized)
return FALSE;
}
if (mono_class_is_before_field_init (klass)) {
if (cfg->method == method)
return FALSE;
}
if (!mono_class_needs_cctor_run (klass, method))
return FALSE;
if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
/* The initialization is already done before the method is called */
return FALSE;
return TRUE;
}
int
mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
{
int index_reg = index->dreg;
int index2_reg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
/*
* abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
* during OP_BOUNDS_CHECK decomposition, and in the implementation
* of OP_X86_LEA for llvm.
*/
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
}
#else
if (index->type == STACK_I8) {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
} else {
index2_reg = index_reg;
}
#endif
return index2_reg;
}
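/*
 * Illustrative example: on a 64-bit target, a 32-bit array index must be
 * sign-extended (OP_SEXT_I4) before it can take part in 64-bit address
 * arithmetic; on a 32-bit target, an I8 index is truncated with
 * OP_LCONV_TO_I4 instead.
 */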
MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
MonoInst *ins;
guint32 size;
int mult_reg, add_reg, array_reg, index2_reg;
int context_used;
if (mini_is_gsharedvt_variable_klass (klass)) {
size = -1;
} else {
mono_class_init_internal (klass);
size = mono_class_array_element_size (klass);
}
mult_reg = alloc_preg (cfg);
array_reg = arr->dreg;
index2_reg = mini_emit_sext_index_reg (cfg, index);
if (bcheck)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
ins->klass = klass;
ins->type = STACK_MP;