Mark code instance fields as const or atomic (#46203)
This co-opts half of #44968 to mark jl_code_instance_t fields as either atomic or const.

Co-authored-by: Valentin Churavy <vchuravy@users.noreply.github.com>
pchintalapudi and vchuravy committed Aug 11, 2022
1 parent f345923 commit d94ed88
Showing 15 changed files with 92 additions and 65 deletions.
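
Before the per-file diffs, a Julia-level picture of what the two annotations mean. The sketch below is hypothetical (a stand-in Cache type, not the real jl_code_instance_t): const fields are write-once after construction, while @atomic fields must be read and written with explicit orderings, as the compiler changes below do with `@atomic :monotonic`.

mutable struct Cache
    const def::Symbol       # write-once, like the fields marked const by this commit
    @atomic inferred::Any   # concurrently mutated, like codeinst->inferred
end

c = Cache(:f, nothing)
inferred = @atomic :monotonic c.inferred   # relaxed ("monotonic") load
@atomic c.inferred = Any[]                 # a bare @atomic store is sequentially consistent
c.def                                      # const fields need no annotation to read
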
12 changes: 9 additions & 3 deletions base/compiler/abstractinterpretation.jl
@@ -1107,9 +1107,15 @@ function const_prop_methodinstance_heuristic(
return false
else
code = get(code_cache(interp), mi, nothing)
if isdefined(code, :inferred) && inlining_policy(
interp, code.inferred, IR_FLAG_NULL, mi, argtypes) !== nothing
return true
if isdefined(code, :inferred)
if isa(code, CodeInstance)
inferred = @atomic :monotonic code.inferred
else
inferred = code.inferred
end
if inlining_policy(interp, inferred, IR_FLAG_NULL, mi, argtypes) !== nothing
return true
end
end
end
end
2 changes: 1 addition & 1 deletion base/compiler/ssair/inlining.jl
@@ -832,7 +832,7 @@ function resolve_todo(todo::InliningTodo, state::InliningState, flag::UInt8)
et !== nothing && push!(et, mi)
return ConstantCase(quoted(code.rettype_const))
else
src = code.inferred
src = @atomic :monotonic code.inferred
end
effects = decode_effects(code.ipo_purity_bits)
else # fallback pass for external AbstractInterpreter cache
2 changes: 1 addition & 1 deletion base/compiler/ssair/passes.jl
@@ -956,7 +956,7 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, mi::
et !== nothing && push!(et, mi)
return true
end
src = code.inferred
src = @atomic :monotonic code.inferred
else
src = code
end
5 changes: 3 additions & 2 deletions base/compiler/typeinfer.jl
@@ -880,7 +880,8 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
mi = specialize_method(method, atype, sparams)::MethodInstance
code = get(code_cache(interp), mi, nothing)
if code isa CodeInstance # return existing rettype if the code is already inferred
if code.inferred === nothing && is_stmt_inline(get_curr_ssaflag(caller))
inferred = @atomic :monotonic code.inferred
if inferred === nothing && is_stmt_inline(get_curr_ssaflag(caller))
# we already inferred this edge before and decided to discard the inferred code,
# nevertheless we re-infer it here again and keep it around in the local cache
# since the inliner will request to use it later
@@ -1010,7 +1011,7 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance)
code = get(code_cache(interp), mi, nothing)
if code isa CodeInstance
# see if this code already exists in the cache
inf = code.inferred
inf = @atomic :monotonic code.inferred
if use_const_api(code)
i == 2 && ccall(:jl_typeinf_end, Cvoid, ())
tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ())
10 changes: 6 additions & 4 deletions src/aotcompile.cpp
@@ -219,7 +219,7 @@ static void jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance
jl_code_instance_t *codeinst = NULL;
if (ci != jl_nothing) {
codeinst = (jl_code_instance_t*)ci;
*src_out = (jl_code_info_t*)codeinst->inferred;
*src_out = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
jl_method_t *def = codeinst->def->def.method;
if ((jl_value_t*)*src_out == jl_nothing)
*src_out = NULL;
@@ -234,8 +234,10 @@
*src_out = jl_type_infer(mi, world, 0);
if (*src_out) {
codeinst = jl_get_method_inferred(mi, (*src_out)->rettype, (*src_out)->min_world, (*src_out)->max_world);
if ((*src_out)->inferred && !codeinst->inferred)
codeinst->inferred = jl_nothing;
if ((*src_out)->inferred) {
jl_value_t *null = nullptr;
jl_atomic_cmpswap_relaxed(&codeinst->inferred, &null, jl_nothing);
}
}
}
}
@@ -1007,7 +1009,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
jl_value_t *ci = jl_rettype_inferred(mi, world, world);
if (ci != jl_nothing) {
jl_code_instance_t *codeinst = (jl_code_instance_t*)ci;
src = (jl_code_info_t*)codeinst->inferred;
src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method))
src = jl_uncompress_ir(mi->def.method, codeinst, (jl_array_t*)src);
jlrettype = codeinst->rettype;
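
Note the change in jl_ci_cache_lookup above from a blind `codeinst->inferred = jl_nothing` to a relaxed compare-and-swap: jl_nothing is installed only if the field is still NULL, so a value published concurrently by another thread is not clobbered (jl_generate_fptr_impl in src/jitlayers.cpp below gets the identical treatment). A sketch of the Julia-level analogue, @atomicreplace, reusing the hypothetical Cache type from above with `missing` standing in for the C-level NULL:

c3 = Cache(:h, missing)   # `missing` plays the role of the NULL field
(; old, success) = @atomicreplace c3.inferred missing => nothing
success   # true only if no other thread installed a value first
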
69 changes: 38 additions & 31 deletions src/codegen.cpp
@@ -4839,13 +4839,18 @@ static std::pair<Function*, Function*> get_oc_function(jl_codectx_t &ctx, jl_met
jl_method_instance_t *mi = jl_specializations_get_linfo(closure_method, sigtype, jl_emptysvec);
jl_code_instance_t *ci = (jl_code_instance_t*)jl_rettype_inferred(mi, ctx.world, ctx.world);

if (ci == NULL || (jl_value_t*)ci == jl_nothing || ci->inferred == NULL || ci->inferred == jl_nothing) {
if (ci == NULL || (jl_value_t*)ci == jl_nothing) {
JL_GC_POP();
return std::make_pair((Function*)NULL, (Function*)NULL);
}
auto inferred = jl_atomic_load_relaxed(&ci->inferred);
if (!inferred || inferred == jl_nothing) {
JL_GC_POP();
return std::make_pair((Function*)NULL, (Function*)NULL);
}
++EmittedOpaqueClosureFunctions;

ir = jl_uncompress_ir(closure_method, ci, (jl_array_t*)ci->inferred);
ir = jl_uncompress_ir(closure_method, ci, (jl_array_t*)inferred);

// TODO: Emit this inline and outline it late using LLVM's coroutine support.
orc::ThreadSafeModule closure_m = jl_create_llvm_module(
@@ -8223,7 +8228,7 @@ jl_llvm_functions_t jl_emit_codeinst(
JL_TIMING(CODEGEN);
JL_GC_PUSH1(&src);
if (!src) {
src = (jl_code_info_t*)codeinst->inferred;
src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
jl_method_t *def = codeinst->def->def.method;
if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def))
src = jl_uncompress_ir(def, codeinst, (jl_array_t*)src);
@@ -8254,36 +8259,38 @@
jl_add_code_in_flight(f, codeinst, DL);
}

if (// don't alter `inferred` when the code is not directly being used
params.world &&
if (params.world) {// don't alter `inferred` when the code is not directly being used
auto inferred = jl_atomic_load_relaxed(&codeinst->inferred);
// don't change inferred state
codeinst->inferred) {
jl_method_t *def = codeinst->def->def.method;
if (// keep code when keeping everything
!(JL_DELETE_NON_INLINEABLE) ||
// aggressively keep code when debugging level >= 2
jl_options.debug_level > 1) {
// update the stored code
if (codeinst->inferred != (jl_value_t*)src) {
if (jl_is_method(def)) {
src = (jl_code_info_t*)jl_compress_ir(def, src);
assert(jl_typeis(src, jl_array_uint8_type));
codeinst->relocatability = ((uint8_t*)jl_array_data(src))[jl_array_len(src)-1];
if (inferred) {
jl_method_t *def = codeinst->def->def.method;
if (// keep code when keeping everything
!(JL_DELETE_NON_INLINEABLE) ||
// aggressively keep code when debugging level >= 2
jl_options.debug_level > 1) {
// update the stored code
if (inferred != (jl_value_t*)src) {
if (jl_is_method(def)) {
src = (jl_code_info_t*)jl_compress_ir(def, src);
assert(jl_typeis(src, jl_array_uint8_type));
codeinst->relocatability = ((uint8_t*)jl_array_data(src))[jl_array_len(src)-1];
}
jl_atomic_store_release(&codeinst->inferred, (jl_value_t*)src);
jl_gc_wb(codeinst, src);
}
}
else if (jl_is_method(def)) {// don't delete toplevel code
if (// and there is something to delete (test this before calling jl_ir_inlining_cost)
inferred != jl_nothing &&
// don't delete inlineable code, unless it is constant
(codeinst->invoke == jl_fptr_const_return_addr ||
(jl_ir_inlining_cost((jl_array_t*)inferred) == UINT16_MAX)) &&
// don't delete code when generating a precompile file
!(params.imaging || jl_options.incremental)) {
// if not inlineable, code won't be needed again
jl_atomic_store_release(&codeinst->inferred, jl_nothing);
}
codeinst->inferred = (jl_value_t*)src;
jl_gc_wb(codeinst, src);
}
}
else if (// don't delete toplevel code
jl_is_method(def) &&
// and there is something to delete (test this before calling jl_ir_inlining_cost)
codeinst->inferred != jl_nothing &&
// don't delete inlineable code, unless it is constant
(codeinst->invoke == jl_fptr_const_return_addr || (jl_ir_inlining_cost((jl_array_t*)codeinst->inferred) == UINT16_MAX)) &&
// don't delete code when generating a precompile file
!(params.imaging || jl_options.incremental)) {
// if not inlineable, code won't be needed again
codeinst->inferred = jl_nothing;
}
}
}
@@ -8336,7 +8343,7 @@ void jl_compile_workqueue(
// Reinfer the function. The JIT came along and removed the inferred
// method body. See #34993
if (policy != CompilationPolicy::Default &&
codeinst->inferred && codeinst->inferred == jl_nothing) {
jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) {
src = jl_type_infer(codeinst->def, jl_atomic_load_acquire(&jl_world_counter), 0);
if (src) {
orc::ThreadSafeModule result_m =
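
The restructured block in jl_emit_codeinst above also applies a standard rule for atomic fields: load once into a local and branch on the local, because every separate atomic read may observe a different value. Sketched with the hypothetical Cache type from the top of the page (`process` is a made-up consumer):

inferred = @atomic :monotonic c.inferred   # a single load...
if inferred !== nothing
    # ...so each use below sees exactly the value the test saw, even if
    # another thread stores to c.inferred in the meantime
    process(inferred)
end
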
11 changes: 6 additions & 5 deletions src/dump.c
@@ -689,10 +689,10 @@ static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_
write_uint8(s->s, TAG_CODE_INSTANCE);
write_uint8(s->s, flags);
write_uint32(s->s, codeinst->ipo_purity_bits);
write_uint32(s->s, codeinst->purity_bits);
write_uint32(s->s, jl_atomic_load_relaxed(&codeinst->purity_bits));
jl_serialize_value(s, (jl_value_t*)codeinst->def);
if (write_ret_type) {
jl_serialize_value(s, codeinst->inferred);
jl_serialize_value(s, jl_atomic_load_relaxed(&codeinst->inferred));
jl_serialize_value(s, codeinst->rettype_const);
jl_serialize_value(s, codeinst->rettype);
jl_serialize_value(s, codeinst->argescapes);
@@ -1907,11 +1907,12 @@ static jl_value_t *jl_deserialize_value_code_instance(jl_serializer_state *s, jl
int validate = (flags >> 0) & 3;
int constret = (flags >> 2) & 1;
codeinst->ipo_purity_bits = read_uint32(s->s);
codeinst->purity_bits = read_uint32(s->s);
jl_atomic_store_relaxed(&codeinst->purity_bits, read_uint32(s->s));
codeinst->def = (jl_method_instance_t*)jl_deserialize_value(s, (jl_value_t**)&codeinst->def);
jl_gc_wb(codeinst, codeinst->def);
codeinst->inferred = jl_deserialize_value(s, &codeinst->inferred);
jl_gc_wb(codeinst, codeinst->inferred);
jl_value_t *inferred = jl_deserialize_value(s, NULL);
jl_atomic_store_release(&codeinst->inferred, inferred);
jl_gc_wb(codeinst, inferred);
codeinst->rettype_const = jl_deserialize_value(s, &codeinst->rettype_const);
if (codeinst->rettype_const)
jl_gc_wb(codeinst, codeinst->rettype_const);
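
The deserializer above publishes the freshly read value with a release store. Paired with an acquire (or stronger) load on the reader side, this guarantees that a reader who sees the new `inferred` value also sees every write made before the store. A minimal sketch, again with the hypothetical Cache type:

payload = Any[1, 2, 3]                  # fully initialize before publishing
@atomic :release c.inferred = payload   # the release store is the publication point
# on a concurrent task:
seen = @atomic :acquire c.inferred      # an acquire load pairs with the release store
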
6 changes: 3 additions & 3 deletions src/gf.c
@@ -363,7 +363,7 @@ JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t mi
jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache);
while (codeinst) {
if (codeinst->min_world <= min_world && max_world <= codeinst->max_world) {
jl_value_t *code = codeinst->inferred;
jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred);
if (code && (code == jl_nothing || jl_ir_flag_inferred((jl_array_t*)code)))
return (jl_value_t*)codeinst;
}
@@ -409,7 +409,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
codeinst->min_world = min_world;
codeinst->max_world = max_world;
codeinst->rettype = rettype;
codeinst->inferred = inferred;
jl_atomic_store_release(&codeinst->inferred, inferred);
//codeinst->edges = NULL;
if ((const_flags & 2) == 0)
inferred_const = NULL;
@@ -424,7 +424,7 @@
jl_atomic_store_relaxed(&codeinst->precompile, 0);
jl_atomic_store_relaxed(&codeinst->next, NULL);
codeinst->ipo_purity_bits = ipo_effects;
codeinst->purity_bits = effects;
jl_atomic_store_relaxed(&codeinst->purity_bits, effects);
codeinst->argescapes = argescapes;
codeinst->relocatability = relocatability;
return codeinst;
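
Note the mix of orderings in jl_new_codeinst above: `inferred` gets a release store while `precompile`, `next`, and `purity_bits` get relaxed ones. Relaxed initialization suffices for fields written before the object becomes visible to other threads; the publishing store supplies the ordering. A sketch under that assumption (the `registry` object is hypothetical, its atomic field serving as the publication point):

registry = Cache(:registry, nothing)
c2 = Cache(:g, nothing)
@atomic :monotonic c2.inferred = Any[]  # pre-publication init: relaxed is enough
@atomic :release registry.inferred = c2 # the release store orders the init before it
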
8 changes: 5 additions & 3 deletions src/jitlayers.cpp
@@ -326,7 +326,7 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
jl_value_t *ci = jl_rettype_inferred(mi, world, world);
jl_code_instance_t *codeinst = (ci == jl_nothing ? NULL : (jl_code_instance_t*)ci);
if (codeinst) {
src = (jl_code_info_t*)codeinst->inferred;
src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
if ((jl_value_t*)src == jl_nothing)
src = NULL;
else if (jl_is_method(mi->def.method))
@@ -352,8 +352,10 @@
else if (src && jl_is_code_info(src)) {
if (!codeinst) {
codeinst = jl_get_method_inferred(mi, src->rettype, src->min_world, src->max_world);
if (src->inferred && !codeinst->inferred)
codeinst->inferred = jl_nothing;
if (src->inferred) {
jl_value_t *null = nullptr;
jl_atomic_cmpswap_relaxed(&codeinst->inferred, &null, jl_nothing);
}
}
_jl_compile_codeinst(codeinst, src, world, context);
if (jl_atomic_load_relaxed(&codeinst->invoke) == NULL)
5 changes: 4 additions & 1 deletion src/jltypes.c
@@ -2578,8 +2578,11 @@ void jl_init_types(void) JL_GC_DISABLED
jl_emptysvec,
0, 1, 1);
jl_svecset(jl_code_instance_type->types, 1, jl_code_instance_type);
const static uint32_t code_instance_constfields[1] = { 0x00000001 }; // (1<<1);
const static uint32_t code_instance_constfields[1] = { 0b000001010111101 }; // Set fields 1, 3-6, 8, 10 as const
const static uint32_t code_instance_atomicfields[1] = { 0b011100101000010 }; // Set fields 2, 7, 9, 12-14 as atomic
//Fields 11 and 15 must be protected by locks, and thus all operations on jl_code_instance_t are threadsafe
jl_code_instance_type->name->constfields = code_instance_constfields;
jl_code_instance_type->name->atomicfields = code_instance_atomicfields;

jl_const_type = jl_new_datatype(jl_symbol("Const"), core, jl_any_type, jl_emptysvec,
jl_perm_symsvec(1, "val"),
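
The two binary constants in the hunk above are per-field bitmasks: bit i-1 of the word marks field i of jl_code_instance_t. A quick plain-Julia check that the masks match the field lists in the comments:

mask(fields) = foldl((m, i) -> m | (UInt32(1) << (i - 1)), fields; init = UInt32(0))

@assert mask((1, 3, 4, 5, 6, 8, 10)) == 0b000001010111101   # the const mask
@assert mask((2, 7, 9, 12, 13, 14))  == 0b011100101000010   # the atomic mask
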
4 changes: 2 additions & 2 deletions src/julia.h
@@ -392,7 +392,7 @@ typedef struct _jl_code_instance_t {
// inference state cache
jl_value_t *rettype; // return type for fptr
jl_value_t *rettype_const; // inferred constant return value, or null
jl_value_t *inferred; // inferred jl_code_info_t, or jl_nothing, or null
_Atomic(jl_value_t *) inferred; // inferred jl_code_info_t, or jl_nothing, or null
//TODO: jl_array_t *edges; // stored information about edges from this object
//TODO: uint8_t absolute_max; // whether true max world is unknown

@@ -425,7 +425,7 @@
};
#else
uint32_t ipo_purity_bits;
uint32_t purity_bits;
_Atomic(uint32_t) purity_bits;
#endif
jl_value_t *argescapes; // escape information of call arguments

8 changes: 5 additions & 3 deletions src/precompile.c
@@ -336,9 +336,11 @@ static int precompile_enq_specialization_(jl_method_instance_t *mi, void *closur
while (codeinst) {
int do_compile = 0;
if (jl_atomic_load_relaxed(&codeinst->invoke) != jl_fptr_const_return) {
if (codeinst->inferred && codeinst->inferred != jl_nothing &&
jl_ir_flag_inferred((jl_array_t*)codeinst->inferred) &&
(jl_ir_inlining_cost((jl_array_t*)codeinst->inferred) == UINT16_MAX)) {
jl_value_t *inferred = jl_atomic_load_relaxed(&codeinst->inferred);
if (inferred &&
inferred != jl_nothing &&
jl_ir_flag_inferred((jl_array_t*)inferred) &&
(jl_ir_inlining_cost((jl_array_t*)inferred) == UINT16_MAX)) {
do_compile = 1;
}
else if (jl_atomic_load_relaxed(&codeinst->invoke) != NULL || jl_atomic_load_relaxed(&codeinst->precompile)) {
11 changes: 7 additions & 4 deletions src/staticdata.c
@@ -1707,13 +1707,16 @@ static void strip_specializations_(jl_method_instance_t *mi)
assert(jl_is_method_instance(mi));
jl_code_instance_t *codeinst = mi->cache;
while (codeinst) {
if (codeinst->inferred && codeinst->inferred != jl_nothing) {
jl_value_t *inferred = jl_atomic_load_relaxed(&codeinst->inferred);
if (inferred && inferred != jl_nothing) {
if (jl_options.strip_ir) {
record_field_change(&codeinst->inferred, jl_nothing);
record_field_change(&inferred, jl_nothing);
}
else if (jl_options.strip_metadata) {
codeinst->inferred = strip_codeinfo_meta(mi->def.method, codeinst->inferred, 0);
jl_gc_wb(codeinst, codeinst->inferred);
jl_value_t *stripped = strip_codeinfo_meta(mi->def.method, inferred, 0);
if (jl_atomic_cmpswap_relaxed(&codeinst->inferred, &inferred, stripped)) {
jl_gc_wb(codeinst, stripped);
}
}
}
codeinst = jl_atomic_load_relaxed(&codeinst->next);
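
Here the compare-and-swap uses the previously loaded value as the expected one: the stripped replacement is installed, and the write barrier run, only if the field has not changed since the load. A hedged sketch of the same shape with @atomicreplace and the hypothetical Cache type (`stripped` is a made-up replacement value):

expected = @atomic :monotonic c.inferred
stripped = Any[]
(; success) = @atomicreplace c.inferred expected => stripped
success || @info "field changed concurrently; keeping the other writer's value"
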
2 changes: 1 addition & 1 deletion test/compiler/inline.jl
@@ -625,7 +625,7 @@ let
specs = collect(only(methods(f42078)).specializations)
mi = specs[findfirst(!isnothing, specs)]::Core.MethodInstance
codeinf = getcache(mi)::Core.CodeInstance
codeinf.inferred = nothing
@atomic codeinf.inferred = nothing
end

let # inference should re-infer `f42078(::Int)` and we should get the same code
2 changes: 1 addition & 1 deletion test/core.jl
@@ -14,7 +14,7 @@ include("testenv.jl")
# sanity tests that our built-in types are marked correctly for const fields
for (T, c) in (
(Core.CodeInfo, []),
(Core.CodeInstance, [:def]),
(Core.CodeInstance, [:def, :min_world, :max_world, :rettype, :rettype_const, :ipo_purity_bits, :argescapes]),
(Core.Method, [#=:name, :module, :file, :line, :primary_world, :sig, :slot_syms, :external_mt, :nargs, :called, :nospecialize, :nkw, :isva, :pure, :is_for_opaque_closure, :constprop=#]),
(Core.MethodInstance, [#=:def, :specTypes, :sparam_vals=#]),
(Core.MethodTable, [:module]),
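
The sanity test above enumerates the const fields of each builtin type. The same information is queryable at the REPL via Base.isconst on a type and field name (assuming Julia 1.8 or later, where field-level queries exist):

@assert Base.isconst(Core.CodeInstance, :def)         # const as of this commit
@assert !Base.isconst(Core.CodeInstance, :inferred)   # atomic, not const
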
