CompilationParams --> Compiler.Params, revert CompilationResult --> InferenceResult [ci skip]
jrevels committed Jan 11, 2018
1 parent 70341f5 commit 8b38c4c
Showing 11 changed files with 56 additions and 56 deletions.
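At a call site the rename is mechanical. The sketch below is not part of the diff; it mirrors the `return_types` hunk in base/reflection.jl further down, using `+` and `Tuple{Int,Int}` as illustrative inputs and the non-exported helpers `Base._methods` / `Base.func_for_method_checked` that hunk relies on:

```julia
world = ccall(:jl_get_world_counter, UInt, ())
params = Core.Compiler.Params(world)  # was: Core.Compiler.CompilationParams(world)
for x in Base._methods(+, Tuple{Int,Int}, -1, world)
    meth = Base.func_for_method_checked(x[3], Tuple{Int,Int})
    # x[1] is the matched signature, x[2] the static parameters of the match
    @show Core.Compiler.typeinf_type(meth, x[1], x[2], true, params)
end
```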
20 changes: 10 additions & 10 deletions base/compiler/abstractinterpretation.jl
@@ -15,8 +15,8 @@ const _REF_NAME = Ref.body.name
##################

mutable struct InferenceState
-params::CompilationParams # describes how to compute the result
-result::CompilationResult # remember where to put the result
+params::Params # describes how to compute the result
+result::InferenceResult # remember where to put the result
linfo::MethodInstance # used here for the tuple (specTypes, env, Method) and world-age validity
sp::SimpleVector # static parameters
mod::Module
@@ -51,16 +51,16 @@ mutable struct InferenceState
const_api::Bool
const_ret::Bool

-# TODO: move these to CompilationResult / CompilationParams?
+# TODO: move these to InferenceResult / Params?
optimize::Bool
cached::Bool
limited::Bool
inferred::Bool
dont_work_on_me::Bool

# src is assumed to be a newly-allocated CodeInfo, that can be modified in-place to contain intermediate results
-function InferenceState(result::CompilationResult, src::CodeInfo,
-optimize::Bool, cached::Bool, params::CompilationParams)
+function InferenceState(result::InferenceResult, src::CodeInfo,
+optimize::Bool, cached::Bool, params::Params)
linfo = result.linfo
code = src.code::Array{Any,1}
toplevel = !isa(linfo.def, Method)
@@ -150,12 +150,12 @@ mutable struct InferenceState
end

function InferenceState(linfo::MethodInstance,
-optimize::Bool, cached::Bool, params::CompilationParams)
-return InferenceState(CompilationResult(linfo), optimize, cached, params)
+optimize::Bool, cached::Bool, params::Params)
+return InferenceState(InferenceResult(linfo), optimize, cached, params)
end

-function InferenceState(result::CompilationResult,
-optimize::Bool, cached::Bool, params::CompilationParams)
+function InferenceState(result::InferenceResult,
+optimize::Bool, cached::Bool, params::Params)
# prepare an InferenceState object for inferring lambda
src = retrieve_code_info(result.linfo)
src === nothing && return nothing
@@ -387,7 +387,7 @@ function abstract_call_method_with_const_args(@nospecialize(f), argtypes::Vector
end
inf_result = cache_lookup(code, argtypes, sv.params.cache)
if inf_result === nothing
-inf_result = CompilationResult(code)
+inf_result = InferenceResult(code)
atypes = get_argtypes(inf_result)
for i in 1:nargs
a = argtypes[i]
2 changes: 1 addition & 1 deletion base/compiler/bootstrap.jl
@@ -27,7 +27,7 @@ let fs = Any[typeinf_ext, typeinf, typeinf_edge, pure_eval_call],
typ[i] = typ[i].ub
end
end
-typeinf_type(m[3], Tuple{typ...}, m[2], true, CompilationParams(world))
+typeinf_type(m[3], Tuple{typ...}, m[2], true, Params(world))
end
end
end
28 changes: 14 additions & 14 deletions base/compiler/init.jl
@@ -102,16 +102,16 @@ const checked_add = +

const checked_sub = -

-#####################
-# CompilationResult #
-#####################
+###################
+# InferenceResult #
+###################

-mutable struct CompilationResult
+mutable struct InferenceResult
linfo::MethodInstance
args::Vector{Any}
result # ::Type, or InferenceState if WIP
src::Union{CodeInfo, Nothing} # if inferred copy is available
-function CompilationResult(linfo::MethodInstance)
+function InferenceResult(linfo::MethodInstance)
if isdefined(linfo, :inferred_const)
result = Const(linfo.inferred_const)
else
@@ -121,7 +121,7 @@ mutable struct CompilationResult
end
end

-function get_argtypes(result::CompilationResult)
+function get_argtypes(result::InferenceResult)
result.args === EMPTY_VECTOR || return result.args # already cached
linfo = result.linfo
toplevel = !isa(linfo.def, Method)
@@ -177,7 +177,7 @@ function get_argtypes(result::CompilationResult)
return args
end

-function cache_lookup(code::MethodInstance, argtypes::Vector{Any}, cache::Vector{CompilationResult})
+function cache_lookup(code::MethodInstance, argtypes::Vector{Any}, cache::Vector{InferenceResult})
method = code.def::Method
nargs::Int = method.nargs
method.isva && (nargs -= 1)
@@ -210,12 +210,12 @@ function cache_lookup(code::MethodInstance, argtypes::Vector{Any}, cache::Vector
return nothing
end

-#####################
-# CompilationParams #
-#####################
+##########
+# Params #
+##########

-struct CompilationParams
-cache::Vector{CompilationResult}
+struct Params
+cache::Vector{InferenceResult}
world::UInt

# optimization
@@ -249,7 +249,7 @@ struct CompilationParams
MAX_TUPLE_SPLAT::Int

# reasonable defaults
-function CompilationParams(world::UInt;
+function Params(world::UInt;
inlining::Bool = inlining_enabled(),
inline_cost_threshold::Int = 100,
inline_nonleaf_penalty::Int = 1000,
@@ -260,7 +260,7 @@ struct CompilationParams
tuple_splat::Int = 16,
union_splitting::Int = 4,
apply_union_enum::Int = 8)
-return new(Vector{CompilationResult}(),
+return new(Vector{InferenceResult}(),
world, inlining, true, false, inline_cost_threshold, inline_nonleaf_penalty,
inline_tupleret_bonus, max_methods, union_splitting, apply_union_enum,
tupletype_len, tuple_depth, tuple_splat)
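Not part of the diff: given the keyword constructor shown above, constructing a tuned parameter set would look roughly as follows (the overridden values are illustrative, not recommendations):

```julia
world = ccall(:jl_get_world_counter, UInt, ())
params = Core.Compiler.Params(world)  # all defaults
# selected knobs overridden, using keyword names from the constructor above
tuned = Core.Compiler.Params(world; inlining = false, inline_cost_threshold = 150)
```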
24 changes: 12 additions & 12 deletions base/compiler/optimize.jl
@@ -48,7 +48,7 @@ mutable struct OptimizationState
next_label::Int # index of the current highest label for this function
min_valid::UInt
max_valid::UInt
-params::CompilationParams
+params::Params
function OptimizationState(frame::InferenceState)
s_edges = frame.stmt_edges[1]
if s_edges === ()
@@ -63,7 +63,7 @@ mutable struct OptimizationState
frame.params)
end
function OptimizationState(linfo::MethodInstance, src::CodeInfo,
-params::CompilationParams)
+params::Params)
# prepare src for running optimization passes
# if it isn't already
nssavalues = src.ssavaluetypes
@@ -96,7 +96,7 @@ mutable struct OptimizationState
end
end

-function OptimizationState(linfo::MethodInstance, params::CompilationParams)
+function OptimizationState(linfo::MethodInstance, params::Params)
src = retrieve_code_info(linfo)
src === nothing && return nothing
return OptimizationState(linfo, src, params)
@@ -233,7 +233,7 @@ end
# logic #
#########

-function isinlineable(m::Method, src::CodeInfo, mod::Module, params::CompilationParams, bonus::Int=0)
+function isinlineable(m::Method, src::CodeInfo, mod::Module, params::Params, bonus::Int=0)
# compute the cost (size) of inlining this code
inlineable = false
cost_threshold = params.inline_cost_threshold
@@ -1302,7 +1302,7 @@ function inlineable(@nospecialize(f), @nospecialize(ft), e::Expr, atypes::Vector
return inline_as_constant(linfo.inferred_const, argexprs, sv, invoke_data)
end

-# see if the method has a CompilationResult in the current cache
+# see if the method has a InferenceResult in the current cache
# or an existing inferred code info store in `.inferred`
haveconst = false
for i in 1:length(atypes)
@@ -1316,11 +1316,11 @@ end
end
end
if haveconst
-inf_result = cache_lookup(linfo, atypes, sv.params.cache) # Union{Nothing, CompilationResult}
+inf_result = cache_lookup(linfo, atypes, sv.params.cache) # Union{Nothing, InferenceResult}
else
inf_result = nothing
end
-if isa(inf_result, CompilationResult) && isa(inf_result.src, CodeInfo)
+if isa(inf_result, InferenceResult) && isa(inf_result.src, CodeInfo)
linfo = inf_result.linfo
result = inf_result.result
if (inf_result.src::CodeInfo).pure
@@ -1577,7 +1577,7 @@ plus_saturate(x, y) = max(x, y, x+y)
# known return type
isknowntype(T) = (T == Union{}) || isleaftype(T)

-function statement_cost(ex::Expr, line::Int, src::CodeInfo, mod::Module, params::CompilationParams)
+function statement_cost(ex::Expr, line::Int, src::CodeInfo, mod::Module, params::Params)
head = ex.head
if is_meta_expr(ex) || head == :copyast # not sure if copyast is right
return 0
@@ -1655,7 +1655,7 @@ function statement_cost(ex::Expr, line::Int, src::CodeInfo, mod::Module, params:
end

function inline_worthy(body::Array{Any,1}, src::CodeInfo, mod::Module,
-params::CompilationParams,
+params::Params,
cost_threshold::Integer=params.inline_cost_threshold)
bodycost = 0
for line = 1:length(body)
@@ -1676,13 +1676,13 @@ function inline_worthy(body::Array{Any,1}, src::CodeInfo, mod::Module,
return bodycost <= cost_threshold
end

-function inline_worthy(body::Expr, src::CodeInfo, mod::Module, params::CompilationParams,
+function inline_worthy(body::Expr, src::CodeInfo, mod::Module, params::Params,
cost_threshold::Integer=params.inline_cost_threshold)
bodycost = statement_cost(body, typemax(Int), src, mod, params)
return bodycost <= cost_threshold
end

-function inline_worthy(@nospecialize(body), src::CodeInfo, mod::Module, params::CompilationParams,
+function inline_worthy(@nospecialize(body), src::CodeInfo, mod::Module, params::Params,
cost_threshold::Integer=params.inline_cost_threshold)
newbody = exprtype(body, src, mod)
!isa(newbody, Expr) && return true
@@ -4149,7 +4149,7 @@ function reindex_labels!(sv::OptimizationState)
end

function return_type(@nospecialize(f), @nospecialize(t))
-params = CompilationParams(ccall(:jl_get_tls_world_age, UInt, ()))
+params = Params(ccall(:jl_get_tls_world_age, UInt, ()))
rt = Union{}
if isa(f, Builtin)
rt = builtin_tfunction(f, Any[t.parameters...], nothing, params)
14 changes: 7 additions & 7 deletions base/compiler/typeinfer.jl
@@ -87,7 +87,7 @@ end

# build (and start inferring) the inference frame for the linfo
function typeinf_frame(linfo::MethodInstance,
-optimize::Bool, cached::Bool, params::CompilationParams)
+optimize::Bool, cached::Bool, params::Params)
frame = InferenceState(linfo, optimize, cached, params)
frame === nothing && return nothing
cached && (linfo.inInference = true)
@@ -147,13 +147,13 @@

# compute an inferred AST and return type
function typeinf_code(method::Method, @nospecialize(atypes), sparams::SimpleVector,
-optimize::Bool, cached::Bool, params::CompilationParams)
+optimize::Bool, cached::Bool, params::Params)
code = code_for_method(method, atypes, sparams, params.world)
code === nothing && return (nothing, nothing, Any)
return typeinf_code(code::MethodInstance, optimize, cached, params)
end
function typeinf_code(linfo::MethodInstance, optimize::Bool, cached::Bool,
-params::CompilationParams)
+params::Params)
for i = 1:2 # test-and-lock-and-test
i == 2 && ccall(:jl_typeinf_begin, Cvoid, ())
if cached && isdefined(linfo, :inferred)
@@ -195,7 +195,7 @@ end

# compute (and cache) an inferred AST and return the inferred return type
function typeinf_type(method::Method, @nospecialize(atypes), sparams::SimpleVector,
-cached::Bool, params::CompilationParams)
+cached::Bool, params::Params)
if contains_is(unwrap_unionall(atypes).parameters, Union{})
return Union{}
end
@@ -226,13 +226,13 @@ end
function typeinf_ext(linfo::MethodInstance, world::UInt)
if isa(linfo.def, Method)
# method lambda - infer this specialization via the method cache
-return typeinf_code(linfo, true, true, CompilationParams(world))
+return typeinf_code(linfo, true, true, Params(world))
else
# toplevel lambda - infer directly
ccall(:jl_typeinf_begin, Cvoid, ())
-result = CompilationResult(linfo)
+result = InferenceResult(linfo)
frame = InferenceState(result, linfo.inferred::CodeInfo,
-true, true, CompilationParams(world))
+true, true, Params(world))
typeinf(frame)
ccall(:jl_typeinf_end, Cvoid, ())
@assert frame.inferred # TODO: deal with this better
6 changes: 3 additions & 3 deletions base/compiler/typelattice.jl
@@ -296,9 +296,9 @@ function limit_type_depth(@nospecialize(t), d::Int)
return r
end

-limit_tuple_depth(params::CompilationParams, @nospecialize(t)) = limit_tuple_depth_(params,t,0)
+limit_tuple_depth(params::Params, @nospecialize(t)) = limit_tuple_depth_(params,t,0)

-function limit_tuple_depth_(params::CompilationParams, @nospecialize(t), d::Int)
+function limit_tuple_depth_(params::Params, @nospecialize(t), d::Int)
if isa(t,Union)
# also limit within Union types.
# may have to recur into other stuff in the future too.
@@ -324,7 +324,7 @@ function limit_tuple_depth_(params::CompilationParams, @nospecialize(t), d::Int)
Tuple{p...}
end

-limit_tuple_type = (@nospecialize(t), params::CompilationParams) -> limit_tuple_type_n(t, params.MAX_TUPLETYPE_LEN)
+limit_tuple_type = (@nospecialize(t), params::Params) -> limit_tuple_type_n(t, params.MAX_TUPLETYPE_LEN)

function limit_tuple_type_n(@nospecialize(t), lim::Int)
if isa(t,UnionAll)
4 changes: 2 additions & 2 deletions base/precompile.jl
@@ -754,8 +754,8 @@ precompile(Tuple{typeof(Base.length), Tuple{DataType, DataType}})
precompile(Tuple{Type{BoundsError}, Array{Int64, 2}, Tuple{Base.UnitRange{Int64}, Int64}})
precompile(Tuple{typeof(Base.throw_boundserror), Array{Int64, 2}, Tuple{Base.UnitRange{Int64}, Int64}})
precompile(Tuple{getfield(Base.Cartesian, Symbol("#@nexprs")), Int64, Expr})
-precompile(Tuple{typeof(Core.Compiler.builtin_tfunction), typeof(===), Array{Any, 1}, Core.Compiler.InferenceState, Core.Compiler.CompilationParams})
-precompile(Tuple{typeof(Core.Compiler.typeinf_frame), Core.MethodInstance, Bool, Bool, Core.Compiler.CompilationParams})
+precompile(Tuple{typeof(Core.Compiler.builtin_tfunction), typeof(===), Array{Any, 1}, Core.Compiler.InferenceState, Core.Compiler.Params})
+precompile(Tuple{typeof(Core.Compiler.typeinf_frame), Core.MethodInstance, Bool, Bool, Core.Compiler.Params})
precompile(Tuple{typeof(Core.Compiler.typeinf), Core.Compiler.InferenceState})
precompile(Tuple{typeof(Base.Cartesian.inlineanonymous), Expr, Int64})
precompile(Tuple{typeof(Base.Cartesian.lreplace), Expr, Symbol, Int64})
4 changes: 2 additions & 2 deletions base/reflection.jl
@@ -889,7 +889,7 @@ function code_typed(@nospecialize(f), @nospecialize(types=Tuple); optimize=true)
types = to_tuple_type(types)
asts = []
world = ccall(:jl_get_world_counter, UInt, ())
-params = Core.Compiler.CompilationParams(world)
+params = Core.Compiler.Params(world)
for x in _methods(f, types, -1, world)
meth = func_for_method_checked(x[3], types)
(_, code, ty) = Core.Compiler.typeinf_code(meth, x[1], x[2], optimize, optimize, params)
@@ -907,7 +907,7 @@ function return_types(@nospecialize(f), @nospecialize(types=Tuple))
types = to_tuple_type(types)
rt = []
world = ccall(:jl_get_world_counter, UInt, ())
-params = Core.Compiler.CompilationParams(world)
+params = Core.Compiler.Params(world)
for x in _methods(f, types, -1, world)
meth = func_for_method_checked(x[3], types)
ty = Core.Compiler.typeinf_type(meth, x[1], x[2], true, params)
2 changes: 1 addition & 1 deletion base/repl/REPLCompletions.jl
@@ -310,7 +310,7 @@ function get_type_call(expr::Expr)
length(mt) == 1 || return (Any, false)
m = first(mt)
# Typeinference
-params = Core.Compiler.CompilationParams(world)
+params = Core.Compiler.Params(world)
return_type = Core.Compiler.typeinf_type(m[3], m[1], m[2], true, params)
return_type === nothing && return (Any, false)
return (return_type, true)
6 changes: 3 additions & 3 deletions doc/src/devdocs/inference.md
@@ -27,7 +27,7 @@ mths = methods(convert, atypes) # worth checking that there is only one
m = first(mths)

# Create variables needed to call `typeinf_code`
-params = Core.Compiler.CompilationParams(typemax(UInt)) # parameter is the world age,
+params = Core.Compiler.Params(typemax(UInt)) # parameter is the world age,
# typemax(UInt) -> most recent
sparams = Core.svec() # this particular method doesn't have type-parameters
optimize = true # run all inference optimizations
@@ -84,7 +84,7 @@ input and output types were inferred in advance) is assigned a fixed
cost (currently 20 cycles). In contrast, a `:call` expression, for
functions other than intrinsics/builtins, indicates that the call will
require dynamic dispatch, in which case we assign a cost set by
-`CompilationParams.inline_nonleaf_penalty` (currently set at 1000). Note
+`Params.inline_nonleaf_penalty` (currently set at 1000). Note
that this is not a "first-principles" estimate of the raw cost of
dynamic dispatch, but a mere heuristic indicating that dynamic
dispatch is extremely expensive.
@@ -93,7 +93,7 @@ Each statement gets analyzed for its total cost in a function called
`statement_cost`. You can run this yourself by following this example:

```julia
-params = Core.Compiler.CompilationParams(typemax(UInt))
+params = Core.Compiler.Params(typemax(UInt))
# Get the CodeInfo object
ci = (@code_typed fill(3, (5, 5)))[1] # we'll try this on the code for `fill(3, (5, 5))`
# Calculate cost of each statement
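The devdocs example above is truncated by the diff view. A rough completion, consistent with the five-argument `statement_cost(ex, line, src, mod, params)` signature shown in the optimize.jl hunk and assuming non-`Expr` statements cost nothing, might read:

```julia
params = Core.Compiler.Params(typemax(UInt))
ci = (@code_typed fill(3, (5, 5)))[1]
# cost of each statement; `Base` is assumed as the defining module of `fill`
cost(stmt::Expr, i) = Core.Compiler.statement_cost(stmt, i, ci, Base, params)
cost(stmt, i) = 0
costs = [cost(stmt, i) for (i, stmt) in enumerate(ci.code)]
```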
2 changes: 1 addition & 1 deletion test/compiler.jl
@@ -1266,7 +1266,7 @@ gg13183(x::X...) where {X} = (_false13183 ? gg13183(x, x) : 0)
# test the external OptimizationState constructor
let linfo = get_linfo(Base.convert, Tuple{Type{Int64}, Int32}),
world = typemax(UInt),
-opt = Core.Compiler.OptimizationState(linfo, Core.Compiler.CompilationParams(world))
+opt = Core.Compiler.OptimizationState(linfo, Core.Compiler.Params(world))
# make sure the state of the properties look reasonable
@test opt.src !== linfo.def.source
@test length(opt.src.slotflags) == length(opt.src.slotnames) == length(opt.src.slottypes)