Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Continue PR #5126 #5247

Merged
merged 4 commits
Feb 17, 2020
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
19 changes: 18 additions & 1 deletion numba/parfors/parfor.py
Original file line number Diff line number Diff line change
Expand Up @@ -604,6 +604,8 @@ def dump(self, file=None):
file = file or sys.stdout
print(("begin parfor {}".format(self.id)).center(20, '-'), file=file)
print("index_var = ", self.index_var, file=file)
print("params = ", self.params, file=file)
print("races = ", self.races, file=file)
for loopnest in self.loop_nests:
print(loopnest, file=file)
print("init block:", file=file)
Expand Down Expand Up @@ -1550,6 +1552,7 @@ def run(self):
# push function call variables inside parfors so gufunc function
# wouldn't need function variables as argument
push_call_vars(self.func_ir.blocks, {}, {}, self.typemap)
dprint_func_ir(self.func_ir, "after push call vars")
# simplify again
simplify(self.func_ir, self.typemap, self.calltypes)
dprint_func_ir(self.func_ir, "after optimization")
Expand Down Expand Up @@ -1788,6 +1791,13 @@ def _convert_loop(self, blocks):
for bl in loop.exits:
exit_lives = exit_lives.union(live_map[bl])
races = bodydefs.intersection(exit_lives)
# It is possible for the result of an ir.Global to be flagged
# as a race if it is defined in this Parfor and then used in
# a subsequent Parfor. push_call_vars() in the Parfor pass
# copies such ir.Global nodes into the Parfors in which they
# are used so no need to treat things of type Module as a race.
races = races.intersection({x for x in races
if not isinstance(self.typemap[x], types.misc.Module)})

# replace jumps to header block with the end block
for l in body_labels:
Expand Down Expand Up @@ -1961,6 +1971,9 @@ def find_mask_from_size(size_var):
blocks.pop(loop.header)
for l in body_labels:
blocks.pop(l)
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from loop")
parfor.dump()

def _replace_loop_access_indices(self, loop_body, index_set, new_index):
"""
Expand Down Expand Up @@ -2236,6 +2249,7 @@ def _arrayexpr_to_parfor(self, equiv_set, lhs, arrayexpr, avail_vars):
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from arrayexpr")
parfor.dump()
return parfor

Expand Down Expand Up @@ -2329,6 +2343,7 @@ def _setitem_to_parfor(self, equiv_set, loc, target, index, value, shape=None):
true_block.body.append(ir.Jump(end_label, loc))

if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from setitem")
parfor.dump()
return parfor

Expand Down Expand Up @@ -2515,6 +2530,9 @@ def _reduce_to_parfor(self, equiv_set, lhs, args, loc):
parfor = Parfor(loopnests, init_block, loop_body, loc, index_var,
equiv_set, ('{} function'.format(call_name),
'reduction'), self.flags)
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from reduction")
parfor.dump()
return parfor


Expand Down Expand Up @@ -2965,7 +2983,6 @@ def get_parfor_params(blocks, options_fusion, fusion_info):


def get_parfor_params_inner(parfor, pre_defs, options_fusion, fusion_info):

blocks = wrap_parfor_blocks(parfor)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
Expand Down
20 changes: 13 additions & 7 deletions numba/parfors/parfor_lowering.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,6 +414,9 @@ def _lower_parfor_parallel(lowerer, parfor):
# Beginning of this function.
lowerer.fndesc.typemap = orig_typemap

if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel done")

# A work-around to prevent circular imports
lowering.lower_extensions[parfor.Parfor] = _lower_parfor_parallel

Expand Down Expand Up @@ -785,6 +788,9 @@ def _create_gufunc_for_parfor_body(
for the parfor body inserted.
'''

if config.DEBUG_ARRAY_OPT >= 1:
print("starting _create_gufunc_for_parfor_body")

loc = parfor.init_block.loc

# The parfor body and the main function body share ir.Var nodes.
Expand Down Expand Up @@ -812,6 +818,12 @@ def _create_gufunc_for_parfor_body(
set(parfor_outputs) -
set(parfor_redvars)))

if config.DEBUG_ARRAY_OPT >= 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))

races = races.difference(set(parfor_redvars))
for race in races:
msg = ("Variable %s used in parallel loop may be written "
Expand All @@ -820,12 +832,6 @@ def _create_gufunc_for_parfor_body(
warnings.warn(NumbaParallelSafetyWarning(msg, loc))
replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)

if config.DEBUG_ARRAY_OPT >= 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))

# Reduction variables are represented as arrays, so they go under
# different names.
parfor_redarrs = []
Expand Down Expand Up @@ -1159,7 +1165,7 @@ def _create_gufunc_for_parfor_body(

kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("kernel_sig = ", kernel_sig)
print("finished create_gufunc_for_parfor_body. kernel_sig = ", kernel_sig)

return kernel_func, parfor_args, kernel_sig, redargstartdim, func_arg_types

Expand Down
61 changes: 60 additions & 1 deletion numba/tests/test_parfors.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,10 @@

import numba.parfors.parfor
from numba import njit, prange
from numba.core import types, utils, typing, errors, ir, rewrites, typed_passes, inline_closurecall, config, compiler, cpu
from numba.core import (types, utils, typing, errors, ir, rewrites,
typed_passes, inline_closurecall, config, compiler, cpu)
from numba.extending import (overload_method, register_model,
typeof_impl, unbox, NativeValue, models)
from numba.core.registry import cpu_target
from numba.core.annotations import type_annotations
from numba.core.ir_utils import (find_callname, guard, build_definitions,
Expand Down Expand Up @@ -3097,6 +3100,62 @@ def test_impl():

self.check(test_impl)

@skip_parfors_unsupported
def test_issue_5098(self):
    """Regression test for numba issue #5098.

    Registers a minimal opaque extension type (``DummyType``) so that a
    plain Python ``Dummy`` instance can be passed into jitted code, then
    exercises an ``overload_method`` compiled with ``parallel=True`` whose
    body contains two ``prange`` loops. Finally it checks that
    ``numba.parfors.parfor.sequential_parfor_lowering`` is False both
    before and after the call — i.e. the parallel-lowering global state is
    not left flipped by the compilation. (Presumably the original bug left
    this flag set; that history is not visible here — see issue #5098.)
    """
    # Opaque numba type with no data layout; backed by OpaqueModel.
    class DummyType(types.Opaque):
        pass

    dummy_type = DummyType("my_dummy")
    register_model(DummyType)(models.OpaqueModel)

    # Pure-Python class that will be typed as DummyType when passed to
    # jitted functions.
    class Dummy(object):
        pass

    # Map Python Dummy instances to the registered numba type.
    @typeof_impl.register(Dummy)
    def typeof_Dummy(val, c):
        return dummy_type

    # Unboxing produces a dummy LLVM value — the object carries no data.
    @unbox(DummyType)
    def unbox_index(typ, obj, c):
        return NativeValue(c.context.get_dummy_value())

    # Method overload compiled with parallel=True so its prange loops
    # become parfors.
    @overload_method(DummyType, "method1", jit_options={"parallel": True})
    def _get_method1(obj, arr, func):
        def _foo(obj, arr, func):
            def baz(a, f):
                c = a.copy()
                c[np.isinf(a)] = np.nan
                return f(c)

            length = len(arr)
            output_arr = np.empty(length, dtype=np.float64)
            # Two prange loops: the second reads values written by the
            # first, which is the shape of code this test exercises.
            for i in prange(length):
                output_arr[i] = baz(arr[i], func)
            for i in prange(length - 1):
                output_arr[i] += baz(arr[i], func)
            return output_arr
        return _foo

    @njit
    def bar(v):
        return v.mean()

    @njit
    def test1(d):
        return d.method1(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), bar)

    # sequential_parfor_lowering must be False before the run and must
    # remain False afterwards; restore it in ``finally`` so a failure
    # here cannot poison later tests.
    save_state = numba.parfors.parfor.sequential_parfor_lowering
    self.assertFalse(save_state)
    try:
        test1(Dummy())
        self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering)
    finally:
        # always set the sequential_parfor_lowering state back to the
        # original state
        numba.parfors.parfor.sequential_parfor_lowering = save_state



@skip_parfors_unsupported
class TestParforsDiagnostics(TestParforsBase):

Expand Down