Return device scalars from nodal_sum/nodal_min/nodal_max #170

Merged · 15 commits · Oct 28, 2021
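For orientation, here is a hedged sketch of the calling convention this PR introduces: the nodal reductions (and the norms built on them) now return device scalars, so call sites that need a host-side number convert explicitly. The `actx`, `dcoll`, and `u` below are assumed to already exist (array context, discretization collection, and a DOFArray); they are placeholders for setup that is not part of this PR.

```python
# Sketch of the new calling convention (assumes an existing array context
# `actx`, DiscretizationCollection `dcoll`, and DOFArray `u`).
import grudge.op as op

u_max_dev = op.nodal_max(dcoll, "vol", u)   # now a device scalar, not a host float
u_l2_dev = op.norm(dcoll, u, 2)             # likewise device-resident

# Convert only where a host number is actually needed, e.g. for logging or
# time-step control flow:
print(f"max: {actx.to_numpy(u_max_dev)}, L2: {actx.to_numpy(u_l2_dev)}")
```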
6 changes: 5 additions & 1 deletion .github/workflows/ci.yml
@@ -93,7 +93,11 @@ jobs:
. ./ci-support-v0

if test "$DOWNSTREAM_PROJECT" = "mirgecom"; then
git clone "https://github.com/illinois-ceesd/$DOWNSTREAM_PROJECT.git"
if [[ "$GITHUB_HEAD_REF" = "nodal-reduction-device-scalar" ]]; then
git clone "https://github.com/majosm/$DOWNSTREAM_PROJECT.git" -b "nodal-reduction-device-scalar"
else
git clone "https://github.com/illinois-ceesd/$DOWNSTREAM_PROJECT.git"
fi
else
git clone "https://github.com/inducer/$DOWNSTREAM_PROJECT.git"
fi
5 changes: 5 additions & 0 deletions doc/conf.py
@@ -26,6 +26,11 @@ def get_version():
# The full version, including alpha/beta/rc tags.
release = version

+ autodoc_type_aliases = {
+ "DeviceScalar": "arraycontext.DeviceScalar",
+ "DeviceArray": "arraycontext.DeviceArray",
+ }

intersphinx_mapping = {
"https://docs.python.org/3/": None,
"https://numpy.org/doc/stable/": None,
3 changes: 2 additions & 1 deletion examples/advection/surface.py
@@ -206,7 +206,8 @@ def rhs(t, u):
# {{{ time stepping

# FIXME: dt estimate is not necessarily valid for surfaces
- dt = 0.45 * adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u0)
+ dt = actx.to_numpy(
+ 0.45 * adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u0))
nsteps = int(final_time // dt) + 1

logger.info("dt: %.5e", dt)
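The same conversion recurs in each example driver in this PR: `estimate_rk4_timestep` is built on these reductions, so its result is now device-resident, while `dt` feeds host-side control flow and logging. A minimal sketch of why the conversion happens up front, with `actx`, `dcoll`, `adv_operator`, and `u0` assumed to exist as in the example above and `final_time` a made-up value:

```python
# dt enters Python-level arithmetic and logging, so it is moved to the host once.
dt = actx.to_numpy(
    0.45 * adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u0))

final_time = 1.0                    # illustrative value only
nsteps = int(final_time // dt) + 1  # integer arithmetic needs a host scalar
```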
2 changes: 1 addition & 1 deletion examples/advection/var-velocity.py
@@ -196,7 +196,7 @@ def zero_inflow_bc(dtag, t=0):
def rhs(t, u):
return adv_operator.operator(t, u)

- dt = adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u)
+ dt = actx.to_numpy(adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u))

logger.info("Timestep size: %g", dt)

2 changes: 1 addition & 1 deletion examples/advection/weak.py
@@ -166,7 +166,7 @@ def u_analytic(x, t=0):
def rhs(t, u):
return adv_operator.operator(t, u)

- dt = adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u)
+ dt = actx.to_numpy(adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u))

logger.info("Timestep size: %g", dt)

3 changes: 2 additions & 1 deletion examples/maxwell/cavities.py
@@ -93,7 +93,8 @@ def cavity_mode(x, t=0):
def rhs(t, w):
return maxwell_operator.operator(t, w)

- dt = maxwell_operator.estimate_rk4_timestep(actx, dcoll, fields=fields)
+ dt = actx.to_numpy(
+ maxwell_operator.estimate_rk4_timestep(actx, dcoll, fields=fields))

dt_stepper = set_up_rk4("w", dt, fields, rhs)

3 changes: 2 additions & 1 deletion examples/wave/var-propagation-speed.py
@@ -104,7 +104,8 @@ def source_f(actx, dcoll, t=0):
def rhs(t, w):
return wave_op.operator(t, w)

- dt = 2/3 * wave_op.estimate_rk4_timestep(actx, dcoll, fields=fields)
+ dt = actx.to_numpy(
+ 2/3 * wave_op.estimate_rk4_timestep(actx, dcoll, fields=fields))
dt_stepper = set_up_rk4("w", dt, fields, rhs)

final_t = 1
3 changes: 2 additions & 1 deletion examples/wave/wave-min-mpi.py
@@ -116,7 +116,8 @@ def source_f(actx, dcoll, t=0):
[dcoll.zeros(actx) for i in range(dcoll.dim)]
)

- dt = 2/3 * wave_op.estimate_rk4_timestep(actx, dcoll, fields=fields)
+ dt = actx.to_numpy(
+ 2/3 * wave_op.estimate_rk4_timestep(actx, dcoll, fields=fields))

wave_op.check_bc_coverage(local_mesh)

10 changes: 5 additions & 5 deletions examples/wave/wave-op-mpi.py
@@ -185,7 +185,7 @@ def main(ctx_factory, dim=2, order=3, visualize=False):
)

c = 1
- dt = 0.45 * estimate_rk4_timestep(actx, dcoll, c)
+ dt = actx.to_numpy(0.45 * estimate_rk4_timestep(actx, dcoll, c))

vis = make_visualizer(dcoll)

@@ -201,12 +201,12 @@ def rhs(t, w):
while t < t_final:
fields = rk4_step(fields, t, dt, rhs)

- l2norm = op.norm(dcoll, fields[0], 2)
+ l2norm = actx.to_numpy(op.norm(dcoll, fields[0], 2))

if istep % 10 == 0:
- linfnorm = op.norm(dcoll, fields[0], np.inf)
- nodalmax = op.nodal_max(dcoll, "vol", fields[0])
- nodalmin = op.nodal_min(dcoll, "vol", fields[0])
+ linfnorm = actx.to_numpy(op.norm(dcoll, fields[0], np.inf))
+ nodalmax = actx.to_numpy(op.nodal_max(dcoll, "vol", fields[0]))
+ nodalmin = actx.to_numpy(op.nodal_min(dcoll, "vol", fields[0]))
if comm.rank == 0:
logger.info(f"step: {istep} t: {t} "
f"L2: {l2norm} "
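A side note on the MPI example just above: the reductions come in rank-global and rank-local flavors, and after this change both hand back device scalars. A hedged sketch, assuming `dcoll`, `actx`, a DOFArray `u`, and an MPI communicator `comm` set up as in that example:

```python
import grudge.op as op

local_max = op.nodal_max_loc(dcoll, "vol", u)   # this rank's data only
global_max = op.nodal_max(dcoll, "vol", u)      # allreduce across all ranks

# Both are device scalars now; convert before formatting on the root rank.
if comm.rank == 0:
    print(f"global max: {actx.to_numpy(global_max)}")
```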
17 changes: 11 additions & 6 deletions examples/wave/wave-op-var-velocity.py
@@ -189,7 +189,7 @@ def main(ctx_factory, dim=2, order=3, visualize=False):

# bounded above by 1
c = 0.2 + 0.8*bump(actx, dcoll, center=np.zeros(3), width=0.5)
- dt = 0.5 * estimate_rk4_timestep(actx, dcoll, c=1)
+ dt = actx.to_numpy(0.5 * estimate_rk4_timestep(actx, dcoll, c=1))

fields = flat_obj_array(
bump(actx, dcoll, ),
@@ -209,12 +209,17 @@ def rhs(t, w):
while t < t_final:
fields = rk4_step(fields, t, dt, rhs)

+ l2norm = actx.to_numpy(op.norm(dcoll, fields[0], 2))

if istep % 10 == 0:
+ linfnorm = actx.to_numpy(op.norm(dcoll, fields[0], np.inf))
+ nodalmax = actx.to_numpy(op.nodal_max(dcoll, "vol", fields[0]))
+ nodalmin = actx.to_numpy(op.nodal_min(dcoll, "vol", fields[0]))
logger.info(f"step: {istep} t: {t} "
f"L2: {op.norm(dcoll, fields[0], 2)} "
f"Linf: {op.norm(dcoll, fields[0], np.inf)} "
f"sol max: {op.nodal_max(dcoll, 'vol', fields[0])} "
f"sol min: {op.nodal_min(dcoll, 'vol', fields[0])}")
f"L2: {l2norm} "
f"Linf: {linfnorm} "
f"sol max: {nodalmax} "
f"sol min: {nodalmin}")
if visualize:
vis.write_vtk_file(
f"fld-wave-eager-var-velocity-{istep:04d}.vtu",
@@ -230,7 +235,7 @@ def rhs(t, w):

# NOTE: These are here to ensure the solution is bounded for the
# time interval specified
- assert op.norm(dcoll, fields[0], 2) < 1
+ assert l2norm < 1


if __name__ == "__main__":
6 changes: 3 additions & 3 deletions grudge/dt_utils.py
@@ -45,7 +45,7 @@

import numpy as np

- from arraycontext import ArrayContext, thaw, freeze
+ from arraycontext import ArrayContext, thaw, freeze, DeviceScalar
from meshmode.transform_metadata import FirstAxisIsElementsTag

from grudge.dof_desc import DD_VOLUME, DOFDesc, as_dofdesc
@@ -158,7 +158,7 @@ def dt_non_geometric_factors(

@memoize_on_first_arg
def h_max_from_volume(
- dcoll: DiscretizationCollection, dim=None, dd=None) -> float:
+ dcoll: DiscretizationCollection, dim=None, dd=None) -> "DeviceScalar":
"""Returns a (maximum) characteristic length based on the volume of the
elements. This length may not be representative if the elements have very
high aspect ratios.
@@ -189,7 +189,7 @@ def h_max_from_volume(

@memoize_on_first_arg
def h_min_from_volume(
- dcoll: DiscretizationCollection, dim=None, dd=None) -> float:
+ dcoll: DiscretizationCollection, dim=None, dd=None) -> "DeviceScalar":
"""Returns a (minimum) characteristic length based on the volume of the
elements. This length may not be representative if the elements have very
high aspect ratios.
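For completeness, a hedged usage sketch for the two `dt_utils` helpers whose annotations change above (assumes `actx` and `dcoll` exist; the host conversion is only needed when a plain number is wanted, e.g. for printing):

```python
from grudge.dt_utils import h_max_from_volume, h_min_from_volume

h_max = actx.to_numpy(h_max_from_volume(dcoll))  # device scalar -> host scalar
h_min = actx.to_numpy(h_min_from_volume(dcoll))
print(f"h_min = {h_min}, h_max = {h_max}")
```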
67 changes: 31 additions & 36 deletions grudge/reductions.py
@@ -60,7 +60,7 @@
from numbers import Number
from functools import reduce

- from arraycontext import make_loopy_program
+ from arraycontext import make_loopy_program, DeviceScalar

from grudge.discretization import DiscretizationCollection

@@ -75,24 +75,26 @@

# {{{ Nodal reductions

- def _norm(dcoll: DiscretizationCollection, vec, p, dd):
+ def _norm(dcoll: DiscretizationCollection, vec, p, dd) -> "DeviceScalar":
if isinstance(vec, Number):
return np.fabs(vec)
if p == 2:
from grudge.op import _apply_mass_operator
- return abs(
- nodal_sum(
- dcoll,
- dd,
- vec.conj() * _apply_mass_operator(dcoll, dd, dd, vec))
- )**(1/2)
+ return vec.array_context.np.sqrt(
+ # Quantities being summed are real up to rounding error, so abs() can
+ # go on the outside
+ abs(
+ nodal_sum(
+ dcoll,
+ dd,
+ vec.conj() * _apply_mass_operator(dcoll, dd, dd, vec))))
elif p == np.inf:
return nodal_max(dcoll, dd, abs(vec))
else:
raise NotImplementedError("Unsupported value of p")


- def norm(dcoll: DiscretizationCollection, vec, p, dd=None) -> float:
+ def norm(dcoll: DiscretizationCollection, vec, p, dd=None) -> "DeviceScalar":
r"""Return the vector p-norm of a function represented
by its vector of degrees of freedom *vec*.

@@ -128,7 +130,7 @@ def norm(dcoll: DiscretizationCollection, vec, p, dd=None) -> float:
return _norm(dcoll, vec, p, dd)


- def nodal_sum(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_sum(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the nodal sum of a vector of degrees of freedom *vec*.

:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
@@ -144,10 +146,11 @@ def nodal_sum(dcoll: DiscretizationCollection, dd, vec) -> float:
from mpi4py import MPI
actx = vec.array_context

- return comm.allreduce(actx.to_numpy(nodal_sum_loc(dcoll, dd, vec)), op=MPI.SUM)
+ return actx.from_numpy(
+ comm.allreduce(actx.to_numpy(nodal_sum_loc(dcoll, dd, vec)), op=MPI.SUM))


- def nodal_sum_loc(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_sum_loc(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the rank-local nodal sum of a vector of degrees of freedom *vec*.

:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
@@ -159,12 +162,12 @@ def nodal_sum_loc(dcoll: DiscretizationCollection, dd, vec) -> float:
return sum(nodal_sum_loc(dcoll, dd, vec[idx])
for idx in np.ndindex(vec.shape))

- # FIXME: do not force result onto host
actx = vec.array_context

return sum([actx.np.sum(grp_ary) for grp_ary in vec])


- def nodal_min(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_min(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the nodal minimum of a vector of degrees of freedom *vec*.

:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
@@ -178,11 +181,13 @@ def nodal_min(dcoll: DiscretizationCollection, dd, vec) -> float:

# NOTE: Don't move this
from mpi4py import MPI
+ actx = vec.array_context

- return comm.allreduce(nodal_min_loc(dcoll, dd, vec), op=MPI.MIN)
+ return actx.from_numpy(
+ comm.allreduce(actx.to_numpy(nodal_min_loc(dcoll, dd, vec)), op=MPI.MIN))


- def nodal_min_loc(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_min_loc(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the rank-local nodal minimum of a vector of degrees
of freedom *vec*.

@@ -197,18 +202,12 @@ def nodal_min_loc(dcoll: DiscretizationCollection, dd, vec) -> float:

actx = vec.array_context

- # FIXME: do not force result onto host
- # Host transfer is needed for now because actx.np.minimum does not succeed
- # on array scalars.
- # https://github.com/inducer/arraycontext/issues/49#issuecomment-869266944
return reduce(
- lambda acc, grp_ary: actx.np.minimum(
- acc,
- actx.to_numpy(actx.np.min(grp_ary))[()]),
- vec, np.inf)
+ lambda acc, grp_ary: actx.np.minimum(acc, actx.np.min(grp_ary)),
+ vec, actx.from_numpy(np.array(np.inf)))


- def nodal_max(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_max(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the nodal maximum of a vector of degrees of freedom *vec*.

:arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value
@@ -222,11 +221,13 @@ def nodal_max_loc(dcoll: DiscretizationCollection, dd, vec) -> float:

# NOTE: Don't move this
from mpi4py import MPI
+ actx = vec.array_context

- return comm.allreduce(nodal_max_loc(dcoll, dd, vec), op=MPI.MAX)
+ return actx.from_numpy(
+ comm.allreduce(actx.to_numpy(nodal_max_loc(dcoll, dd, vec)), op=MPI.MAX))


- def nodal_max_loc(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def nodal_max_loc(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
r"""Return the rank-local nodal maximum of a vector of degrees
of freedom *vec*.

@@ -241,18 +242,12 @@ def nodal_max_loc(dcoll: DiscretizationCollection, dd, vec) -> float:

actx = vec.array_context

- # FIXME: do not force result onto host
- # Host transfer is needed for now because actx.np.minimum does not succeed
- # on array scalars.
- # https://github.com/inducer/arraycontext/issues/49#issuecomment-869266944
return reduce(
- lambda acc, grp_ary: actx.np.maximum(
- acc,
- actx.to_numpy(actx.np.max(grp_ary))[()]),
- vec, -np.inf)
+ lambda acc, grp_ary: actx.np.maximum(acc, actx.np.max(grp_ary)),
+ vec, actx.from_numpy(np.array(-np.inf)))


- def integral(dcoll: DiscretizationCollection, dd, vec) -> float:
+ def integral(dcoll: DiscretizationCollection, dd, vec) -> "DeviceScalar":
"""Numerically integrates a function represented by a
:class:`~meshmode.dof_array.DOFArray` of degrees of freedom.

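To make the rewritten rank-local fold above concrete, here is a self-contained analog in plain NumPy (NumPy stands in for the array context, and `groups` is made-up per-group data, not grudge's real element-group layout):

```python
from functools import reduce
import numpy as np

# Stand-ins for the per-element-group arrays a DOFArray holds.
groups = [np.array([3.0, 1.5, 2.0]), np.array([0.5, 4.0])]

# Fold the per-group minima into a single value, seeding the fold with +inf,
# mirroring how nodal_min_loc now uses actx.np.minimum / actx.np.min on device.
local_min = reduce(
    lambda acc, grp: np.minimum(acc, grp.min()),
    groups, np.array(np.inf))

print(local_min)  # 0.5, a NumPy scalar rather than a Python float
```

In the MPI-aware wrappers, that rank-local device scalar is moved to the host with `actx.to_numpy`, combined across ranks with `comm.allreduce`, and wrapped back up with `actx.from_numpy`, so the public API consistently returns device scalars.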
3 changes: 2 additions & 1 deletion test/test_dt_utils.py
@@ -70,7 +70,8 @@ def test_geometric_factors_regular_refinement(actx_factory, name):
mesh = builder.get_mesh(resolution, builder.mesh_order)
dcoll = DiscretizationCollection(actx, mesh, order=builder.order)
min_factors.append(
op.nodal_min(dcoll, "vol", thaw(dt_geometric_factors(dcoll), actx))
actx.to_numpy(
op.nodal_min(dcoll, "vol", thaw(dt_geometric_factors(dcoll), actx)))
)

# Resolution is doubled each refinement, so the ratio of consecutive