Added forward/backward derivatives for divergence on Cartesian grid #564

Merged · 2 commits merged on May 13, 2024
Binary file modified docs/methods/boundary_discretization/boundary_discretization.pdf
Binary file not shown.
12 changes: 11 additions & 1 deletion docs/methods/boundary_discretization/boundary_discretization.tex
@@ -510,14 +510,24 @@ \subsection{Conservative divergence operator}
\partial_\alpha v_{\alpha} \sim
\frac{r_{n + \frac12}^2 v_r(r_{n + \frac12}) - r_{n - \frac12}^2 v_r(r_{n - \frac12})}{V_n}
\end{align}
where $V_n$ is given by \Eqref{eqn:spherical_shell_volume}.
We obtain different estimates for different finite difference schemes:

\paragraph{Central differences:}
Here, we approximate the function values at the midpoints using a simple average,
\begin{align}
v_r(r_{n + \frac12}) &\approx \frac{v_r(r_{n}) + v_r(r_{n+1})}{2}
\;,
\end{align}
which might introduce uncontrolled artifacts.

\paragraph{Forward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n+1})$.

\paragraph{Backward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n})$.
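
For illustration, substituting the forward estimate directly into the conservative divergence above gives
\begin{align}
\partial_\alpha v_{\alpha} \sim
\frac{r_{n + \frac12}^2 \, v_r(r_{n+1}) - r_{n - \frac12}^2 \, v_r(r_{n})}{V_n}
\;,
\end{align}
and the backward scheme follows analogously with $v_r(r_{n+1}) \to v_r(r_{n})$ and $v_r(r_{n}) \to v_r(r_{n-1})$.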



\subsection{Conservative tensor divergence operator}
The radial component of the divergence of a spherically-symmetric tensor field (with $t_{\theta\theta} = t_{\phi\phi}$) reads
106 changes: 86 additions & 20 deletions pde/grids/operators/cartesian.py
@@ -955,20 +955,33 @@
return gradient_squared


def _make_divergence_scipy_nd(grid: CartesianGrid) -> OperatorType:
def _make_divergence_scipy_nd(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a divergence operator using the scipy module

Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
"""
from scipy import ndimage

data_shape = grid._shape_full
scale = 0.5 / grid.discretization
scale = 1 / grid.discretization
if method == "central":
stencil = [-0.5, 0, 0.5]
elif method == "forward":
stencil = [0, -1, 1]
elif method == "backward":
stencil = [-1, 1, 0]
else:
raise ValueError(f"Unknown derivative type `{method}`")

Codecov check warning: added line pde/grids/operators/cartesian.py#L984 was not covered by tests.

def divergence(arr: np.ndarray, out: np.ndarray) -> None:
"""apply divergence operator to array `arr`"""
@@ -984,45 +997,67 @@
with np.errstate(all="ignore"):
# some errors can happen for ghost cells
for i in range(len(data_shape)):
out += ndimage.convolve1d(arr[i], [1, 0, -1], axis=i)[valid] * scale[i]
out += ndimage.correlate1d(arr[i], stencil, axis=i)[valid] * scale[i]

return divergence
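
The stencils above rely on `ndimage.correlate1d` aligning the kernel center with each grid point; a small sanity-check sketch of that convention (the sample array is illustrative):

```python
import numpy as np
from scipy import ndimage

v = np.array([0.0, 1.0, 4.0, 9.0, 16.0])  # sample values on a 1d line

# correlate1d centers the 3-point stencil on each index i, so
# [0, -1, 1]     -> v[i+1] - v[i]            (forward difference)
# [-1, 1, 0]     -> v[i] - v[i-1]            (backward difference)
# [-0.5, 0, 0.5] -> (v[i+1] - v[i-1]) / 2    (central difference)
forward = ndimage.correlate1d(v, [0.0, -1.0, 1.0])
backward = ndimage.correlate1d(v, [-1.0, 1.0, 0.0])
central = ndimage.correlate1d(v, [-0.5, 0.0, 0.5])

print(forward[1:-1])   # [3. 5. 7.]
print(backward[1:-1])  # [1. 3. 5.]
print(central[1:-1])   # [2. 4. 6.]
```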


def _make_divergence_numba_1d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_1d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 1d divergence operator using numba compilation

Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
"""
if method not in {"central", "forward", "backward"}:
raise ValueError(f"Unknown derivative type `{method}`")

Comment: If you are using Literal, you don't need this check :-)

Member Author: I don't think I agree. Typing is optional in Python, so a user could easily pass different values (for instance by mis-typing!), and those would not be caught at runtime. The check ensures that the right values were given at runtime, whereas the type hint helps developers with proper IDEs and type checking.

Reply: Are you sure? Can you give an example? If you look into the docs of Literal, you can see that it is not optional; you have to give one of the specified values. But better double checking than not checking at all ;-)

Codecov check warning: added line pde/grids/operators/cartesian.py#L1021 was not covered by tests.
dim_x = grid.shape[0]
scale = 0.5 / grid.discretization[0]
dx = grid.discretization[0]

@jit
def divergence(arr: np.ndarray, out: np.ndarray) -> None:
"""apply gradient operator to array `arr`"""
for i in range(1, dim_x + 1):
out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) * scale
if method == "central":
out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) / (2 * dx)
elif method == "forward":
out[i - 1] = (arr[0, i + 1] - arr[0, i]) / dx
elif method == "backward":
out[i - 1] = (arr[0, i] - arr[0, i - 1]) / dx

return divergence # type: ignore
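
The jitted operator above can be cross-checked against a plain NumPy reference; a minimal sketch, assuming the one-ghost-cell layout used by the operators in this file (function and variable names are illustrative):

```python
import numpy as np

def divergence_1d_reference(v, dx, method="central"):
    """reference 1d derivative on interior points of an array with ghost cells

    `v` is assumed to carry one ghost cell on each side, so the result has
    len(v) - 2 entries, matching the valid region of the operator above.
    """
    if method == "central":
        return (v[2:] - v[:-2]) / (2 * dx)
    if method == "forward":
        return (v[2:] - v[1:-1]) / dx
    if method == "backward":
        return (v[1:-1] - v[:-2]) / dx
    raise ValueError(f"Unknown derivative type `{method}`")

# quadratic test data with ghost cells filled by edge padding
v = np.pad(np.linspace(0.0, 1.0, 8) ** 2, 1, mode="edge")
print(divergence_1d_reference(v, dx=1 / 7, method="forward"))
```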


def _make_divergence_numba_2d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_2d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 2d divergence operator using numba compilation

Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
"""
dim_x, dim_y = grid.shape
scale_x, scale_y = 0.5 / grid.discretization
if method == "central":
scale_x, scale_y = 0.5 / grid.discretization
elif method in {"forward", "backward"}:
scale_x, scale_y = 1 / grid.discretization
else:
raise ValueError(f"Unknown derivative type `{method}`")

Codecov check warning: added line pde/grids/operators/cartesian.py#L1060 was not covered by tests.

# use parallel processing for large enough arrays
parallel = dim_x * dim_y >= config["numba.multithreading_threshold"]
@@ -1032,25 +1067,42 @@
"""apply gradient operator to array `arr`"""
for i in nb.prange(1, dim_x + 1):
for j in range(1, dim_y + 1):
d_x = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j - 1]) * scale_y
if method == "central":
d_x = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j - 1]) * scale_y
elif method == "forward":
d_x = (arr[0, i + 1, j] - arr[0, i, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j]) * scale_y
elif method == "backward":
d_x = (arr[0, i, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j] - arr[1, i, j - 1]) * scale_y
out[i - 1, j - 1] = d_x + d_y

return divergence # type: ignore


def _make_divergence_numba_3d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_3d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 3d divergence operator using numba compilation

Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
"""
dim_x, dim_y, dim_z = grid.shape
scale_x, scale_y, scale_z = 0.5 / grid.discretization
if method == "central":
scale_x, scale_y, scale_z = 0.5 / grid.discretization
elif method in {"forward", "backward"}:
scale_x, scale_y, scale_z = 1 / grid.discretization
else:
raise ValueError(f"Unknown derivative type `{method}`")

Codecov check warning: added line pde/grids/operators/cartesian.py#L1105 was not covered by tests.

# use parallel processing for large enough arrays
parallel = dim_x * dim_y * dim_z >= config["numba.multithreading_threshold"]
@@ -1061,17 +1113,28 @@
for i in nb.prange(1, dim_x + 1):
for j in range(1, dim_y + 1):
for k in range(1, dim_z + 1):
d_x = (arr[0, i + 1, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k - 1]) * scale_z
if method == "central":
d_x = (arr[0, i + 1, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k - 1]) * scale_z
elif method == "forward":
d_x = (arr[0, i + 1, j, k] - arr[0, i, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k]) * scale_z
elif method == "backward":
d_x = (arr[0, i, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k] - arr[2, i, j, k - 1]) * scale_z
out[i - 1, j - 1, k - 1] = d_x + d_y + d_z

return divergence # type: ignore


@CartesianGrid.register_operator("divergence", rank_in=1, rank_out=0)
def make_divergence(
grid: CartesianGrid, backend: Literal["auto", "numba", "scipy"] = "auto"
grid: CartesianGrid,
backend: Literal["auto", "numba", "scipy"] = "auto",
method: Literal["central", "forward", "backward"] = "central",
) -> OperatorType:
"""make a divergence operator on a Cartesian grid

@@ -1081,6 +1144,9 @@
backend (str):
Backend used for calculating the divergence operator.
If backend='auto', a suitable backend is chosen automatically.
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
@@ -1096,18 +1162,18 @@

if backend == "numba":
if dim == 1:
divergence = _make_divergence_numba_1d(grid)
divergence = _make_divergence_numba_1d(grid, method=method)
elif dim == 2:
divergence = _make_divergence_numba_2d(grid)
divergence = _make_divergence_numba_2d(grid, method=method)
elif dim == 3:
divergence = _make_divergence_numba_3d(grid)
divergence = _make_divergence_numba_3d(grid, method=method)
else:
raise NotImplementedError(
f"Numba divergence operator not implemented for dimension {dim}"
)

elif backend == "scipy":
divergence = _make_divergence_scipy_nd(grid)
divergence = _make_divergence_scipy_nd(grid, method=method)

else:
raise ValueError(f"Backend `{backend}` is not defined")
27 changes: 22 additions & 5 deletions pde/grids/operators/spherical_sym.py
@@ -183,7 +183,10 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
@SphericalSymGrid.register_operator("divergence", rank_in=1, rank_out=0)
@fill_in_docstring
def make_divergence(
grid: SphericalSymGrid, safe: bool = True, conservative: bool = True
grid: SphericalSymGrid,
safe: bool = True,
conservative: bool = True,
method: Literal["central", "forward", "backward"] = "central",
) -> OperatorType:
"""make a discretized divergence operator for a spherical grid

@@ -203,6 +206,9 @@ def make_divergence(
Flag indicating whether the operator should be conservative (which results
in slightly slower computations). Conservative operators ensure mass
conservation.
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.

Returns:
A function that can be applied to an array of values
@@ -234,13 +240,19 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
if method == "central":
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
elif method == "forward":
term_h = 2 * factor_h[i - 1] * arr_r[i + 1]
term_l = 2 * factor_l[i - 1] * arr_r[i]
elif method == "backward":
term_h = 2 * factor_h[i - 1] * arr_r[i]
term_l = 2 * factor_l[i - 1] * arr_r[i - 1]
out[i - 1] = term_h - term_l

else:
# implement naive divergence operator
scale_r = 1 / (2 * dr)
factors = 2 / rs # factors that need to be multiplied below

@jit
@@ -255,7 +267,12 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
diff_r = (arr_r[i + 1] - arr_r[i - 1]) * scale_r
if method == "central":
diff_r = (arr_r[i + 1] - arr_r[i - 1]) / (2 * dr)
elif method == "forward":
diff_r = (arr_r[i + 1] - arr_r[i]) / dr
elif method == "backward":
diff_r = (arr_r[i] - arr_r[i - 1]) / dr
out[i - 1] = diff_r + factors[i - 1] * arr_r[i]

return divergence # type: ignore
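
A usage sketch for the spherical case, mirroring the conservativity test below (the grid radius is an illustrative assumption; the field expression is taken from that test):

```python
import pde

grid = pde.SphericalSymGrid(radius=2, shape=64)
vf = pde.VectorField.from_expression(grid, ["1 / cosh((r - 1) * 10)", 0, 0])

# the conservative divergence should integrate to (approximately) zero for a
# field that vanishes at both boundaries, for all three schemes
for method in ["central", "forward", "backward"]:
    div = vf.divergence(bc="derivative", conservative=True, method=method)
    print(method, div.integral)
```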
7 changes: 4 additions & 3 deletions tests/grids/operators/test_cartesian_operators.py
@@ -181,13 +181,14 @@ def test_gradient_cart(ndim, method, periodic, rng):


@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("method", ["central", "forward", "backward"])
@pytest.mark.parametrize("periodic", [True, False])
def test_divergence_cart(ndim, periodic, rng):
def test_divergence_cart(ndim, method, periodic, rng):
"""test different divergence operators"""
bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic=periodic, rank=1)
field = VectorField.random_uniform(bcs.grid, rng=rng)
res1 = field.divergence(bcs, backend="scipy").data
res2 = field.divergence(bcs, backend="numba").data
res1 = field.divergence(bcs, backend="scipy", method=method).data
res2 = field.divergence(bcs, backend="numba", method=method).data
np.testing.assert_allclose(res1, res2)
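
Beyond checking that the scipy and numba backends agree, one could also verify the expected convergence orders (second order for 'central', first order for the one-sided schemes); a sketch of such a check, with illustrative grid sizes and test field:

```python
import numpy as np
import pde

def max_divergence_error(n, method):
    """maximal divergence error of [sin(x), cos(y)] on an n-by-n periodic grid"""
    grid = pde.CartesianGrid([[0, 2 * np.pi]] * 2, [n, n], periodic=True)
    field = pde.VectorField.from_expression(grid, ["sin(x)", "cos(y)"])
    exact = pde.ScalarField.from_expression(grid, "cos(x) - sin(y)")
    div = field.divergence(bc="periodic", method=method)
    return np.abs(div.data - exact.data).max()

for method in ["central", "forward", "backward"]:
    ratio = max_divergence_error(32, method) / max_divergence_error(64, method)
    # a ratio near 4 indicates second-order, near 2 first-order convergence
    print(method, ratio)
```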


13 changes: 8 additions & 5 deletions tests/grids/operators/test_spherical_operators.py
@@ -38,8 +38,10 @@ def test_findiff_sph():
# test divergence
div = v.divergence(bc=["derivative", "value"], conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, -6 + 8 / r2])
div = v.divergence(bc="derivative", conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, 2 + 8 / r2])
div = v.divergence(bc="derivative", method="forward", conservative=False)
np.testing.assert_allclose(div.data, [10, 4 + 4 / r1, 8 / r2])
div = v.divergence(bc="derivative", method="backward", conservative=False)
np.testing.assert_allclose(div.data, [8, 2 + 4 / r1, 4 + 8 / r2])


def test_conservative_sph():
@@ -48,9 +50,10 @@ def test_conservative_sph():
expr = "1 / cosh((r - 1) * 10)"

# test divergence of vector field
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True)
assert div.integral == pytest.approx(0, abs=1e-2)
for method in ["central", "forward", "backward"]:
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True, method=method)
assert div.integral == pytest.approx(0, abs=1e-2)

# test laplacian of scalar field
lap = vf[0].laplace("derivative")