diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index bc6da5a5b8..5156f9f846 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -75,12 +75,17 @@ jobs:
     if: ${{ ! inputs.build_dev }}
     needs: docker_merge_vanilla
     uses: ./.github/workflows/docker_build.yml
-    # Only build the 'firedrake' container for 'linux/amd64' because
-    # netgen-mesher does not have ARM wheels so many Firedrake apps cannot
-    # be installed.
+    strategy:
+      matrix:
+        os: [Linux, macOS]
+        include:
+          - os: Linux
+            platform: linux/amd64
+          - os: macOS
+            platform: linux/arm64
     with:
-      os: Linux
-      platform: linux/amd64
+      os: ${{ matrix.os }}
+      platform: ${{ matrix.platform }}
       target: firedrake
       tag: ${{ inputs.tag }}
       dockerfile: docker/Dockerfile.firedrake
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 4a3cd76cb9..218547a6cd 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -104,7 +104,7 @@ Robert C. Kirby...............
 
 Stephan C. Kramer.............
 
-Tuomas Kärnä..................
+Tuomas Kärnä
 
 Michael Lange.................
 
diff --git a/demos/demo_references.bib b/demos/demo_references.bib
index ec9588e178..901abaa917 100644
--- a/demos/demo_references.bib
+++ b/demos/demo_references.bib
@@ -331,9 +331,21 @@ @article{Krishna:1997
 
 @article{BaierReinio:2025,
   title={High-order finite element methods for three-dimensional multicomponent convection-diffusion},
-  author={Baier-Reinio, Aaron and Farrell, Patrick E},
+  author={Baier-Reinio, Aaron and Farrell, Patrick E.},
   journal={arXiv preprint arXiv:2408.17390},
-  year={2025}
+  year={2025},
+  doi={10.48550/arXiv.2408.17390}
+}
+
+@article{VanBrunt:2025,
+  title={Finite element methods for multicomponent convection-diffusion},
+  author={Aznaran, Francis R. A. and Farrell, Patrick E. and Monroe, Charles W. and Van-Brunt, Alexander J.},
+  journal={IMA J. Numer. Anal.},
+  volume={45},
+  number={1},
+  pages={188--222},
+  year={2025},
+  doi={10.1093/imanum/drae001}
 }
 
 @article{Brubeck2022,
diff --git a/demos/multicomponent/multicomponent.py.rst b/demos/multicomponent/multicomponent.py.rst
index bf74e5d2e0..aae9470301 100644
--- a/demos/multicomponent/multicomponent.py.rst
+++ b/demos/multicomponent/multicomponent.py.rst
@@ -78,6 +78,8 @@ The domain and mesh are visualised below.
 
 To model the mixture we employ the Stokes--Onsager--Stefan--Maxwell (SOSM)
 partial differential equations and discretise with the method of :cite:`BaierReinio:2025`.
+We use the material property values specified in :cite:`VanBrunt:2025`,
+wherein a similar benzene-cyclohexane simulation is considered.
 In what follows species 1 refers to benzene and species 2 to cyclohexane.
 
 We shall discretise the following unknowns:
diff --git a/docker/Dockerfile.firedrake b/docker/Dockerfile.firedrake
index c2ca01623f..ef7e9b24bf 100644
--- a/docker/Dockerfile.firedrake
+++ b/docker/Dockerfile.firedrake
@@ -1,27 +1,35 @@
-# Dockerfile for Firedrake with a full set of capabilities and applications installed.
+# Dockerfile for Firedrake with a full set of capabilities installed.
 
 FROM firedrakeproject/firedrake-vanilla-default:latest
 
 # Install optional dependencies
 RUN pip install --verbose \
     --extra-index-url https://download.pytorch.org/whl/cpu \
-    jax ngsPETSc torch vtk
+    jax torch
 
-# Set PYTHONPATH so netgen can be found
-# (see https://github.com/NGSolve/netgen/issues/213)
-ENV PYTHONPATH=/usr/local/lib/python3.12/site-packages:$PYTHONPATH
-
-# Install Firedrake apps
-RUN pip install --verbose --src /opt \
-    -e git+https://github.com/firedrakeproject/asQ.git#egg=asQ \
-    -e git+https://bitbucket.org/pefarrell/defcon.git#egg=defcon \
-    -e git+https://bitbucket.org/pefarrell/fascd.git#egg=fascd \
-    -e git+https://github.com/FEMlium/FEMlium.git#egg=FEMlium \
-    -e git+https://github.com/g-adopt/g-adopt.git#egg=gadopt \
-    -e git+https://github.com/firedrakeproject/gusto.git#egg=gusto \
-    -e git+https://github.com/firedrakeproject/Irksome.git#egg=Irksome \
-    -e git+https://github.com/icepack/icepack.git#egg=icepack \
-    -e git+https://github.com/thetisproject/thetis.git#egg=thetis
+# Install ngsPETSc and dependencies. Netgen does not package ARM wheels, so
+# we have to build it from source in that case. On x86 we instead have to set
+# PYTHONPATH so netgen can be found (see https://github.com/NGSolve/netgen/issues/213).
+RUN \
+    if [ "$(dpkg --print-architecture)" = "arm64" ]; then \
+        apt-get update \
+        && apt-get -y install python3-tk libxmu-dev tk-dev tcl-dev cmake g++ libglu1-mesa-dev liblapacke-dev libocct-data-exchange-dev libocct-draw-dev occt-misc libtbb-dev libxi-dev \
+        && rm -rf /var/lib/apt/lists/* \
+        && mkdir /opt/ngsuite \
+        && git clone --branch=v6.2.2505 --single-branch https://github.com/NGSolve/netgen.git /opt/ngsuite/netgen-src \
+        && git -C /opt/ngsuite/netgen-src submodule update --init --recursive \
+        && mkdir /opt/ngsuite/netgen-build \
+        && mkdir /opt/ngsuite/netgen-install \
+        && cmake -DCMAKE_CXX_FLAGS="-flax-vector-conversions" -DCMAKE_INSTALL_PREFIX=/opt/ngsuite/netgen-install /opt/ngsuite/netgen-src \
+        && make \
+        && make install \
+        && export PATH=/opt/ngsuite/netgen-install/bin:$PATH \
+        && export PYTHONPATH=/opt/ngsuite/netgen-install/lib/python3.12/site-packages:$PYTHONPATH \
+        && pip install ngsPETSc --no-deps; \
+    else \
+        export PYTHONPATH=/usr/local/lib/python3.12/site-packages:$PYTHONPATH \
+        && pip install ngsPETSc; \
+    fi
 
 # Install some other niceties
 RUN apt-get update \
diff --git a/docs/source/conf.py b/docs/source/conf.py
index b0a8c5d882..089395ecb4 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -164,15 +164,12 @@
     r'http://www.cs.virginia.edu/stream/',
     r'https://www.sciencedirect.com',
    r'https://.*\.baylor\.edu.*',
-    r'https://www.tuomaskarna.com',
     r'https://www.crosscountrytrains.co.uk/',
     r'https://www.siam.org/',
     r'https://aims.ac.rw',
     r'https://mpecdt.ac.uk',
     r'https://www.hilton.com/en/hotels/leehnhn-hilton-leeds-city/',
-    r'https://www.radissonhotels.com/en-us/hotels/park-plaza-leeds',
-    r'https://www.radissonhotels.com/en-us/hotels/radisson-blu-leeds'
-    r'https://www.radissonhotels.com/en-us/hotels/radisson-blu-leeds',
+    r'https://www.radissonhotels.com/*',
     r'https://all.accor.com/hotel/*',
 ]
 linkcheck_timeout = 30
diff --git a/docs/source/team.ini b/docs/source/team.ini
index ddd4f119a1..a220c0b8b1 100644
--- a/docs/source/team.ini
+++ b/docs/source/team.ini
@@ -95,7 +95,7 @@ Christian T. Jacobs:
 Darko Janeković:
 Nick Johnson:
 Anna Kalogirou:
-Tuomas Kärnä: https://www.tuomaskarna.com
+Tuomas Kärnä:
 Stephan C. Kramer: https://www.imperial.ac.uk/people/s.kramer
 Nicolas Loriant:
 Fabio Luporini:
diff --git a/docs/source/visualisation.rst b/docs/source/visualisation.rst
index c083a3e34d..b13959d9e8 100644
--- a/docs/source/visualisation.rst
+++ b/docs/source/visualisation.rst
@@ -332,5 +332,5 @@ matplotlib.
 .. _matplotlib: http://matplotlib.org
 .. _Arbitrary: https://www.kitware.com/modeling-arbitrary-order-lagrange-finite-elements-in-the-visualization-toolkit/
 __ Arbitrary_
-.. _Tessellate: https://www.paraview.org/paraview-docs/nightly/python/paraview.simple.Tessellate.html
+.. _Tessellate: https://www.paraview.org/paraview-docs/nightly/python/paraview.simple.__init__.Tessellate.html
 .. _Tessellation: https://ieeexplore.ieee.org/document/1634311/
diff --git a/firedrake/assemble.py b/firedrake/assemble.py
index 88aa7a7613..641125f735 100644
--- a/firedrake/assemble.py
+++ b/firedrake/assemble.py
@@ -386,6 +386,16 @@ def allocation_integral_types(self):
         else:
             return self._allocation_integral_types
 
+    @staticmethod
+    def _as_pyop2_type(tensor, indices=None):
+        if isinstance(tensor, (firedrake.Cofunction, firedrake.Function)):
+            return OneFormAssembler._as_pyop2_type(tensor, indices=indices)
+        elif isinstance(tensor, ufl.Matrix):
+            return ExplicitMatrixAssembler._as_pyop2_type(tensor, indices=indices)
+        else:
+            assert indices is None
+            return tensor
+
     def assemble(self, tensor=None, current_state=None):
         """Assemble the form.
 
@@ -410,21 +420,22 @@ def assemble(self, tensor=None, current_state=None):
         """
         def visitor(e, *operands):
             t = tensor if e is self._form else None
-            return self.base_form_assembly_visitor(e, t, *operands)
+            # Deal with 2-form bcs inside the visitor
+            bcs = self._bcs if isinstance(e, ufl.BaseForm) and len(e.arguments()) == 2 else ()
+            return self.base_form_assembly_visitor(e, t, bcs, *operands)
 
         # DAG assembly: traverse the DAG in a post-order fashion and evaluate the node on the fly.
         visited = {}
         result = BaseFormAssembler.base_form_postorder_traversal(self._form, visitor, visited)
 
-        # Apply BCs after assembly
+        # Deal with 1-form bcs outside the visitor
         rank = len(self._form.arguments())
         if rank == 1 and not isinstance(result, ufl.ZeroBaseForm):
             for bc in self._bcs:
                 OneFormAssembler._apply_bc(self, result, bc, u=current_state)
-
         return result
 
-    def base_form_assembly_visitor(self, expr, tensor, *args):
+    def base_form_assembly_visitor(self, expr, tensor, bcs, *args):
         r"""Assemble a :class:`~ufl.classes.BaseForm` object given its assembled operands.
 
         This functions contains the assembly handlers corresponding to the different nodes that
@@ -445,7 +456,7 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
                 assembler = OneFormAssembler(form, form_compiler_parameters=self._form_compiler_params,
                                              zero_bc_nodes=self._zero_bc_nodes, diagonal=self._diagonal, weight=self._weight)
             elif rank == 2:
-                assembler = TwoFormAssembler(form, bcs=self._bcs, form_compiler_parameters=self._form_compiler_params,
+                assembler = TwoFormAssembler(form, bcs=bcs, form_compiler_parameters=self._form_compiler_params,
                                              mat_type=self._mat_type, sub_mat_type=self._sub_mat_type,
                                              options_prefix=self._options_prefix, appctx=self._appctx, weight=self._weight,
                                              allocation_integral_types=self.allocation_integral_types)
@@ -456,13 +467,12 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
             if len(args) != 1:
                 raise TypeError("Not enough operands for Adjoint")
             mat, = args
-            res = tensor.petscmat if tensor else PETSc.Mat()
-            petsc_mat = mat.petscmat
+            result = tensor.petscmat if tensor else PETSc.Mat()
             # Out-of-place Hermitian transpose
-            petsc_mat.hermitianTranspose(out=res)
-            (row, col) = mat.arguments()
-            return matrix.AssembledMatrix((col, row), self._bcs, res,
-                                          options_prefix=self._options_prefix)
+            mat.petscmat.hermitianTranspose(out=result)
+            if tensor is None:
+                tensor = self.assembled_matrix(expr, bcs, result)
+            return tensor
         elif isinstance(expr, ufl.Action):
             if len(args) != 2:
                 raise TypeError("Not enough operands for Action")
@@ -480,7 +490,7 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
                 result = tensor.petscmat if tensor else PETSc.Mat()
                 lhs.petscmat.matMult(rhs.petscmat, result=result)
                 if tensor is None:
-                    tensor = self.assembled_matrix(expr, result)
+                    tensor = self.assembled_matrix(expr, bcs, result)
                 return tensor
             else:
                 raise TypeError("Incompatible RHS for Action.")
@@ -499,9 +509,6 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
                 raise TypeError("Mismatching weights and operands in FormSum")
             if len(args) == 0:
                 raise TypeError("Empty FormSum")
-            if tensor:
-                tensor.zero()
-
             # Assemble weights
             weights = []
             for w in expr.weights():
@@ -519,27 +526,54 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
                     raise ValueError("Expecting a scalar weight expression")
                 weights.append(w)
 
+            # Scalar FormSum
             if all(isinstance(op, numbers.Complex) for op in args):
-                result = sum(weight * arg for weight, arg in zip(weights, args))
-                return tensor.assign(result) if tensor else result
+                result = numpy.dot(weights, args)
+                return tensor.assign(result) if tensor else result.item()
+
+            # Accumulate coefficients in a dictionary for each unique Dat/Mat
+            terms = defaultdict(PETSc.ScalarType)
+            for arg, weight in zip(args, weights):
+                t = self._as_pyop2_type(arg)
+                terms[t] += weight
+
+            # Zero the output tensor, or rescale it if it appears in the sum
+            tensor_scale = terms.pop(self._as_pyop2_type(tensor), 0)
+            if tensor is None or tensor_scale == 1:
+                pass
+            elif tensor_scale == 0:
+                tensor.zero()
+            elif isinstance(tensor, (firedrake.Cofunction, firedrake.Function)):
+                with tensor.dat.vec as v:
+                    v.scale(tensor_scale)
+            elif isinstance(tensor, ufl.Matrix):
+                tensor.petscmat.scale(tensor_scale)
+            else:
+                raise ValueError("Expecting tensor to be None, Function, Cofunction, or Matrix")
+
+            # Compute the linear combination
+            if (all(isinstance(op, firedrake.Cofunction) for op in args)
-            elif (all(isinstance(op, firedrake.Cofunction) for op in args)
                   or all(isinstance(op, firedrake.Function) for op in args)):
+                # Vector FormSum
                 V, = set(a.function_space() for a in args)
                 result = tensor if tensor else firedrake.Function(V)
-                result.dat.maxpy(weights, [a.dat for a in args])
+                weights = terms.values()
+                dats = terms.keys()
+                result.dat.maxpy(weights, dats)
                 return result
             elif all(isinstance(op, ufl.Matrix) for op in args):
+                # Matrix FormSum
                 result = tensor.petscmat if tensor else PETSc.Mat()
-                for (op, w) in zip(args, weights):
+                for (op, w) in terms.items():
                     if result:
                         # If result is not void, then accumulate on it
-                        result.axpy(w, op.petscmat)
+                        result.axpy(w, op.handle)
                     else:
                         # If result is void, then allocate it with first term
-                        op.petscmat.copy(result=result)
+                        op.handle.copy(result=result)
                         result.scale(w)
                 if tensor is None:
-                    tensor = self.assembled_matrix(expr, result)
+                    tensor = self.assembled_matrix(expr, bcs, result)
                 return tensor
             else:
                 raise TypeError("Mismatching FormSum shapes")
@@ -571,9 +605,8 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
                 # Occur in situations such as Interpolate composition
                 operand = assembled_operand[0]
 
-            reconstruct_interp = expr._ufl_expr_reconstruct_
             if (v, operand) != expr.argument_slots():
-                expr = reconstruct_interp(operand, v=v)
+                expr = expr._ufl_expr_reconstruct_(operand, v=v)
 
             rank = len(expr.arguments())
             if rank > 2:
@@ -586,7 +619,7 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
             default_missing_val = interp_data.pop('default_missing_val', None)
             if rank == 1 and isinstance(tensor, firedrake.Function):
                 V = tensor
-            interpolator = firedrake.Interpolator(expr, V, **interp_data)
+            interpolator = firedrake.Interpolator(expr, V, bcs=bcs, **interp_data)
             # Assembly
             return interpolator.assemble(tensor=tensor, default_missing_val=default_missing_val)
         elif tensor and isinstance(expr, (firedrake.Function, firedrake.Cofunction, firedrake.MatrixBase)):
@@ -598,8 +631,8 @@ def base_form_assembly_visitor(self, expr, tensor, *args):
         else:
             raise TypeError(f"Unrecognised BaseForm instance: {expr}")
 
-    def assembled_matrix(self, expr, petscmat):
-        return matrix.AssembledMatrix(expr.arguments(), self._bcs, petscmat,
+    def assembled_matrix(self, expr, bcs, petscmat):
+        return matrix.AssembledMatrix(expr.arguments(), bcs, petscmat,
                                       options_prefix=self._options_prefix)
 
     @staticmethod
@@ -1448,10 +1481,11 @@ def _apply_bc(self, tensor, bc, u=None):
         index = 0 if V.index is None else V.index
         space = V if V.parent is None else V.parent
         if isinstance(bc, DirichletBC):
-            if space != spaces[0]:
-                raise TypeError("bc space does not match the test function space")
-            elif space != spaces[1]:
-                raise TypeError("bc space does not match the trial function space")
+            if not any(space == fs for fs in spaces):
+                raise TypeError("bc space does not match the test or trial function space")
+            if spaces[0] != spaces[1]:
+                # Not on a diagonal block, so we cannot set diagonal entries
+                return
 
             # Set diagonal entries on bc nodes to 1 if the current
             # block is on the matrix diagonal and its index matches the
diff --git a/firedrake/interpolation.py b/firedrake/interpolation.py
index 23fd97a9b8..85f17400cd 100644
--- a/firedrake/interpolation.py
+++ b/firedrake/interpolation.py
@@ -1703,7 +1703,12 @@ def __init__(self, expr, V, bcs=None, **kwargs):
         super(MixedInterpolator, self).__init__(expr, V, bcs=bcs, **kwargs)
         expr = self.ufl_interpolate
         self.arguments = expr.arguments()
-        rank = len(self.arguments)
+        # Get the primal spaces
+        spaces = tuple(a.function_space().dual() if isinstance(a, Coargument) else a.function_space()
+                       for a in self.arguments)
+        # TODO consider a stricter equality test for indexed MixedFunctionSpace
+        # See https://github.com/firedrakeproject/firedrake/issues/4668
+        space_equals = lambda V1, V2: V1 == V2 and V1.parent == V2.parent and V1.index == V2.index
 
         # We need a Coargument in order to split the Interpolate
         needs_action = len([a for a in self.arguments if isinstance(a, Coargument)]) == 0
@@ -1722,12 +1727,10 @@ def __init__(self, expr, V, bcs=None, **kwargs):
                 continue
             vi, _ = form.argument_slots()
             Vtarget = vi.function_space().dual()
-            if bcs and rank != 0:
-                args = form.arguments()
-                Vsource = args[1-vi.number()].function_space()
-                sub_bcs = [bc for bc in bcs if bc.function_space() in {Vsource, Vtarget}]
-            else:
-                sub_bcs = None
+            sub_bcs = []
+            for space, index in zip(spaces, indices):
+                subspace = space.sub(index)
+                sub_bcs.extend(bc for bc in bcs if space_equals(bc.function_space(), subspace))
             if needs_action:
                 # Take the action of each sub-cofunction against each block
                 form = action(form, dual_split[indices[-1:]])
diff --git a/firedrake/matrix.py b/firedrake/matrix.py
index 670b38c202..2f33841289 100644
--- a/firedrake/matrix.py
+++ b/firedrake/matrix.py
@@ -5,7 +5,12 @@
 from pyop2.mpi import internal_comm
 from pyop2.utils import as_tuple
 from firedrake.petsc import PETSc
-from types import SimpleNamespace
+
+
+class DummyOP2Mat:
+    """A hashable implementation of M.handle"""
+    def __init__(self, handle):
+        self.handle = handle
 
 
 class MatrixBase(ufl.Matrix):
@@ -240,8 +245,8 @@ def __init__(self, a, bcs, petscmat, *args, **kwargs):
         if options_prefix is not None:
             self.petscmat.setOptionsPrefix(options_prefix)
 
-        # this allows call to self.M.handle without a new class
-        self.M = SimpleNamespace(handle=self.mat())
+        # this mimics op2.Mat.handle
+        self.M = DummyOP2Mat(self.mat())
 
     def mat(self):
         return self.petscmat
diff --git a/firedrake/ml/jax/fem_operator.py b/firedrake/ml/jax/fem_operator.py
index a274682da3..94f2875202 100644
--- a/firedrake/ml/jax/fem_operator.py
+++ b/firedrake/ml/jax/fem_operator.py
@@ -170,10 +170,10 @@ def to_jax(x: Union[Function, Constant], gather: Optional[bool] = False, batched
     if isinstance(x, (Function, Cofunction)):
         if gather:
             # Gather data from all processes
-            x_P = jnp.array(x.dat.global_data, **kwargs)
+            x_P = jnp.array(np.ravel(x.dat.global_data), **kwargs)
         else:
             # Use local data
-            x_P = jnp.array(x.dat.data_ro, **kwargs)
+            x_P = jnp.array(np.ravel(x.dat.data_ro), **kwargs)
         if batched:
             # Default behaviour: add batch dimension after converting to JAX
             return x_P[None, :]
diff --git a/firedrake/ml/pytorch/fem_operator.py b/firedrake/ml/pytorch/fem_operator.py
index 764c154091..868c4cbb34 100644
--- a/firedrake/ml/pytorch/fem_operator.py
+++ b/firedrake/ml/pytorch/fem_operator.py
@@ -15,6 +15,7 @@
 else:
     raise ImportError("PyTorch is not installed and is required to use the FiredrakeTorchOperator.")
 
+import numpy as np
 import collections
 from functools import partial
 
@@ -174,10 +175,10 @@ def to_torch(x, gather=False, batched=True, **kwargs):
     if isinstance(x, (Function, Cofunction)):
         if gather:
             # Gather data from all processes
-            x_P = torch.tensor(x.dat.global_data, **kwargs)
+            x_P = torch.tensor(np.ravel(x.dat.global_data), **kwargs)
         else:
             # Use local data
-            x_P = torch.tensor(x.dat.data_ro, **kwargs)
+            x_P = torch.tensor(np.ravel(x.dat.data_ro), **kwargs)
         if batched:
             # Default behaviour: add batch dimension after converting to PyTorch
             return x_P[None, :]
diff --git a/pyop2/types/mat.py b/pyop2/types/mat.py
index d0fb9e2404..e03298360a 100644
--- a/pyop2/types/mat.py
+++ b/pyop2/types/mat.py
@@ -783,29 +783,69 @@ def zero(self):
         self.handle.zeroEntries()
 
     @mpi.collective
-    def zero_rows(self, rows, diag_val=1.0):
+    def zero_rows(self,
+                  rows: Sequence | Subset,
+                  diag_val: float = 1.0,
+                  idx: int | None = None):
         """Zeroes the specified rows of the matrix, with the exception of
         the diagonal entry, which is set to diag_val. May be used for
         applying strong boundary conditions.
 
-        :param rows: a :class:`Subset` or an iterable"""
-        self.assemble()
+        Parameters
+        ----------
+        rows:
+            The row indices to be zeroed out.
+        diag_val:
+            The value to be inserted along the diagonal entries of the zeroed rows.
+        idx:
+            For matrices with block row size > 1, this option enables zeroing
+            the component with index `idx`. The default is to zero every component.
+
+        Note
+        ----
+        The indices in ``rows`` should index the process-local rows of
+        the matrix (no mapping to global indexes is applied).
+
+        """
         rows = rows.indices if isinstance(rows, Subset) else rows
+        rows = np.asarray(rows, dtype=dtypes.IntType)
+        rbs, _ = self.dims[0][0]
+        if rbs > 1:
+            if idx is not None:
+                rows = rbs * rows + idx
+            else:
+                rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten()
+        self.assemble()
         self.handle.zeroRowsLocal(rows, diag_val)
 
     def _flush_assembly(self):
         self.handle.assemble(assembly=PETSc.Mat.AssemblyType.FLUSH)
 
     @mpi.collective
-    def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):
+    def set_local_diagonal_entries(self,
+                                   rows: Sequence | Subset,
+                                   diag_val: float = 1.0,
+                                   idx: int | None = None):
         """Set the diagonal entry in ``rows`` to a particular value.
 
-        :param rows: a :class:`Subset` or an iterable.
-        :param diag_val: the value to add
-
+        Parameters
+        ----------
+        rows:
+            The row indices of the diagonal entries to be modified.
+        diag_val:
+            The value to insert along the diagonal.
+        idx:
+            For matrices with block row size > 1, this option enables setting the
+            diagonal component with index `idx`. The default is to set every
+            component.
+
+        Note
+        ----
         The indices in ``rows`` should index the process-local rows of
         the matrix (no mapping to global indexes is applied).
+
         """
+        rows = rows.indices if isinstance(rows, Subset) else rows
         rows = np.asarray(rows, dtype=dtypes.IntType)
         rbs, _ = self.dims[0][0]
         if rbs > 1:
@@ -915,6 +955,17 @@ def __getitem__(self, idx):
     def __iter__(self):
         yield self
 
+    def zero_rows(self, rows, diag_val=1.0, idx=None):
+        rows = rows.indices if isinstance(rows, Subset) else rows
+        rows = np.asarray(rows, dtype=dtypes.IntType)
+        rbs, _ = self.dims[0][0]
+        if rbs > 1:
+            if idx is not None:
+                rows = rbs * rows + idx
+            else:
+                rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten()
+        self.handle.zeroRowsLocal(rows, diag_val)
+
     def _flush_assembly(self):
         # Need to flush for all blocks
         for b in self._parent:
@@ -922,6 +973,7 @@ def _flush_assembly(self):
             self._parent._flush_assembly()
 
     def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):
+        rows = rows.indices if isinstance(rows, Subset) else rows
         rows = np.asarray(rows, dtype=dtypes.IntType)
         rbs, _ = self.dims[0][0]
         if rbs > 1:
diff --git a/tests/firedrake/external_operators/test_external_operators.py b/tests/firedrake/external_operators/test_external_operators.py
index b7aac1e81b..891b2a0462 100644
--- a/tests/firedrake/external_operators/test_external_operators.py
+++ b/tests/firedrake/external_operators/test_external_operators.py
@@ -188,6 +188,8 @@ def test_solve(mesh, solver_parameters):
     # calls the method of the external operator subclass associated with the assembly of the Jacobian action.
     solve(F == 0, u, bcs=bcs, solver_parameters=solver_parameters)
 
+    assert np.allclose(u.dat.data, w.dat.data)
+
     # Solve the Poisson problem:
     # - Δu + u = N(f) in Ω
     # u = 0 on ∂Ω
@@ -199,7 +201,7 @@ def test_solve(mesh, solver_parameters):
     F = inner(grad(u2), grad(v)) * dx + inner(u2, v) * dx - inner(N, v) * dx
     solve(F == 0, u2, bcs=bcs, solver_parameters=solver_parameters)
 
-    assert (np.allclose(u.dat.data, w.dat.data) and np.allclose(u2.dat.data, w.dat.data))
+    assert np.allclose(u2.dat.data, w.dat.data)
 
 
 def test_multiple_external_operators(mesh):
diff --git a/tests/firedrake/external_operators/test_jax_operator.py b/tests/firedrake/external_operators/test_jax_operator.py
index dcd4e53749..55d2e752d4 100644
--- a/tests/firedrake/external_operators/test_jax_operator.py
+++ b/tests/firedrake/external_operators/test_jax_operator.py
@@ -25,11 +25,13 @@ def rg():
 class Linear():
     """Linear layer: y = Wx + b"""
 
-    def __init__(self, n):
+    def __init__(self, n, m=None):
         # Randomly initialise weights and biases
+        if m is None:
+            m = n
         w_key, b_key = jax.random.split(key)
-        self.weight = jax.random.normal(w_key, (n, n))
-        self.bias = jax.random.normal(b_key, (n,))
+        self.weight = jax.random.normal(w_key, (n, m))
+        self.bias = jax.random.normal(b_key, (m,))
 
     def __call__(self, x):
         return jnp.dot(self.weight, x) + self.bias
@@ -245,3 +247,33 @@ def test_solve(mesh, V):
 
     err_point_expr = assemble((u-u2)**2*dx)/assemble(u**2*dx)
     assert err_point_expr < 1.0e-09
+
+
+@pytest.mark.skipcomplex  # jacrev requires real-valued outputs, but got complex128.
+@pytest.mark.skipjax  # Skip if JAX is not installed
+def test_mixed_space_bcs():
+    mesh = UnitIntervalMesh(4)
+    V = FunctionSpace(mesh, "CG", 1)
+    W = V * V
+
+    test = TestFunction(W)
+    bcs = [DirichletBC(W.sub(0), Constant(1), 1),
+           DirichletBC(W.sub(1), Constant(2), 1)]
+
+    model = Linear(W.dim(), V.dim())
+    I = jnp.eye(V.dim())
+    model.weight = jnp.concatenate([I, I], axis=1)
+    model.bias = jnp.zeros(V.dim())
+
+    p1 = ml_operator(model, function_space=V, inputs_format=1)
+    p2 = sum
+
+    results = []
+    for p in (p1, p2):
+        w = Function(W)
+        F = inner(w, test)*dx + inner(p(w), sum(test))*dx
+        solve(F == 0, w, bcs=bcs)
+        results.append(np.ravel(w.dat.data))
+
+    result, expected = results
+    assert np.allclose(result, expected)
diff --git a/tests/firedrake/external_operators/test_pytorch_operator.py b/tests/firedrake/external_operators/test_pytorch_operator.py
index 5d5383c167..d9782d1644 100644
--- a/tests/firedrake/external_operators/test_pytorch_operator.py
+++ b/tests/firedrake/external_operators/test_pytorch_operator.py
@@ -238,3 +238,34 @@ def test_solve(mesh, V):
 
     err_point_expr = assemble((u-u2)**2*dx)/assemble(u**2*dx)
     assert err_point_expr < 1.0e-09
+
+
+@pytest.mark.skipcomplex  # grad can be implicitly created only for real scalar outputs but got torch.complex128
+@pytest.mark.skiptorch  # Skip if PyTorch is not installed
+def test_mixed_space_bcs():
+    mesh = UnitIntervalMesh(4)
+    V = FunctionSpace(mesh, "CG", 1)
+    W = V * V
+
+    test = TestFunction(W)
+    bcs = [DirichletBC(W.sub(0), Constant(1), 1),
+           DirichletBC(W.sub(1), Constant(2), 1)]
+
+    model = Linear(W.dim(), V.dim())
+    I = torch.eye(V.dim())
+    model.weight.data = torch.cat([I, I], dim=1)
+    model.bias.data = torch.zeros(V.dim())
+    model.eval()
+
+    p1 = ml_operator(model, function_space=V, inputs_format=1)
+    p2 = sum
+
+    results = []
+    for p in (p1, p2):
+        w = Function(W)
+        F = inner(w, test)*dx + inner(p(w), sum(test))*dx
+        solve(F == 0, w, bcs=bcs)
+        results.append(np.ravel(w.dat.data))
+
+    result, expected = results
+    assert np.allclose(result, expected)
diff --git a/tests/firedrake/regression/test_assemble_baseform.py b/tests/firedrake/regression/test_assemble_baseform.py
index ceb1abc47e..8f76b70cb0 100644
--- a/tests/firedrake/regression/test_assemble_baseform.py
+++ b/tests/firedrake/regression/test_assemble_baseform.py
@@ -157,7 +157,7 @@ def test_vector_formsum(a):
     formsum = res + a
     res2 = assemble(formsum)
 
-    assert isinstance(formsum, ufl.form.FormSum)
+    assert isinstance(formsum, ufl.FormSum)
     assert isinstance(res2, Cofunction)
     assert isinstance(preassemble, Cofunction)
     for f, f2 in zip(preassemble.subfunctions, res2.subfunctions):
@@ -183,6 +183,37 @@ def test_matrix_formsum(M):
     assert np.allclose(sumfirst.petscmat[:, :], res2.petscmat[:, :], rtol=1E-14)
 
 
+def test_formsum_vector_self(a):
+    operand = assemble(a)
+    tensor = assemble(a)
+
+    w = (42, 3.1416, 666)
+    formsum = w[0] * tensor + w[1] * operand + w[2] * tensor
+    assert isinstance(formsum, ufl.FormSum)
+
+    result = assemble(formsum, tensor=tensor)
+    assert result is tensor
+
+    expected = assemble(Constant(sum(w)) * a)
+    for f, f2 in zip(expected.subfunctions, result.subfunctions):
+        assert np.allclose(f.dat.data, f2.dat.data, atol=1e-12)
+
+
+def test_formsum_matrix_self(M):
+    operand = assemble(M)
+    tensor = assemble(M)
+
+    w = (42, 3.1416, 666)
+    formsum = w[0] * tensor + w[1] * operand + w[2] * tensor
+    assert isinstance(formsum, ufl.FormSum)
+
+    result = assemble(formsum, tensor=tensor)
+    assert result is tensor
+
+    expected = assemble(Constant(sum(w)) * M)
+    assert np.allclose(expected.petscmat[:, :], result.petscmat[:, :], rtol=1E-14)
+
+
 def test_zero_form(M, f, one):
     zero_form = assemble(action(action(M, f), one))
     assert isinstance(zero_form, ScalarType.type)
diff --git a/tests/firedrake/regression/test_integral_hex.py b/tests/firedrake/regression/test_integral_hex.py
index 395ceed4d4..6d5531300b 100644
--- a/tests/firedrake/regression/test_integral_hex.py
+++ b/tests/firedrake/regression/test_integral_hex.py
@@ -1,4 +1,5 @@
 import pytest
+import numpy as np
 
 from firedrake import *
 from os.path import abspath, dirname, join
@@ -92,3 +93,17 @@ def test_integral_hex_interior_facet_facet_avg():
     assert abs(A - 1. / 20.) < 1.e-14
     EA = assemble(e * a * dS)
     assert abs(EA - E * A) < 1.e-14
+
+
+def test_integral_hex_interior_facet_issue4653():
+    mesh = BoxMesh(2, 1, 1, 2., 1., 1., hexahedral=True)
+    V = FunctionSpace(mesh, "DQ", 1)
+    u = TrialFunction(V)
+    v = TestFunction(V)
+    a0 = inner(u('+'), v('+')) * dS
+    a1 = inner(u('+'), dot(grad(v)('-'), as_vector([1, 2, 3]))) * dS
+    a = a0 + a1
+    A0 = assemble(a0)
+    A1 = assemble(a1)
+    A = assemble(a)
+    assert np.allclose(A.M.values, A0.M.values + A1.M.values)
diff --git a/tests/firedrake/regression/test_interpolate.py b/tests/firedrake/regression/test_interpolate.py
index e8b5cb595a..a2a0d11618 100644
--- a/tests/firedrake/regression/test_interpolate.py
+++ b/tests/firedrake/regression/test_interpolate.py
@@ -599,3 +599,24 @@ def test_interpolator_reuse(family, degree, mode):
 
     # Test for correctness
     assert np.allclose(result.dat.data, expected)
+
+
+def test_mixed_space_bcs():
+    mesh = UnitSquareMesh(2, 2)
+    V = FunctionSpace(mesh, "CG", 1)
+    W = V * V
+    rg = RandomGenerator(PCG64(seed=123456789))
+    w = rg.uniform(W)
+
+    bcs = [DirichletBC(W.sub(0), 0, 1),
+           DirichletBC(W.sub(1), 0, 2),
+           DirichletBC(V, 0, (3, 4))]
+
+    I = assemble(interpolate(sum(TrialFunction(W)), V), bcs=bcs)
+    result = assemble(action(I, w))
+
+    for bc in bcs[:-1]:
+        bc.zero(w)
+    expected = assemble(interpolate(sum(w), V), bcs=bcs[-1:])
+
+    assert np.allclose(result.dat.data, expected.dat.data)