From fbc4ef7e2fe36c05e657c4513d38cfd4e6f68b43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Dokken?=
Date: Mon, 6 Jun 2022 13:59:59 +0000
Subject: [PATCH 1/2] Let conj, real and imag be applied on vector quantities

---
 ffcx/ir/analysis/reconstruct.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/ffcx/ir/analysis/reconstruct.py b/ffcx/ir/analysis/reconstruct.py
index 37e59440c..430014908 100644
--- a/ffcx/ir/analysis/reconstruct.py
+++ b/ffcx/ir/analysis/reconstruct.py
@@ -34,12 +34,10 @@ def handle_conditional(o, ops):
     return symbols
 
 
-def handle_conj(o, ops):
-    if len(ops) != 1:
-        raise RuntimeError("Expecting one operand")
-    if o.ufl_shape != ():
-        raise RuntimeError("Expecting scalar.")
-    return [o._ufl_expr_reconstruct_(x) for x in ops[0]]
+def handle_elementwise_unary(o, ops):
+    if len(ops) > 1:
+        raise RuntimeError("Expecting unary operator.")
+    return [o._ufl_expr_reconstruct_(op) for op in ops[0]]
 
 
 def handle_division(o, ops):
@@ -142,8 +140,8 @@
     ufl.classes.Abs: handle_scalar_nary,
     ufl.classes.MinValue: handle_scalar_nary,
     ufl.classes.MaxValue: handle_scalar_nary,
-    ufl.classes.Real: handle_scalar_nary,
-    ufl.classes.Imag: handle_scalar_nary,
+    ufl.classes.Real: handle_elementwise_unary,
+    ufl.classes.Imag: handle_elementwise_unary,
     ufl.classes.Power: handle_scalar_nary,
     ufl.classes.BesselFunction: handle_scalar_nary,
     ufl.classes.Atan2: handle_scalar_nary,
@@ -151,7 +149,7 @@
     ufl.classes.Division: handle_division,
     ufl.classes.Sum: handle_sum,
     ufl.classes.IndexSum: handle_index_sum,
-    ufl.classes.Conj: handle_conj,
+    ufl.classes.Conj: handle_elementwise_unary,
     ufl.classes.Conditional: handle_conditional,
     ufl.classes.Condition: handle_condition}

From 6f655d4e7aa5c072f357c39f4a325acc3441c8a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B8rgen=20Dokken?=
Date: Mon, 6 Jun 2022 14:00:44 +0000
Subject: [PATCH 2/2] Add test for vector operations on real/imag/conj

---
 test/test_jit_forms.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py
index a656245a6..43e3611a4 100644
--- a/test/test_jit_forms.py
+++ b/test/test_jit_forms.py
@@ -645,3 +645,53 @@ def test_prism(compile_args):
            ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL)
 
     assert np.isclose(sum(b), 0.5)
+
+
+def test_complex_operations(compile_args):
+    mode = "double _Complex"
+    cell = ufl.triangle
+    c_element = ufl.VectorElement("Lagrange", cell, 1)
+    mesh = ufl.Mesh(c_element)
+    element = ufl.VectorElement("DG", cell, 0)
+    V = ufl.FunctionSpace(mesh, element)
+    u = ufl.Coefficient(V)
+    J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx
+    J2 = ufl.real(u[0]) * ufl.imag(u[1]) * ufl.conj(u[0]) * ufl.dx
+    forms = [J1, J2]
+
+    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
+        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+
+    form0 = compiled_forms[0].integrals(module.lib.cell)[0]
+    form1 = compiled_forms[1].integrals(module.lib.cell)[0]
+
+    ffi = module.ffi
+    np_type = cdtype_to_numpy(mode)
+    w1 = np.array([3 + 5j, 8 - 7j], dtype=np_type)
+    c = np.array([], dtype=np_type)
+
+    coords = np.array([[0.0, 0.0, 0.0],
+                       [1.0, 0.0, 0.0],
+                       [0.0, 1.0, 0.0]], dtype=np.float64)
+    J_1 = np.zeros((1), dtype=np_type)
+    kernel0 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *", getattr(form0, f"tabulate_tensor_{np_type}"))
+    kernel0(ffi.cast('{type} *'.format(type=mode), J_1.ctypes.data),
+            ffi.cast('{type} *'.format(type=mode), w1.ctypes.data),
+            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
+            ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL)
+
+    expected_result = np.array([0.5 * np.real(w1[0]) * np.imag(w1[1])
+                                * (np.real(w1[0]) - 1j * np.imag(w1[0]))], dtype=np_type)
+    assert np.allclose(J_1, expected_result)
+
+    J_2 = np.zeros((1), dtype=np_type)
+
+    kernel1 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *", getattr(form1, f"tabulate_tensor_{np_type}"))
+    kernel1(ffi.cast('{type} *'.format(type=mode), J_2.ctypes.data),
+            ffi.cast('{type} *'.format(type=mode), w1.ctypes.data),
+            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
+            ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL)
+
+    assert np.allclose(J_2, expected_result)
+
+    assert np.allclose(J_1, J_2)
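
Context for the patches above (not part of either commit): a minimal UFL-level sketch of the usage the first patch enables, reusing the element setup from the new test; the variable names are illustrative only. Before this change, the removed handle_conj rejected non-scalar operands with RuntimeError("Expecting scalar.") during FFCx's IR reconstruction, so a form like J1 below (and, per the commit subject, the analogous real/imag cases) could not be compiled.

    import ufl

    cell = ufl.triangle
    mesh = ufl.Mesh(ufl.VectorElement("Lagrange", cell, 1))
    V = ufl.FunctionSpace(mesh, ufl.VectorElement("DG", cell, 0))
    u = ufl.Coefficient(V)

    # Elementwise form: conj/real/imag applied to the whole vector u, indexed afterwards.
    J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx
    # Pre-indexed (scalar) form, already supported before the change; the new test
    # asserts that both forms tabulate to the same value.
    J2 = ufl.real(u[0]) * ufl.imag(u[1]) * ufl.conj(u[0]) * ufl.dx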