From e34d836ab7aeeaca2fd540019087c763f8fab3bf Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 10:16:47 +0200 Subject: [PATCH 1/8] [ENH] Adding support for Pytorch GPU --- gempy_engine/core/backend_tensor.py | 18 +++++++++++++++++- gempy_engine/core/utils.py | 6 +++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index d8f5536..4c5f57e 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -107,9 +107,20 @@ def _change_backend(cls, engine_backend: AvailableBackends, use_pykeops: bool = if (use_gpu): cls.use_gpu = True # cls.tensor_backend_pointer['active_backend'].set_default_device("cuda") + # Check if CUDA is available + if not pytorch_copy.cuda.is_available(): + raise RuntimeError("GPU requested but CUDA is not available in PyTorch") + + # Set default device to CUDA + cls.device = pytorch_copy.device("cuda") + pytorch_copy.set_default_device("cuda") + print(f"GPU enabled. Using device: {cls.device}") + print(f"GPU device count: {pytorch_copy.cuda.device_count()}") + print(f"Current GPU device: {pytorch_copy.cuda.current_device()}") else: cls.use_gpu = False - + cls.device = pytorch_copy.device("cpu") + pytorch_copy.set_default_device("cpu") case (_): raise AttributeError( f"Engine Backend: {engine_backend} cannot be used because the correspondent library" @@ -155,6 +166,11 @@ def _array(array_like, dtype=None): if dtype is None: return array_like else: return array_like.type(dtype) else: + # Ensure numpy arrays are contiguous before converting to torch tensor + if isinstance(array_like, numpy.ndarray): + if not array_like.flags.c_contiguous: + array_like = numpy.ascontiguousarray(array_like) + return torch.tensor(array_like, dtype=dtype) def _concatenate(tensors, axis=0, dtype=None): diff --git a/gempy_engine/core/utils.py b/gempy_engine/core/utils.py index 9bacd8d..a7e4398 100644 --- a/gempy_engine/core/utils.py +++ b/gempy_engine/core/utils.py @@ -12,7 +12,11 @@ def cast_type_inplace(data_instance: Any, requires_grad:bool = False): case (gempy_engine.config.AvailableBackends.numpy): data_instance.__dict__[key] = val.astype(BackendTensor.dtype) case (gempy_engine.config.AvailableBackends.PYTORCH): - tensor = BackendTensor.t.from_numpy(val.astype(BackendTensor.dtype)) + # tensor = BackendTensor.t.from_numpy(val.astype(BackendTensor.dtype)) + # if (BackendTensor.use_gpu): + # tensor = tensor.cuda() + + tensor = BackendTensor.tfnp.array(val, dtype=BackendTensor.dtype_obj) tensor.requires_grad = requires_grad data_instance.__dict__[key] = tensor From 5b75c1fd9a7ae4bce53f1cd18d8ee8e213204d55 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 10:17:09 +0200 Subject: [PATCH 2/8] [WIP] Adapting the code that cannot implicitly convert from numpy to torch --- gempy_engine/core/data/interp_output.py | 4 ++-- gempy_engine/core/data/regular_grid.py | 4 +++- .../modules/octrees_topology/_octree_internals.py | 8 ++++---- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/gempy_engine/core/data/interp_output.py b/gempy_engine/core/data/interp_output.py index e3fb4cd..5e602b1 100644 --- a/gempy_engine/core/data/interp_output.py +++ b/gempy_engine/core/data/interp_output.py @@ -127,8 +127,8 @@ def litho_faults_ids(self): faults_ids = BackendTensor.t.rint(self.faults_block) # Get the number of unique lithology IDs - multiplier = len(np.unique(litho_ids)) - + multiplier = len(BackendTensor.t.unique(litho_ids)) + # Generate the 
unique IDs unique_ids = litho_ids + faults_ids * multiplier return unique_ids diff --git a/gempy_engine/core/data/regular_grid.py b/gempy_engine/core/data/regular_grid.py index 32c6c54..bea2e48 100644 --- a/gempy_engine/core/data/regular_grid.py +++ b/gempy_engine/core/data/regular_grid.py @@ -3,6 +3,7 @@ import numpy as np +from ..backend_tensor import BackendTensor from ..utils import _check_and_convert_list_to_array, cast_type_inplace from .kernel_classes.server.input_parser import GridSchema @@ -27,7 +28,7 @@ def __post_init__(self): self._create_regular_grid_3d() - self.original_values = self.values.copy() + self.original_values = BackendTensor.t.copy(self.values) @property def dx(self): @@ -196,5 +197,6 @@ def _create_regular_grid_3d(self): g = np.meshgrid(*coords, indexing="ij") values = np.vstack(tuple(map(np.ravel, g))).T.astype("float64") values = np.ascontiguousarray(values) + values = BackendTensor.tfnp.array(values, dtype=BackendTensor.dtype_obj) self.values = values diff --git a/gempy_engine/modules/octrees_topology/_octree_internals.py b/gempy_engine/modules/octrees_topology/_octree_internals.py index d72bb38..7178031 100644 --- a/gempy_engine/modules/octrees_topology/_octree_internals.py +++ b/gempy_engine/modules/octrees_topology/_octree_internals.py @@ -63,10 +63,10 @@ def _mark_voxel(uv_8): shift_y = uv_8[:, [0, 1, 4, 5]] - uv_8[:, [2, 3, 6, 7]] shift_z = uv_8[:, ::2] - uv_8[:, 1::2] - shift_x_select = np.not_equal(shift_x, 0) - shift_y_select = np.not_equal(shift_y, 0) - shift_z_select = np.not_equal(shift_z, 0) - shift_select_xyz = np.array([shift_x_select, shift_y_select, shift_z_select]) + shift_x_select = BackendTensor.t.not_equal(shift_x, 0) + shift_y_select = BackendTensor.t.not_equal(shift_y, 0) + shift_z_select = BackendTensor.t.not_equal(shift_z, 0) + shift_select_xyz = BackendTensor.t.stack([shift_x_select, shift_y_select, shift_z_select]) idx_select_x = shift_x_select.sum(axis=1, dtype=bool) idx_select_y = shift_y_select.sum(axis=1, dtype=bool) From 9a31c27ce40cfdd2543db26a956746c6261f1bd7 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 10:27:45 +0200 Subject: [PATCH 3/8] [ENH] Adapting interpolator for full gpu --- .../API/interp_single/_octree_generation.py | 40 +++++++++++- gempy_engine/core/backend_tensor.py | 2 + .../octrees_topology/_octree_common.py | 64 ++++++++++++++++++- 3 files changed, 104 insertions(+), 2 deletions(-) diff --git a/gempy_engine/API/interp_single/_octree_generation.py b/gempy_engine/API/interp_single/_octree_generation.py index a5c9c1d..c2b2067 100644 --- a/gempy_engine/API/interp_single/_octree_generation.py +++ b/gempy_engine/API/interp_single/_octree_generation.py @@ -58,7 +58,7 @@ def interpolate_on_octree(interpolation_input: InterpolationInput, options: Inte return next_octree_level -def _generate_corners(regular_grid: RegularGrid, level=1) -> np.ndarray: +def _generate_corners_DEP(regular_grid: RegularGrid, level=1) -> np.ndarray: if regular_grid is None: raise ValueError("Regular grid is None") xyz_coord = regular_grid.values @@ -79,3 +79,41 @@ def stack_left_right(a_edg, d_a): new_xyz = np.stack((x, y, z)).T return np.ascontiguousarray(new_xyz) + +def _generate_corners(regular_grid: RegularGrid, level=1): + if regular_grid is None: + raise ValueError("Regular grid is None") + + # Convert to backend tensors + # xyz_coord = BackendTensor.tfnp.array(regular_grid.values) + # dxdydz = BackendTensor.tfnp.array(regular_grid.dxdydz) + + xyz_coord = regular_grid.values + dxdydz = regular_grid.dxdydz + + 
x_coord, y_coord, z_coord = xyz_coord[:, 0], xyz_coord[:, 1], xyz_coord[:, 2] + dx, dy, dz = dxdydz[0], dxdydz[1], dxdydz[2] + + def stack_left_right(a_edg, d_a): + left = a_edg - d_a / level / 2 + right = a_edg + d_a / level / 2 + return BackendTensor.tfnp.stack([left, right], axis=1) + + x_ = BackendTensor.tfnp.repeat(stack_left_right(x_coord, dx), 4, axis=1) + x = BackendTensor.tfnp.ravel(x_) + + y_temp = BackendTensor.tfnp.repeat(stack_left_right(y_coord, dy), 2, axis=1) + y_ = BackendTensor.tfnp.tile(y_temp, (1, 2)) + y = BackendTensor.tfnp.ravel(y_) + + z_ = BackendTensor.tfnp.tile(stack_left_right(z_coord, dz), (1, 4)) + z = BackendTensor.tfnp.ravel(z_) + + new_xyz = BackendTensor.tfnp.stack([x, y, z], axis=1) + + # For PyTorch, ensure contiguous memory (equivalent to np.ascontiguousarray) + if BackendTensor.engine_backend == AvailableBackends.PYTORCH: + if hasattr(new_xyz, 'contiguous'): + new_xyz = new_xyz.contiguous() + + return new_xyz \ No newline at end of file diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index 4c5f57e..a0d5da0 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -201,6 +201,8 @@ def _transpose(tensor, axes=None): cls.tfnp.transpose = _transpose cls.tfnp.geomspace = lambda start, stop, step: torch.logspace(start, stop, step, base=10) cls.tfnp.abs = lambda tensor, dtype = None: tensor.abs().type(dtype) if dtype is not None else tensor.abs() + cls.tfnp.tile = lambda tensor, repeats: tensor.repeat(repeats) + cls.tfnp.ravel = lambda tensor: tensor.flatten() @classmethod def _wrap_pykeops_functions(cls): diff --git a/gempy_engine/modules/octrees_topology/_octree_common.py b/gempy_engine/modules/octrees_topology/_octree_common.py index 57b7bef..5f9b344 100644 --- a/gempy_engine/modules/octrees_topology/_octree_common.py +++ b/gempy_engine/modules/octrees_topology/_octree_common.py @@ -1,7 +1,8 @@ import numpy as np +from gempy_engine.core.backend_tensor import BackendTensor -def _generate_next_level_centers(xyz_coord, dxdydz, level=1): +def _generate_next_level_centers_DEP(xyz_coord, dxdydz, level=1): def _expand(slr_x, slr_y, slr_z): x_ = np.repeat(slr_x, 4, axis=1) x = x_.ravel() @@ -36,3 +37,64 @@ def stack_left_right(a_edg, d_a): + + +def _generate_next_level_centers(xyz_coord, dxdydz, level=1): + def _expand(slr_x, slr_y, slr_z): + x_ = BackendTensor.tfnp.repeat(slr_x, 4, axis=1) + x = BackendTensor.tfnp.ravel(x_) + + y_temp = BackendTensor.tfnp.repeat(slr_y, 2, axis=1) + y_ = BackendTensor.tfnp.tile(y_temp, (1, 2)) + y = BackendTensor.tfnp.ravel(y_) + + z_ = BackendTensor.tfnp.tile(slr_z, (1, 4)) + z = BackendTensor.tfnp.ravel(z_) + + new_xyz = BackendTensor.tfnp.stack([x, y, z], axis=1) + + # Ensure contiguous memory for PyTorch (equivalent to np.ascontiguousarray) + if BackendTensor.engine_backend == BackendTensor.engine_backend.PYTORCH: + if hasattr(new_xyz, 'contiguous'): + new_xyz = new_xyz.contiguous() + + return new_xyz + + # Convert inputs to backend tensors + xyz_coord = BackendTensor.tfnp.array(xyz_coord) + dxdydz = BackendTensor.tfnp.array(dxdydz) + + x_coord, y_coord, z_coord = xyz_coord[:, 0], xyz_coord[:, 1], xyz_coord[:, 2] + dx, dy, dz = dxdydz[0], dxdydz[1], dxdydz[2] + + def stack_left_right(a_edg, d_a): + left = a_edg - d_a / level / 4 + right = a_edg + d_a / level / 4 + return BackendTensor.tfnp.stack([left, right], axis=1) + + slr_x = stack_left_right(x_coord, dx) + slr_y = stack_left_right(y_coord, dy) + slr_z = stack_left_right(z_coord, dz) + + # Create 
boolean arrays - need to add zeros function to BackendTensor + dtype = bool + match BackendTensor.engine_backend: + case BackendTensor.engine_backend.PYTORCH: + dtype = BackendTensor.tfnp.bool + case BackendTensor.engine_backend.numpy: + dtype = bool + case _: + raise ValueError("Unsupported backend") + + + bool_x = BackendTensor.tfnp.zeros((x_coord.shape[0], 2), dtype=dtype) + bool_y = BackendTensor.tfnp.zeros((y_coord.shape[0], 2), dtype=dtype) + bool_z = BackendTensor.tfnp.zeros((z_coord.shape[0], 2), dtype=dtype) + bool_x[:, 1] = True + bool_y[:, 1] = True + bool_z[:, 1] = True + + new_xyz = _expand(slr_x, slr_y, slr_z) + bool_idx = _expand(bool_x, bool_y, bool_z) + + return new_xyz, bool_idx \ No newline at end of file From 6221c0104a2b3e9392ec445e82434115b1fa31d0 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 13:03:00 +0200 Subject: [PATCH 4/8] [WIP] Adapting triangulation to gpu --- gempy_engine/core/backend_tensor.py | 23 +- .../dual_contouring/fancy_triangulation.py | 113 ++++++---- .../dual_contouring/fancy_triangulation_.py | 213 ++++++++++++++++++ 3 files changed, 300 insertions(+), 49 deletions(-) create mode 100644 gempy_engine/modules/dual_contouring/fancy_triangulation_.py diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index a0d5da0..e90a694 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -145,7 +145,7 @@ def describe_conf(cls): @classmethod def _wrap_pytorch_functions(cls): - from torch import sum, repeat_interleave + from torch import sum, repeat_interleave, flip import torch def _sum(tensor, axis=None, dtype=None, keepdims=False): @@ -183,6 +183,26 @@ def _concatenate(tensors, axis=0, dtype=None): def _transpose(tensor, axes=None): return tensor.transpose(axes[0], axes[1]) + + def _packbits(tensor, axis=None, bitorder="big"): + # PyTorch doesn't have packbits, need to implement or use alternative + # This is a simplified version - you might need a more complete implementation + if bitorder == "little": + # Reverse bits for little endian + # noinspection PyArgumentList + tensor = flip(tensor, axis=[axis] if axis is not None else [-1]) + + # Convert boolean to integers and pack + tensor_int = tensor.to(torch.uint8) + if axis == 0: + # Pack along first axis + result = torch.zeros(tensor.shape[1], dtype=torch.uint8, device=tensor.device) + for i in range(tensor.shape[0]): + result += tensor_int[i] * (2 ** i) + return result + else: + raise NotImplementedError("packbits only implemented for axis=0") + cls.tfnp.sum = _sum cls.tfnp.repeat = _repeat @@ -203,6 +223,7 @@ def _transpose(tensor, axes=None): cls.tfnp.abs = lambda tensor, dtype = None: tensor.abs().type(dtype) if dtype is not None else tensor.abs() cls.tfnp.tile = lambda tensor, repeats: tensor.repeat(repeats) cls.tfnp.ravel = lambda tensor: tensor.flatten() + cls.tfnp.packbits = _packbits @classmethod def _wrap_pykeops_functions(cls): diff --git a/gempy_engine/modules/dual_contouring/fancy_triangulation.py b/gempy_engine/modules/dual_contouring/fancy_triangulation.py index 80e2876..415bb5f 100644 --- a/gempy_engine/modules/dual_contouring/fancy_triangulation.py +++ b/gempy_engine/modules/dual_contouring/fancy_triangulation.py @@ -1,20 +1,31 @@ -# TODO: Make this methods private -import numpy as np - +from gempy_engine.core.backend_tensor import BackendTensor from gempy_engine.core.data.octree_level import OctreeLevel -def get_left_right_array(octree_list: list[OctreeLevel]) -> np.ndarray: +def 
get_left_right_array(octree_list: list[OctreeLevel]): + dtype = bool + match BackendTensor.engine_backend: + case BackendTensor.engine_backend.PYTORCH: + dtype = BackendTensor.tfnp.bool + case BackendTensor.engine_backend.numpy: + dtype = bool + case _: + raise ValueError("Unsupported backend") + # === Local function === def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxel_select_all): # Calculate the voxels from root for active_voxels_per_lvl in voxel_select_all: # * The first level is all True - idx_from_root = np.repeat(idx_from_root[active_voxels_per_lvl], 8) + idx_from_root = BackendTensor.tfnp.repeat(idx_from_root[active_voxels_per_lvl], 8, axis=0) left_right_list = [] voxel_select_op = list(voxel_select_all[1:]) - voxel_select_op.append(np.ones(left_right_all[-1].shape[0], bool)) + voxel_select_op.append(BackendTensor.tfnp.ones( + left_right_all[-1].shape[0], + dtype=dtype + ) + ) left_right_all = left_right_all[::-1] voxel_select_op = voxel_select_op[::-1] @@ -22,13 +33,11 @@ def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxe left_right_per_lvl_dir = left_right_per_lvl[:, dir_idx] for n_rep in range(e): inner = left_right_per_lvl_dir[voxel_select_op[e - n_rep]] - left_right_per_lvl_dir = np.repeat(inner, 8) # ? Is it always e? - # ? Is this repeat wrong? + left_right_per_lvl_dir = BackendTensor.tfnp.repeat(inner, 8, axis=0) left_right_list.append(left_right_per_lvl_dir) left_right_list.append(idx_from_root) - binary_code = np.vstack(left_right_list) - f = binary_code.T + binary_code = BackendTensor.tfnp.vstack(left_right_list) return binary_code # === Local function === @@ -40,27 +49,36 @@ def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxe voxel_select_all = [octree_iter.grid_centers.octree_grid.active_cells for octree_iter in octree_list[1:]] left_right_all = [octree_iter.grid_centers.octree_grid.left_right for octree_iter in octree_list[1:]] - idx_root_x = np.zeros(8, dtype=bool) + dtype = bool + match BackendTensor.engine_backend: + case BackendTensor.engine_backend.PYTORCH: + dtype = BackendTensor.tfnp.bool + case BackendTensor.engine_backend.numpy: + dtype = bool + case _: + raise ValueError("Unsupported backend") + + idx_root_x = BackendTensor.tfnp.zeros(8, dtype=dtype) idx_root_x[4:] = True binary_x = _compute_voxel_binary_code(idx_root_x, 0, left_right_all, voxel_select_all) - idx_root_y = np.zeros(8, dtype=bool) + idx_root_y = BackendTensor.tfnp.zeros(8, dtype=dtype) idx_root_y[[2, 3, 6, 7]] = True binary_y = _compute_voxel_binary_code(idx_root_y, 1, left_right_all, voxel_select_all) - idx_root_z = np.zeros(8, dtype=bool) + idx_root_z = BackendTensor.tfnp.zeros(8, dtype=dtype) idx_root_z[1::2] = True binary_z = _compute_voxel_binary_code(idx_root_z, 2, left_right_all, voxel_select_all) - bool_to_int_x = np.packbits(binary_x, axis=0, bitorder="little") - bool_to_int_y = np.packbits(binary_y, axis=0, bitorder="little") - bool_to_int_z = np.packbits(binary_z, axis=0, bitorder="little") - left_right_array = np.vstack((bool_to_int_x, bool_to_int_y, bool_to_int_z)).T + bool_to_int_x = BackendTensor.tfnp.packbits(binary_x, axis=0, bitorder="little") + bool_to_int_y = BackendTensor.tfnp.packbits(binary_y, axis=0, bitorder="little") + bool_to_int_z = BackendTensor.tfnp.packbits(binary_z, axis=0, bitorder="little") + left_right_array = BackendTensor.tfnp.vstack([bool_to_int_x, bool_to_int_y, bool_to_int_z]).T _StaticTriangulationData.depth = 2 foo = (left_right_array * 
_StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) - sorted_indices = np.argsort(foo) + sorted_indices = BackendTensor.tfnp.argsort(foo) # left_right_array = left_right_array[sorted_indices] return left_right_array @@ -69,29 +87,29 @@ class _StaticTriangulationData: depth: int @staticmethod - def get_pack_directions_into_bits() -> np.ndarray: + def get_pack_directions_into_bits(): base_number = 2 ** _StaticTriangulationData.depth - # return np.array([1, base_number, base_number ** 2], dtype=np.int64) - return np.array([base_number ** 2, base_number, 1], dtype=np.int64) + # return BackendTensor.tfnp.array([1, base_number, base_number ** 2], dtype='int64') + return BackendTensor.tfnp.array([base_number ** 2, base_number, 1], dtype='int64') @staticmethod - def get_base_array(pack_directions_into_bits: np.ndarray) -> np.ndarray: - return np.array([pack_directions_into_bits, pack_directions_into_bits * 2, pack_directions_into_bits * 3], - dtype=np.int64) + def get_base_array(pack_directions_into_bits): + return BackendTensor.tfnp.array([pack_directions_into_bits, pack_directions_into_bits * 2, pack_directions_into_bits * 3], + dtype='int64') @staticmethod def get_base_number() -> int: return 2 ** _StaticTriangulationData.depth -def triangulate(left_right_array: np.ndarray, valid_edges: np.ndarray, tree_depth: int, voxel_normals: np.ndarray): +def triangulate(left_right_array, valid_edges, tree_depth: int, voxel_normals): # * Variables # depending on depth _StaticTriangulationData.depth = tree_depth - edge_vector_a = np.array([0, 0, 0, 0, -1, -1, 1, 1, -1, 1, -1, 1]) - edge_vector_b = np.array([-1, -1,-1, 1, 0, 0,0, 0, -1, 1, -1, 1]) - edge_vector_c = np.array([-1, -1, 1, 1, -1, -1,1, 1, 0, 0, 0, 0]) + edge_vector_a = BackendTensor.tfnp.array([0, 0, 0, 0, -1, -1, 1, 1, -1, 1, -1, 1]) + edge_vector_b = BackendTensor.tfnp.array([-1, -1, -1, 1, 0, 0, 0, 0, -1, 1, -1, 1]) + edge_vector_c = BackendTensor.tfnp.array([-1, -1, 1, 1, -1, -1, 1, 1, 0, 0, 0, 0]) # * Consts voxel_code = (left_right_array * _StaticTriangulationData.get_pack_directions_into_bits()).sum(1).reshape(-1, 1) @@ -131,7 +149,7 @@ def compute_triangles_for_edge(edge_vector_a, edge_vector_b, edge_vector_c, def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_active_edge): match edge_vector: case 0: - _valid_edges = np.ones(_left_right_array_active_edge.shape[0], dtype=bool) + _valid_edges = BackendTensor.tfnp.ones(_left_right_array_active_edge.shape[0], dtype='bool') case 1: _valid_edges = _left_right_array_active_edge[:, coord_col] != _StaticTriangulationData.get_base_number() - 1 case -1: @@ -152,13 +170,13 @@ def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_ac # region: Compress remaining voxel codes per direction # * These are the codes that describe each vertex of the triangle - edge_vector_0 = np.array([edge_vector_a, 0, 0]) - edge_vector_1 = np.array([0, edge_vector_b, 0]) - edge_vector_2 = np.array([0, 0, edge_vector_c]) - - binary_idx_0: np.ndarray = left_right_array_active_edge + edge_vector_0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) - binary_idx_1: np.ndarray = left_right_array_active_edge + edge_vector_1 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) - binary_idx_2: np.ndarray = left_right_array_active_edge + edge_vector_2 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + edge_vector_0 = BackendTensor.tfnp.array([edge_vector_a, 0, 0]) + edge_vector_1 = 
BackendTensor.tfnp.array([0, edge_vector_b, 0]) + edge_vector_2 = BackendTensor.tfnp.array([0, 0, edge_vector_c]) + + binary_idx_0 = left_right_array_active_edge + edge_vector_0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + binary_idx_1 = left_right_array_active_edge + edge_vector_1 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + binary_idx_2 = left_right_array_active_edge + edge_vector_2 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) compressed_binary_idx_0 = (binary_idx_0 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) compressed_binary_idx_1 = (binary_idx_1 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) @@ -172,25 +190,24 @@ def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_ac # endregion # region: Find and remove edges at the border of the extent - code__a_prod_edge = ~mapped_voxel_0.all(axis=0) # mapped_voxel_0.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - code__b_prod_edge = ~mapped_voxel_1.all(axis=0) # mapped_voxel_1.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - code__c_prod_edge = ~mapped_voxel_2.all(axis=0) # mapped_voxel_2.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + code__a_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_0, axis=0) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + code__b_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_1, axis=0) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + code__c_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_2, axis=0) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) valid_edges_within_extent = code__a_prod_edge * code__b_prod_edge * code__c_prod_edge # * Valid in the sense that there are valid voxels around - from ...core.backend_tensor import BackendTensor - code__a_p = BackendTensor.t.array(mapped_voxel_0[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - code__b_p = BackendTensor.t.array(mapped_voxel_1[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - code__c_p = BackendTensor.t.array(mapped_voxel_2[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + code__a_p = BackendTensor.tfnp.array(mapped_voxel_0[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + code__b_p = BackendTensor.tfnp.array(mapped_voxel_1[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + code__c_p = BackendTensor.tfnp.array(mapped_voxel_2[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) if False: debug_code_p = code__a_p + code__b_p + code__c_p # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) # 15 and 17 does not have y - + # endregion # region Convert remaining compressed binary codes to ints - indices_array = np.arange(code__a_p.shape[0]).reshape(-1, 1) + indices_array = 
BackendTensor.tfnp.arange(code__a_p.shape[0]).reshape(-1, 1) x = (code__a_p * indices_array).T[code__a_p.T] y = (code__b_p * indices_array).T[code__b_p.T] z = (code__c_p * indices_array).T[code__c_p.T] @@ -206,8 +223,8 @@ def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_ac raise ValueError("n must be smaller than 12") # flip triangle order if normal is negative - indices = np.vstack((x[normal >= 0], y[normal >= 0], z[normal >= 0])).T - flipped_indices = np.vstack((x[normal < 0], y[normal < 0], z[normal < 0])).T[:, [0, 2, 1]] - indices = np.vstack((indices, flipped_indices)) + indices = BackendTensor.tfnp.vstack([x[normal >= 0], y[normal >= 0], z[normal >= 0]]).T + flipped_indices = BackendTensor.tfnp.vstack([x[normal < 0], y[normal < 0], z[normal < 0]]).T[:, [0, 2, 1]] + indices = BackendTensor.tfnp.vstack([indices, flipped_indices]) return indices diff --git a/gempy_engine/modules/dual_contouring/fancy_triangulation_.py b/gempy_engine/modules/dual_contouring/fancy_triangulation_.py new file mode 100644 index 0000000..80e2876 --- /dev/null +++ b/gempy_engine/modules/dual_contouring/fancy_triangulation_.py @@ -0,0 +1,213 @@ +# TODO: Make this methods private +import numpy as np + +from gempy_engine.core.data.octree_level import OctreeLevel + + +def get_left_right_array(octree_list: list[OctreeLevel]) -> np.ndarray: + # === Local function === + def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxel_select_all): + + # Calculate the voxels from root + for active_voxels_per_lvl in voxel_select_all: # * The first level is all True + idx_from_root = np.repeat(idx_from_root[active_voxels_per_lvl], 8) + + left_right_list = [] + voxel_select_op = list(voxel_select_all[1:]) + voxel_select_op.append(np.ones(left_right_all[-1].shape[0], bool)) + left_right_all = left_right_all[::-1] + voxel_select_op = voxel_select_op[::-1] + + for e, left_right_per_lvl in enumerate(left_right_all): + left_right_per_lvl_dir = left_right_per_lvl[:, dir_idx] + for n_rep in range(e): + inner = left_right_per_lvl_dir[voxel_select_op[e - n_rep]] + left_right_per_lvl_dir = np.repeat(inner, 8) # ? Is it always e? + # ? Is this repeat wrong? 
+ left_right_list.append(left_right_per_lvl_dir) + + left_right_list.append(idx_from_root) + binary_code = np.vstack(left_right_list) + f = binary_code.T + return binary_code + + # === Local function === + + if len(octree_list) == 1: + # * Not only that, the current implementation only works with pure octree starting at [2,2,2] + raise ValueError("Octree list must have more than one level") + + voxel_select_all = [octree_iter.grid_centers.octree_grid.active_cells for octree_iter in octree_list[1:]] + left_right_all = [octree_iter.grid_centers.octree_grid.left_right for octree_iter in octree_list[1:]] + + idx_root_x = np.zeros(8, dtype=bool) + idx_root_x[4:] = True + binary_x = _compute_voxel_binary_code(idx_root_x, 0, left_right_all, voxel_select_all) + + idx_root_y = np.zeros(8, dtype=bool) + idx_root_y[[2, 3, 6, 7]] = True + binary_y = _compute_voxel_binary_code(idx_root_y, 1, left_right_all, voxel_select_all) + + idx_root_z = np.zeros(8, dtype=bool) + idx_root_z[1::2] = True + binary_z = _compute_voxel_binary_code(idx_root_z, 2, left_right_all, voxel_select_all) + + bool_to_int_x = np.packbits(binary_x, axis=0, bitorder="little") + bool_to_int_y = np.packbits(binary_y, axis=0, bitorder="little") + bool_to_int_z = np.packbits(binary_z, axis=0, bitorder="little") + left_right_array = np.vstack((bool_to_int_x, bool_to_int_y, bool_to_int_z)).T + + _StaticTriangulationData.depth = 2 + foo = (left_right_array * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) + + sorted_indices = np.argsort(foo) + # left_right_array = left_right_array[sorted_indices] + return left_right_array + + +class _StaticTriangulationData: + depth: int + + @staticmethod + def get_pack_directions_into_bits() -> np.ndarray: + base_number = 2 ** _StaticTriangulationData.depth + # return np.array([1, base_number, base_number ** 2], dtype=np.int64) + return np.array([base_number ** 2, base_number, 1], dtype=np.int64) + + @staticmethod + def get_base_array(pack_directions_into_bits: np.ndarray) -> np.ndarray: + return np.array([pack_directions_into_bits, pack_directions_into_bits * 2, pack_directions_into_bits * 3], + dtype=np.int64) + + @staticmethod + def get_base_number() -> int: + return 2 ** _StaticTriangulationData.depth + + +def triangulate(left_right_array: np.ndarray, valid_edges: np.ndarray, tree_depth: int, voxel_normals: np.ndarray): + # * Variables + # depending on depth + _StaticTriangulationData.depth = tree_depth + + edge_vector_a = np.array([0, 0, 0, 0, -1, -1, 1, 1, -1, 1, -1, 1]) + edge_vector_b = np.array([-1, -1,-1, 1, 0, 0,0, 0, -1, 1, -1, 1]) + edge_vector_c = np.array([-1, -1, 1, 1, -1, -1,1, 1, 0, 0, 0, 0]) + + # * Consts + voxel_code = (left_right_array * _StaticTriangulationData.get_pack_directions_into_bits()).sum(1).reshape(-1, 1) + # ---------- + + indices = [] + all = [0, 3, 4, 7, 8, 11] + + for n in all: + # TODO: Make sure that changing the edge_vector we do not change + left_right_array_active_edge = left_right_array[valid_edges[:, n]] + _ = compute_triangles_for_edge( + edge_vector_a=edge_vector_a[n], + edge_vector_b=edge_vector_b[n], + edge_vector_c=edge_vector_c[n], + left_right_array_active_edge=left_right_array_active_edge, + voxel_code=voxel_code, + voxel_normals=voxel_normals, + n=n + ) + indices.append(_) + + return indices + + +def compute_triangles_for_edge(edge_vector_a, edge_vector_b, edge_vector_c, + left_right_array_active_edge, voxel_code, voxel_normals, n): + """ + Important concepts to understand this triangulation: + - left_right_array (n_voxels, 
3-directions) contains a unique number per direction describing if it is left (even) or right (odd) and the voxel level + - left_right_array_active_edge (n_voxels - active_voxels_for_given_edge, 3-directions): Subset of left_right for voxels with selected edge active + - valid_edges (n_voxels - active_voxels_for_given_edge, 1): bool + - voxel_code (n_voxels, 1): All the compressed codes of the voxels at the leaves + """ + + # region: Removing edges that does not have voxel next to it. Depending on the evaluated edge, the voxels checked are different + def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_active_edge): + match edge_vector: + case 0: + _valid_edges = np.ones(_left_right_array_active_edge.shape[0], dtype=bool) + case 1: + _valid_edges = _left_right_array_active_edge[:, coord_col] != _StaticTriangulationData.get_base_number() - 1 + case -1: + _valid_edges = _left_right_array_active_edge[:, coord_col] != 0 + case _: + raise ValueError("edge_vector_a must be -1, 0 or 1") + return _valid_edges + + valid_edges_x = check_voxels_exist_next_to_edge(0, edge_vector_a, left_right_array_active_edge) + valid_edges_y = check_voxels_exist_next_to_edge(1, edge_vector_b, left_right_array_active_edge) + valid_edges_z = check_voxels_exist_next_to_edge(2, edge_vector_c, left_right_array_active_edge) + + valid_edges_with_neighbour_voxels = valid_edges_x * valid_edges_y * valid_edges_z # * In the sense of not being on the side of the model + left_right_array_active_edge = left_right_array_active_edge[valid_edges_with_neighbour_voxels] + # * At this point left_right_array_active_edge contains the voxel code of those voxels that have the evaluated + # * edge cut AND that have nearby voxels + # endregion + + # region: Compress remaining voxel codes per direction + # * These are the codes that describe each vertex of the triangle + edge_vector_0 = np.array([edge_vector_a, 0, 0]) + edge_vector_1 = np.array([0, edge_vector_b, 0]) + edge_vector_2 = np.array([0, 0, edge_vector_c]) + + binary_idx_0: np.ndarray = left_right_array_active_edge + edge_vector_0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + binary_idx_1: np.ndarray = left_right_array_active_edge + edge_vector_1 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + binary_idx_2: np.ndarray = left_right_array_active_edge + edge_vector_2 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) + + compressed_binary_idx_0 = (binary_idx_0 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + compressed_binary_idx_1 = (binary_idx_1 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + compressed_binary_idx_2 = (binary_idx_2 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + # endregion + + # region: Map remaining compressed binary code to all the binary codes at leaves + mapped_voxel_0 = (voxel_code - compressed_binary_idx_0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) + mapped_voxel_1 = (voxel_code - compressed_binary_idx_1) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) + mapped_voxel_2 = (voxel_code - compressed_binary_idx_2) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) + # endregion + + # region: Find and remove edges at the border of 
the extent + code__a_prod_edge = ~mapped_voxel_0.all(axis=0) # mapped_voxel_0.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + code__b_prod_edge = ~mapped_voxel_1.all(axis=0) # mapped_voxel_1.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + code__c_prod_edge = ~mapped_voxel_2.all(axis=0) # mapped_voxel_2.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) + + valid_edges_within_extent = code__a_prod_edge * code__b_prod_edge * code__c_prod_edge # * Valid in the sense that there are valid voxels around + + from ...core.backend_tensor import BackendTensor + code__a_p = BackendTensor.t.array(mapped_voxel_0[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + code__b_p = BackendTensor.t.array(mapped_voxel_1[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + code__c_p = BackendTensor.t.array(mapped_voxel_2[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + + if False: + debug_code_p = code__a_p + code__b_p + code__c_p # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) + # 15 and 17 does not have y + + # endregion + + # region Convert remaining compressed binary codes to ints + indices_array = np.arange(code__a_p.shape[0]).reshape(-1, 1) + x = (code__a_p * indices_array).T[code__a_p.T] + y = (code__b_p * indices_array).T[code__b_p.T] + z = (code__c_p * indices_array).T[code__c_p.T] + # endregion + + if n < 4: + normal = (code__a_p * voxel_normals[:, [0]]).T[code__a_p.T] + elif n < 8: + normal = (code__b_p * voxel_normals[:, [1]]).T[code__b_p.T] + elif n < 12: + normal = (code__c_p * voxel_normals[:, [2]]).T[code__c_p.T] + else: + raise ValueError("n must be smaller than 12") + + # flip triangle order if normal is negative + indices = np.vstack((x[normal >= 0], y[normal >= 0], z[normal >= 0])).T + flipped_indices = np.vstack((x[normal < 0], y[normal < 0], z[normal < 0])).T[:, [0, 2, 1]] + indices = np.vstack((indices, flipped_indices)) + + return indices From e6f88da6f7d65f1c03349a4e1b57086d490c92b3 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 13:14:18 +0200 Subject: [PATCH 5/8] [WIP] Adapting bit packing --- gempy_engine/core/backend_tensor.py | 66 +++++++++++++++++++ .../dual_contouring/fancy_triangulation.py | 2 +- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index e90a694..f1722c8 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -203,6 +203,72 @@ def _packbits(tensor, axis=None, bitorder="big"): else: raise NotImplementedError("packbits only implemented for axis=0") + def _packbits(tensor, axis=None, bitorder="big"): + """ + Pack boolean values into uint8 bytes along the specified axis. + For a (4, n) tensor with axis=0, this packs every 4 bits into nibbles, + then pads to create full bytes. 
+ """ + # Convert to uint8 if boolean + if tensor.dtype == torch.bool: + tensor = tensor.to(torch.uint8) + + if axis == 0: + # Pack along axis 0 (rows) + n_rows, n_cols = tensor.shape + n_output_rows = (n_rows + 7) // 8 # Round up to nearest byte boundary + + # Pad with zeros if we don't have multiples of 8 rows + if n_rows % 8 != 0: + padding_rows = 8 - (n_rows % 8) + padding = torch.zeros(padding_rows, n_cols, dtype=torch.uint8, device=tensor.device) + tensor = torch.cat([tensor, padding], dim=0) + + # Reshape to group every 8 rows together: (n_output_rows, 8, n_cols) + tensor_reshaped = tensor.view(n_output_rows, 8, n_cols) + + # Define bit positions (powers of 2) + if bitorder == "little": + # Little endian: LSB first [1, 2, 4, 8, 16, 32, 64, 128] + powers = torch.tensor([1, 2, 4, 8, 16, 32, 64, 128], + dtype=torch.uint8, device=tensor.device).view(1, 8, 1) + else: + # Big endian: MSB first [128, 64, 32, 16, 8, 4, 2, 1] + powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1], + dtype=torch.uint8, device=tensor.device).view(1, 8, 1) + + # Pack bits: multiply each bit by its power and sum along the 8-bit dimension + packed = (tensor_reshaped * powers).sum(dim=1) # Shape: (n_output_rows, n_cols) + + return packed + + elif axis == 1: + # Pack along axis 1 (columns) + n_rows, n_cols = tensor.shape + n_output_cols = (n_cols + 7) // 8 + + # Pad with zeros if needed + if n_cols % 8 != 0: + padding_cols = 8 - (n_cols % 8) + padding = torch.zeros(n_rows, padding_cols, dtype=torch.uint8, device=tensor.device) + tensor = torch.cat([tensor, padding], dim=1) + + # Reshape: (n_rows, n_output_cols, 8) + tensor_reshaped = tensor.view(n_rows, n_output_cols, 8) + + # Define bit positions + if bitorder == "little": + powers = torch.tensor([1, 2, 4, 8, 16, 32, 64, 128], + dtype=torch.uint8, device=tensor.device).view(1, 1, 8) + else: + powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1], + dtype=torch.uint8, device=tensor.device).view(1, 1, 8) + + packed = (tensor_reshaped * powers).sum(dim=2) # Shape: (n_rows, n_output_cols) + return packed + + else: + raise NotImplementedError(f"packbits not implemented for axis={axis}") cls.tfnp.sum = _sum cls.tfnp.repeat = _repeat diff --git a/gempy_engine/modules/dual_contouring/fancy_triangulation.py b/gempy_engine/modules/dual_contouring/fancy_triangulation.py index 415bb5f..c69cf01 100644 --- a/gempy_engine/modules/dual_contouring/fancy_triangulation.py +++ b/gempy_engine/modules/dual_contouring/fancy_triangulation.py @@ -37,7 +37,7 @@ def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxe left_right_list.append(left_right_per_lvl_dir) left_right_list.append(idx_from_root) - binary_code = BackendTensor.tfnp.vstack(left_right_list) + binary_code = BackendTensor.tfnp.stack(left_right_list) return binary_code # === Local function === From 900cab243a6b4abf3684ce00d3476b676cd54237 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 13:51:26 +0200 Subject: [PATCH 6/8] [ENH] Adapt code to do triangulation in GPU --- .../API/dual_contouring/_dual_contouring.py | 8 +- gempy_engine/core/backend_tensor.py | 33 ++- gempy_engine/core/data/interp_output.py | 3 +- .../dual_contouring_interface.py | 146 ++++++------ .../dual_contouring/fancy_triangulation.py | 46 +++- .../dual_contouring/fancy_triangulation_.py | 213 ------------------ .../octrees_topology_interface.py | 2 + 7 files changed, 142 insertions(+), 309 deletions(-) delete mode 100644 gempy_engine/modules/dual_contouring/fancy_triangulation_.py diff --git 
a/gempy_engine/API/dual_contouring/_dual_contouring.py b/gempy_engine/API/dual_contouring/_dual_contouring.py index b831355..97da1b5 100644 --- a/gempy_engine/API/dual_contouring/_dual_contouring.py +++ b/gempy_engine/API/dual_contouring/_dual_contouring.py @@ -1,3 +1,4 @@ +import numpy import warnings from typing import List @@ -90,18 +91,19 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co tree_depth = dc_data_per_surface.tree_depth, voxel_normals = voxel_normal ) - indices = np.vstack(indices) + indices = BackendTensor.t.concatenate(indices, axis=0) # @on vertices_numpy = BackendTensor.t.to_numpy(vertices) + indices_numpy = BackendTensor.t.to_numpy(indices) if TRIMESH_LAST_PASS := True: - vertices_numpy, indices = _last_pass(vertices_numpy, indices) + vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy) stack_meshes.append( DualContouringMesh( vertices_numpy, - indices, + indices_numpy, dc_data_per_stack ) ) diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index f1722c8..7176aab 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -184,24 +184,6 @@ def _concatenate(tensors, axis=0, dtype=None): def _transpose(tensor, axes=None): return tensor.transpose(axes[0], axes[1]) - def _packbits(tensor, axis=None, bitorder="big"): - # PyTorch doesn't have packbits, need to implement or use alternative - # This is a simplified version - you might need a more complete implementation - if bitorder == "little": - # Reverse bits for little endian - # noinspection PyArgumentList - tensor = flip(tensor, axis=[axis] if axis is not None else [-1]) - - # Convert boolean to integers and pack - tensor_int = tensor.to(torch.uint8) - if axis == 0: - # Pack along first axis - result = torch.zeros(tensor.shape[1], dtype=torch.uint8, device=tensor.device) - for i in range(tensor.shape[0]): - result += tensor_int[i] * (2 ** i) - return result - else: - raise NotImplementedError("packbits only implemented for axis=0") def _packbits(tensor, axis=None, bitorder="big"): """ @@ -270,6 +252,19 @@ def _packbits(tensor, axis=None, bitorder="big"): else: raise NotImplementedError(f"packbits not implemented for axis={axis}") + + def _to_numpy(tensor): + """Convert tensor to numpy array, handling GPU tensors properly""" + if hasattr(tensor, 'device') and tensor.device.type == 'cuda': + # Move to CPU first, then detach and convert to numpy + return tensor.cpu().detach().numpy() + elif hasattr(tensor, 'detach'): + # CPU tensor, just detach and convert + return tensor.detach().numpy() + else: + # Not a torch tensor, return as-is + return tensor + cls.tfnp.sum = _sum cls.tfnp.repeat = _repeat cls.tfnp.expand_dims = lambda tensor, axis: tensor @@ -277,7 +272,7 @@ def _packbits(tensor, axis=None, bitorder="big"): cls.tfnp.flip = lambda tensor, axis: tensor.flip(axis) cls.tfnp.hstack = lambda tensors: torch.concat(tensors, dim=1) cls.tfnp.array = _array - cls.tfnp.to_numpy = lambda tensor: tensor.detach().numpy() + cls.tfnp.to_numpy = _to_numpy cls.tfnp.min = lambda tensor, axis: tensor.min(axis=axis)[0] cls.tfnp.max = lambda tensor, axis: tensor.max(axis=axis)[0] cls.tfnp.rint = lambda tensor: tensor.round().type(torch.int32) diff --git a/gempy_engine/core/data/interp_output.py b/gempy_engine/core/data/interp_output.py index 5e602b1..a5cc86e 100644 --- a/gempy_engine/core/data/interp_output.py +++ b/gempy_engine/core/data/interp_output.py @@ -154,6 +154,7 @@ def get_block_from_value_type(self, 
value_type: ValueType, slice_: slice): match (BackendTensor.engine_backend): case AvailableBackends.PYTORCH: - block = block.detach().numpy() + block = BackendTensor.t.to_numpy(block) + # block = block.to_numpy() return block[slice_] diff --git a/gempy_engine/modules/dual_contouring/dual_contouring_interface.py b/gempy_engine/modules/dual_contouring/dual_contouring_interface.py index c17a641..4fbca65 100644 --- a/gempy_engine/modules/dual_contouring/dual_contouring_interface.py +++ b/gempy_engine/modules/dual_contouring/dual_contouring_interface.py @@ -1,13 +1,13 @@ +import torch from typing import Tuple from gempy_engine.config import AvailableBackends from ...core.backend_tensor import BackendTensor from ...core.data.dual_contouring_data import DualContouringData -import numpy as np -def find_intersection_on_edge(_xyz_corners: np.ndarray, scalar_field_on_corners: np.ndarray, - scalar_at_sp: np.ndarray, masking=None) -> Tuple[np.ndarray, np.ndarray]: +def find_intersection_on_edge(_xyz_corners, scalar_field_on_corners, + scalar_at_sp, masking=None) -> Tuple: """This function finds all the intersections for multiple layers per series - The shape of valid edges is n_surfaces * xyz_corners. Where xyz_corners is 8 * the octree leaf @@ -27,13 +27,13 @@ def find_intersection_on_edge(_xyz_corners: np.ndarray, scalar_field_on_corners: scalar_at_sp = scalar_at_sp.reshape((-1, 1, 1)) n_isosurface = scalar_at_sp.shape[0] - xyz_8 = BackendTensor.t.tile(xyz_8, (n_isosurface, 1, 1)) # TODO: Generalize + xyz_8 = BackendTensor.tfnp.tile(xyz_8, (n_isosurface, 1, 1)) # TODO: Generalize # Compute distance of scalar field on the corners - scalar_dx = scalar_8[:, :, :4] - scalar_8[:, :, 4:] + scalar_dx = scalar_8[:, :, :4] - scalar_8[:, :, 4:] scalar_d_y = scalar_8[:, :, [0, 1, 4, 5]] - scalar_8[:, :, [2, 3, 6, 7]] scalar_d_z = scalar_8[:, :, ::2] - scalar_8[:, :, 1::2] - + # Add a tiny value to avoid division by zero scalar_dx += 1e-10 scalar_d_y += 1e-10 @@ -53,16 +53,16 @@ def find_intersection_on_edge(_xyz_corners: np.ndarray, scalar_field_on_corners: intersect_dx = d_x[:, :, :] * weight_x[:, :, :] intersect_dy = d_y[:, :, :] * weight_y[:, :, :] intersect_dz = d_z[:, :, :] * weight_z[:, :, :] - + # Mask invalid edges - valid_edge_x = np.logical_and(weight_x > -0.01, weight_x < 1.01) - valid_edge_y = np.logical_and(weight_y > -0.01, weight_y < 1.01) - valid_edge_z = np.logical_and(weight_z > -0.01, weight_z < 1.01) + valid_edge_x = BackendTensor.tfnp.logical_and(weight_x > -0.01, weight_x < 1.01) + valid_edge_y = BackendTensor.tfnp.logical_and(weight_y > -0.01, weight_y < 1.01) + valid_edge_z = BackendTensor.tfnp.logical_and(weight_z > -0.01, weight_z < 1.01) # * Note(miguel) From this point on the arrays become sparse - xyz_8_edges = BackendTensor.t.hstack([xyz_8[:, 4:], xyz_8[:, [2, 3, 6, 7]], xyz_8[:, 1::2]]) - intersect_segment = BackendTensor.t.hstack([intersect_dx, intersect_dy, intersect_dz]) - valid_edges = BackendTensor.t.hstack([valid_edge_x, valid_edge_y, valid_edge_z])[:, :, 0] + xyz_8_edges = BackendTensor.tfnp.hstack([xyz_8[:, 4:], xyz_8[:, [2, 3, 6, 7]], xyz_8[:, 1::2]]) + intersect_segment = BackendTensor.tfnp.hstack([intersect_dx, intersect_dy, intersect_dz]) + valid_edges = BackendTensor.tfnp.hstack([valid_edge_x, valid_edge_y, valid_edge_z])[:, :, 0] valid_edges = valid_edges > 0 intersection_xyz = xyz_8_edges[valid_edges] + intersect_segment[valid_edges] @@ -88,13 +88,13 @@ def triangulate_dual_contouring(dc_data_per_surface: DualContouringData): x_2 = centers_xyz[valid_voxels][None, :, 
:] manhattan = x_1 - x_2 - zeros = np.isclose(manhattan[:, :, :], 0, .00001) - x_direction_neighbour = np.isclose(manhattan[:, :, 0], dx, .00001) - nx_direction_neighbour = np.isclose(manhattan[:, :, 0], -dx, .00001) - y_direction_neighbour = np.isclose(manhattan[:, :, 1], dy, .00001) - ny_direction_neighbour = np.isclose(manhattan[:, :, 1], -dy, .00001) - z_direction_neighbour = np.isclose(manhattan[:, :, 2], dz, .00001) - nz_direction_neighbour = np.isclose(manhattan[:, :, 2], -dz, .00001) + zeros = BackendTensor.tfnp.isclose(manhattan[:, :, :], 0, .00001) + x_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 0], dx, .00001) + nx_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 0], -dx, .00001) + y_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 1], dy, .00001) + ny_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 1], -dy, .00001) + z_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 2], dz, .00001) + nz_direction_neighbour = BackendTensor.tfnp.isclose(manhattan[:, :, 2], -dz, .00001) x_direction = x_direction_neighbour * zeros[:, :, 1] * zeros[:, :, 2] nx_direction = nx_direction_neighbour * zeros[:, :, 1] * zeros[:, :, 2] @@ -103,12 +103,12 @@ def triangulate_dual_contouring(dc_data_per_surface: DualContouringData): z_direction = z_direction_neighbour * zeros[:, :, 0] * zeros[:, :, 1] nz_direction = nz_direction_neighbour * zeros[:, :, 0] * zeros[:, :, 1] - np.fill_diagonal(x_direction, True) - np.fill_diagonal(nx_direction, True) - np.fill_diagonal(y_direction, True) - np.fill_diagonal(nx_direction, True) - np.fill_diagonal(z_direction, True) - np.fill_diagonal(nz_direction, True) + BackendTensor.tfnp.fill_diagonal(x_direction, True) + BackendTensor.tfnp.fill_diagonal(nx_direction, True) + BackendTensor.tfnp.fill_diagonal(y_direction, True) + BackendTensor.tfnp.fill_diagonal(ny_direction, True) + BackendTensor.tfnp.fill_diagonal(z_direction, True) + BackendTensor.tfnp.fill_diagonal(nz_direction, True) # X edges nynz_direction = ny_direction + nz_direction @@ -129,72 +129,81 @@ def triangulate_dual_contouring(dc_data_per_surface: DualContouringData): xy_direction = x_direction + y_direction # Stack all 12 directions - directions = np.dstack([nynz_direction, nyz_direction, ynz_direction, yz_direction, - nxnz_direction, xnz_direction, nxz_direction, xz_direction, - nxny_direction, nxy_direction, xny_direction, xy_direction]) + directions = BackendTensor.tfnp.dstack([nynz_direction, nyz_direction, ynz_direction, yz_direction, + nxnz_direction, xnz_direction, nxz_direction, xz_direction, + nxny_direction, nxy_direction, xny_direction, xy_direction]) # endregion valid_edg = valid_edges[valid_voxels][:, :] - valid_edg = BackendTensor.t.to_numpy(valid_edg) - + valid_edg = BackendTensor.tfnp.to_numpy(valid_edg) + direction_each_edge = (directions * valid_edg) # Pick only edges with more than 2 voxels nearby three_neighbours = (directions * valid_edg).sum(axis=0) == 3 - matrix_to_right_C_order = np.transpose((direction_each_edge * three_neighbours), (1, 2, 0)) - indices = np.where(matrix_to_right_C_order)[2].reshape(-1, 3) + matrix_to_right_C_order = BackendTensor.tfnp.transpose((direction_each_edge * three_neighbours), (1, 2, 0)) + indices = BackendTensor.tfnp.where(matrix_to_right_C_order)[2].reshape(-1, 3) indices_shift = indices indices_arrays.append(indices_shift) - indices_arrays_f = np.vstack(indices_arrays) + indices_arrays_f = BackendTensor.tfnp.vstack(indices_arrays) return indices_arrays_f -def 
generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: slice, debug: bool = False) -> np.ndarray: +def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: slice, debug: bool = False): # @off - n_edges = dc_data_per_stack.n_edges - valid_edges = dc_data_per_stack.valid_edges + n_edges = dc_data_per_stack.n_edges + valid_edges = dc_data_per_stack.valid_edges valid_voxels = dc_data_per_stack.valid_voxels - xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface] - gradients = dc_data_per_stack.gradients[slice_surface] + xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface] + gradients = dc_data_per_stack.gradients[slice_surface] # @on # * Coordinates for all posible edges (12) and 3 dummy edges_normals in the center - edges_xyz = BackendTensor.t.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj) + edges_xyz = BackendTensor.tfnp.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj) valid_edges = valid_edges > 0 edges_xyz[:, :12][valid_edges] = xyz_on_edge # Normals - edges_normals = BackendTensor.t.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj) + edges_normals = BackendTensor.tfnp.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj) edges_normals[:, :12][valid_edges] = gradients - if OLD_METHOD:=False: + if OLD_METHOD := False: # ! Moureze model does not seems to work with the new method # ! This branch is all nans at least with ch1_1 model - bias_xyz = np.copy(edges_xyz[:, :12]) - isclose = np.isclose(bias_xyz, 0) - bias_xyz[isclose] = np.nan # np zero values to nans - mass_points = np.nanmean(bias_xyz, axis=1) # Mean ignoring nans + bias_xyz = BackendTensor.tfnp.copy(edges_xyz[:, :12]) + isclose = BackendTensor.tfnp.isclose(bias_xyz, 0) + bias_xyz[isclose] = BackendTensor.tfnp.nan # zero values to nans + mass_points = BackendTensor.tfnp.nanmean(bias_xyz, axis=1) # Mean ignoring nans else: # ? 
This is actually doing something - bias_xyz = BackendTensor.t.copy(edges_xyz[:, :12]) - bias_xyz = BackendTensor.t.to_numpy(bias_xyz) - mask = bias_xyz == 0 - masked_arr = np.ma.masked_array(bias_xyz, mask) - mass_points = masked_arr.mean(axis=1) - mass_points = BackendTensor.t.array(mass_points) + bias_xyz = BackendTensor.tfnp.copy(edges_xyz[:, :12]) + if BackendTensor.engine_backend == AvailableBackends.PYTORCH: + # PyTorch doesn't have masked arrays, so we'll use a different approach + mask = bias_xyz == 0 + # Replace zeros with NaN for mean calculation + bias_xyz_masked = BackendTensor.tfnp.where(mask, float('nan'), bias_xyz) + mass_points = BackendTensor.tfnp.nanmean(bias_xyz_masked, axis=1) + else: + # NumPy approach with masked arrays + bias_xyz = BackendTensor.tfnp.to_numpy(bias_xyz) + import numpy as np + mask = bias_xyz == 0 + masked_arr = np.ma.masked_array(bias_xyz, mask) + mass_points = masked_arr.mean(axis=1) + mass_points = BackendTensor.tfnp.array(mass_points) edges_xyz[:, 12] = mass_points edges_xyz[:, 13] = mass_points edges_xyz[:, 14] = mass_points BIAS_STRENGTH = 1 - - bias_x = BackendTensor.t.array([BIAS_STRENGTH, 0, 0], dtype=BackendTensor.dtype_obj) - bias_y = BackendTensor.t.array([0, BIAS_STRENGTH, 0], dtype=BackendTensor.dtype_obj) - bias_z = BackendTensor.t.array([0, 0, BIAS_STRENGTH], dtype=BackendTensor.dtype_obj) - + + bias_x = BackendTensor.tfnp.array([BIAS_STRENGTH, 0, 0], dtype=BackendTensor.dtype_obj) + bias_y = BackendTensor.tfnp.array([0, BIAS_STRENGTH, 0], dtype=BackendTensor.dtype_obj) + bias_z = BackendTensor.tfnp.array([0, 0, BIAS_STRENGTH], dtype=BackendTensor.dtype_obj) + edges_normals[:, 12] = bias_x edges_normals[:, 13] = bias_y edges_normals[:, 14] = bias_z @@ -208,14 +217,15 @@ def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, sli b = (A * edges_xyz).sum(axis=2) if BackendTensor.engine_backend == AvailableBackends.PYTORCH: - transpose_shape = (2, 1) + transpose_shape = (2, 1, 0) # For PyTorch: (batch, dim2, dim1) else: - transpose_shape = (0, 2,1) - - term1 = BackendTensor.t.einsum("ijk, ilj->ikl", A, BackendTensor.t.transpose(A, transpose_shape)) - term2 = BackendTensor.t.linalg.inv(term1) - term3 = BackendTensor.t.einsum("ijk,ik->ij", BackendTensor.t.transpose(A, transpose_shape), b) - vertices = BackendTensor.t.einsum("ijk, ij->ik", term2, term3) + transpose_shape = (0, 2, 1) # For NumPy: (batch, dim2, dim1) + + import torch + term1 = BackendTensor.tfnp.einsum("ijk, ilj->ikl", A, BackendTensor.tfnp.transpose(A, transpose_shape)) + term2 = BackendTensor.tfnp.linalg.inv(term1) + term3 = BackendTensor.tfnp.einsum("ijk,ik->ij", BackendTensor.tfnp.transpose(A, transpose_shape), b) + vertices = BackendTensor.tfnp.einsum("ijk, ij->ik", term2, term3) if debug: dc_data_per_stack.bias_center_mass = edges_xyz[:, 12:].reshape(-1, 3) @@ -237,8 +247,8 @@ def evaluate(self, x): """Evaluates the function at a given point. This is what the solve method is trying to minimize. 
NB: Doesn't work with fixed axes.""" - x = np.array(x) - return np.linalg.norm(np.matmul(self.A, x) - self.b) + x = BackendTensor.tfnp.array(x) + return BackendTensor.tfnp.linalg.norm(BackendTensor.tfnp.matmul(self.A, x) - self.b) def eval_with_pos(self, x): """Evaluates the QEF at a position, returning the same format solve does.""" @@ -248,7 +258,7 @@ def eval_with_pos(self, x): def make_3d(positions, normals): """Returns a QEF that measures the the error from a bunch of normals, each emanating from given positions""" - A = np.array(normals) + A = BackendTensor.tfnp.array(normals) b = [v[0] * n[0] + v[1] * n[1] + v[2] * n[2] for v, n in zip(positions, normals)] fixed_values = [None] * A.shape[1] return QEF(A, b, fixed_values) @@ -256,7 +266,7 @@ def make_3d(positions, normals): def solve(self): """Finds the point that minimizes the error of this QEF, and returns a tuple of the error squared and the point itself""" - result, residual, rank, s = np.linalg.lstsq(self.A, self.b) + result, residual, rank, s = BackendTensor.tfnp.linalg.lstsq(self.A, self.b) if len(residual) == 0: residual = self.evaluate(result) else: diff --git a/gempy_engine/modules/dual_contouring/fancy_triangulation.py b/gempy_engine/modules/dual_contouring/fancy_triangulation.py index c69cf01..59e4dd9 100644 --- a/gempy_engine/modules/dual_contouring/fancy_triangulation.py +++ b/gempy_engine/modules/dual_contouring/fancy_triangulation.py @@ -145,11 +145,19 @@ def compute_triangles_for_edge(edge_vector_a, edge_vector_b, edge_vector_c, - voxel_code (n_voxels, 1): All the compressed codes of the voxels at the leaves """ + match BackendTensor.engine_backend: + case BackendTensor.engine_backend.PYTORCH: + dtype = BackendTensor.tfnp.bool + case BackendTensor.engine_backend.numpy: + dtype = bool + case _: + raise ValueError("Unsupported backend") + # region: Removing edges that does not have voxel next to it. 
Depending on the evaluated edge, the voxels checked are different def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_active_edge): match edge_vector: case 0: - _valid_edges = BackendTensor.tfnp.ones(_left_right_array_active_edge.shape[0], dtype='bool') + _valid_edges = BackendTensor.tfnp.ones(_left_right_array_active_edge.shape[0], dtype=dtype) case 1: _valid_edges = _left_right_array_active_edge[:, coord_col] != _StaticTriangulationData.get_base_number() - 1 case -1: @@ -223,8 +231,36 @@ def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_ac raise ValueError("n must be smaller than 12") # flip triangle order if normal is negative - indices = BackendTensor.tfnp.vstack([x[normal >= 0], y[normal >= 0], z[normal >= 0]]).T - flipped_indices = BackendTensor.tfnp.vstack([x[normal < 0], y[normal < 0], z[normal < 0]]).T[:, [0, 2, 1]] - indices = BackendTensor.tfnp.vstack([indices, flipped_indices]) - + if False: + indices = BackendTensor.tfnp.stack([x[normal >= 0], y[normal >= 0], z[normal >= 0]]).T + flipped_indices = BackendTensor.tfnp.stack( + [ + x[normal < 0], + y[normal < 0], + z[normal < 0]]).T[:, [0, 2, 1] + ] + indices = BackendTensor.tfnp.stack([indices, flipped_indices]) + else: + # flip triangle order if normal is negative + # Create masks for positive and negative normals + positive_mask = normal >= 0 + negative_mask = normal < 0 + + # Extract indices for positive normals (keep original order) + x_pos = x[positive_mask] + y_pos = y[positive_mask] + z_pos = z[positive_mask] + + # Extract indices for negative normals (flip order: x, z, y instead of x, y, z) + x_neg = x[negative_mask] + y_neg = y[negative_mask] + z_neg = z[negative_mask] + + # Combine all indices + all_x = BackendTensor.tfnp.concatenate([x_pos, x_neg], axis=0) + all_y = BackendTensor.tfnp.concatenate([y_pos, z_neg], axis=0) # Note: z_neg for flipped triangles + all_z = BackendTensor.tfnp.concatenate([z_pos, y_neg], axis=0) # Note: y_neg for flipped triangles + + # Stack into final indices array + indices = BackendTensor.tfnp.stack([all_x, all_y, all_z], axis=1) return indices diff --git a/gempy_engine/modules/dual_contouring/fancy_triangulation_.py b/gempy_engine/modules/dual_contouring/fancy_triangulation_.py deleted file mode 100644 index 80e2876..0000000 --- a/gempy_engine/modules/dual_contouring/fancy_triangulation_.py +++ /dev/null @@ -1,213 +0,0 @@ -# TODO: Make this methods private -import numpy as np - -from gempy_engine.core.data.octree_level import OctreeLevel - - -def get_left_right_array(octree_list: list[OctreeLevel]) -> np.ndarray: - # === Local function === - def _compute_voxel_binary_code(idx_from_root, dir_idx: int, left_right_all, voxel_select_all): - - # Calculate the voxels from root - for active_voxels_per_lvl in voxel_select_all: # * The first level is all True - idx_from_root = np.repeat(idx_from_root[active_voxels_per_lvl], 8) - - left_right_list = [] - voxel_select_op = list(voxel_select_all[1:]) - voxel_select_op.append(np.ones(left_right_all[-1].shape[0], bool)) - left_right_all = left_right_all[::-1] - voxel_select_op = voxel_select_op[::-1] - - for e, left_right_per_lvl in enumerate(left_right_all): - left_right_per_lvl_dir = left_right_per_lvl[:, dir_idx] - for n_rep in range(e): - inner = left_right_per_lvl_dir[voxel_select_op[e - n_rep]] - left_right_per_lvl_dir = np.repeat(inner, 8) # ? Is it always e? - # ? Is this repeat wrong? 
- left_right_list.append(left_right_per_lvl_dir) - - left_right_list.append(idx_from_root) - binary_code = np.vstack(left_right_list) - f = binary_code.T - return binary_code - - # === Local function === - - if len(octree_list) == 1: - # * Not only that, the current implementation only works with pure octree starting at [2,2,2] - raise ValueError("Octree list must have more than one level") - - voxel_select_all = [octree_iter.grid_centers.octree_grid.active_cells for octree_iter in octree_list[1:]] - left_right_all = [octree_iter.grid_centers.octree_grid.left_right for octree_iter in octree_list[1:]] - - idx_root_x = np.zeros(8, dtype=bool) - idx_root_x[4:] = True - binary_x = _compute_voxel_binary_code(idx_root_x, 0, left_right_all, voxel_select_all) - - idx_root_y = np.zeros(8, dtype=bool) - idx_root_y[[2, 3, 6, 7]] = True - binary_y = _compute_voxel_binary_code(idx_root_y, 1, left_right_all, voxel_select_all) - - idx_root_z = np.zeros(8, dtype=bool) - idx_root_z[1::2] = True - binary_z = _compute_voxel_binary_code(idx_root_z, 2, left_right_all, voxel_select_all) - - bool_to_int_x = np.packbits(binary_x, axis=0, bitorder="little") - bool_to_int_y = np.packbits(binary_y, axis=0, bitorder="little") - bool_to_int_z = np.packbits(binary_z, axis=0, bitorder="little") - left_right_array = np.vstack((bool_to_int_x, bool_to_int_y, bool_to_int_z)).T - - _StaticTriangulationData.depth = 2 - foo = (left_right_array * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) - - sorted_indices = np.argsort(foo) - # left_right_array = left_right_array[sorted_indices] - return left_right_array - - -class _StaticTriangulationData: - depth: int - - @staticmethod - def get_pack_directions_into_bits() -> np.ndarray: - base_number = 2 ** _StaticTriangulationData.depth - # return np.array([1, base_number, base_number ** 2], dtype=np.int64) - return np.array([base_number ** 2, base_number, 1], dtype=np.int64) - - @staticmethod - def get_base_array(pack_directions_into_bits: np.ndarray) -> np.ndarray: - return np.array([pack_directions_into_bits, pack_directions_into_bits * 2, pack_directions_into_bits * 3], - dtype=np.int64) - - @staticmethod - def get_base_number() -> int: - return 2 ** _StaticTriangulationData.depth - - -def triangulate(left_right_array: np.ndarray, valid_edges: np.ndarray, tree_depth: int, voxel_normals: np.ndarray): - # * Variables - # depending on depth - _StaticTriangulationData.depth = tree_depth - - edge_vector_a = np.array([0, 0, 0, 0, -1, -1, 1, 1, -1, 1, -1, 1]) - edge_vector_b = np.array([-1, -1,-1, 1, 0, 0,0, 0, -1, 1, -1, 1]) - edge_vector_c = np.array([-1, -1, 1, 1, -1, -1,1, 1, 0, 0, 0, 0]) - - # * Consts - voxel_code = (left_right_array * _StaticTriangulationData.get_pack_directions_into_bits()).sum(1).reshape(-1, 1) - # ---------- - - indices = [] - all = [0, 3, 4, 7, 8, 11] - - for n in all: - # TODO: Make sure that changing the edge_vector we do not change - left_right_array_active_edge = left_right_array[valid_edges[:, n]] - _ = compute_triangles_for_edge( - edge_vector_a=edge_vector_a[n], - edge_vector_b=edge_vector_b[n], - edge_vector_c=edge_vector_c[n], - left_right_array_active_edge=left_right_array_active_edge, - voxel_code=voxel_code, - voxel_normals=voxel_normals, - n=n - ) - indices.append(_) - - return indices - - -def compute_triangles_for_edge(edge_vector_a, edge_vector_b, edge_vector_c, - left_right_array_active_edge, voxel_code, voxel_normals, n): - """ - Important concepts to understand this triangulation: - - left_right_array (n_voxels, 
3-directions) contains a unique number per direction describing if it is left (even) or right (odd) and the voxel level - - left_right_array_active_edge (n_voxels - active_voxels_for_given_edge, 3-directions): Subset of left_right for voxels with selected edge active - - valid_edges (n_voxels - active_voxels_for_given_edge, 1): bool - - voxel_code (n_voxels, 1): All the compressed codes of the voxels at the leaves - """ - - # region: Removing edges that does not have voxel next to it. Depending on the evaluated edge, the voxels checked are different - def check_voxels_exist_next_to_edge(coord_col, edge_vector, _left_right_array_active_edge): - match edge_vector: - case 0: - _valid_edges = np.ones(_left_right_array_active_edge.shape[0], dtype=bool) - case 1: - _valid_edges = _left_right_array_active_edge[:, coord_col] != _StaticTriangulationData.get_base_number() - 1 - case -1: - _valid_edges = _left_right_array_active_edge[:, coord_col] != 0 - case _: - raise ValueError("edge_vector_a must be -1, 0 or 1") - return _valid_edges - - valid_edges_x = check_voxels_exist_next_to_edge(0, edge_vector_a, left_right_array_active_edge) - valid_edges_y = check_voxels_exist_next_to_edge(1, edge_vector_b, left_right_array_active_edge) - valid_edges_z = check_voxels_exist_next_to_edge(2, edge_vector_c, left_right_array_active_edge) - - valid_edges_with_neighbour_voxels = valid_edges_x * valid_edges_y * valid_edges_z # * In the sense of not being on the side of the model - left_right_array_active_edge = left_right_array_active_edge[valid_edges_with_neighbour_voxels] - # * At this point left_right_array_active_edge contains the voxel code of those voxels that have the evaluated - # * edge cut AND that have nearby voxels - # endregion - - # region: Compress remaining voxel codes per direction - # * These are the codes that describe each vertex of the triangle - edge_vector_0 = np.array([edge_vector_a, 0, 0]) - edge_vector_1 = np.array([0, edge_vector_b, 0]) - edge_vector_2 = np.array([0, 0, edge_vector_c]) - - binary_idx_0: np.ndarray = left_right_array_active_edge + edge_vector_0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) - binary_idx_1: np.ndarray = left_right_array_active_edge + edge_vector_1 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) - binary_idx_2: np.ndarray = left_right_array_active_edge + edge_vector_2 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 3-directions) - - compressed_binary_idx_0 = (binary_idx_0 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - compressed_binary_idx_1 = (binary_idx_1 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - compressed_binary_idx_2 = (binary_idx_2 * _StaticTriangulationData.get_pack_directions_into_bits()).sum(axis=1) # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - # endregion - - # region: Map remaining compressed binary code to all the binary codes at leaves - mapped_voxel_0 = (voxel_code - compressed_binary_idx_0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) - mapped_voxel_1 = (voxel_code - compressed_binary_idx_1) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) - mapped_voxel_2 = (voxel_code - compressed_binary_idx_2) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges) - # endregion - - # region: Find and remove edges at the border of 
the extent - code__a_prod_edge = ~mapped_voxel_0.all(axis=0) # mapped_voxel_0.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - code__b_prod_edge = ~mapped_voxel_1.all(axis=0) # mapped_voxel_1.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - code__c_prod_edge = ~mapped_voxel_2.all(axis=0) # mapped_voxel_2.prod(axis=0) == 0 # (n_voxels - active_voxels_for_given_edge - invalid_edges, 1) - - valid_edges_within_extent = code__a_prod_edge * code__b_prod_edge * code__c_prod_edge # * Valid in the sense that there are valid voxels around - - from ...core.backend_tensor import BackendTensor - code__a_p = BackendTensor.t.array(mapped_voxel_0[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - code__b_p = BackendTensor.t.array(mapped_voxel_1[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - code__c_p = BackendTensor.t.array(mapped_voxel_2[:, valid_edges_within_extent] == 0) # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - - if False: - debug_code_p = code__a_p + code__b_p + code__c_p # (n_voxels, n_voxels - active_voxels_for_given_edge - invalid_edges - edges_at_extent_border) - # 15 and 17 does not have y - - # endregion - - # region Convert remaining compressed binary codes to ints - indices_array = np.arange(code__a_p.shape[0]).reshape(-1, 1) - x = (code__a_p * indices_array).T[code__a_p.T] - y = (code__b_p * indices_array).T[code__b_p.T] - z = (code__c_p * indices_array).T[code__c_p.T] - # endregion - - if n < 4: - normal = (code__a_p * voxel_normals[:, [0]]).T[code__a_p.T] - elif n < 8: - normal = (code__b_p * voxel_normals[:, [1]]).T[code__b_p.T] - elif n < 12: - normal = (code__c_p * voxel_normals[:, [2]]).T[code__c_p.T] - else: - raise ValueError("n must be smaller than 12") - - # flip triangle order if normal is negative - indices = np.vstack((x[normal >= 0], y[normal >= 0], z[normal >= 0])).T - flipped_indices = np.vstack((x[normal < 0], y[normal < 0], z[normal < 0])).T[:, [0, 2, 1]] - indices = np.vstack((indices, flipped_indices)) - - return indices diff --git a/gempy_engine/modules/octrees_topology/octrees_topology_interface.py b/gempy_engine/modules/octrees_topology/octrees_topology_interface.py index 853bfe6..a8fdcd1 100644 --- a/gempy_engine/modules/octrees_topology/octrees_topology_interface.py +++ b/gempy_engine/modules/octrees_topology/octrees_topology_interface.py @@ -8,6 +8,7 @@ from ._octree_internals import compute_next_octree_locations from gempy_engine.core.data.octree_level import OctreeLevel from gempy_engine.core.data.engine_grid import EngineGrid +from ...core.backend_tensor import BackendTensor from ...core.data.options.evaluation_options import EvaluationOptions @@ -105,6 +106,7 @@ def _expand_octree(active_cells_eo: np.ndarray, n_rep: int) -> np.ndarray: for e, octree in enumerate(octree_list[1:level + 1]): n_rep = (level - e) active_cells: np.ndarray = octree.grid_centers.octree_grid.active_cells + active_cells = BackendTensor.t.to_numpy(active_cells) local_active_cells: np.ndarray = np.where(active_cells)[0] shape: np.ndarray = octree.grid_centers.octree_grid_shape From dc0127f00b06667366999a87b362f657822ccdfa Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Mon, 22 Sep 2025 16:02:42 +0200 Subject: [PATCH 7/8] [META] Deactivate moving arrays to pytorch gpu --- 
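The hunk below wraps the CUDA default-device setup in `if False:`, so even when `use_gpu=True` and CUDA is available the global default device is no longer switched to "cuda"; tensors stay on the CPU for now. A minimal sketch of the same toggle expressed through an explicit flag instead of a hard-coded `if False:` (the name MOVE_TENSORS_TO_GPU is illustrative and not part of the patch):

import torch

MOVE_TENSORS_TO_GPU = False  # illustrative stand-in for the hard-coded `if False:` below

def select_default_device(use_gpu: bool) -> torch.device:
    # Mirror the guarded block in _change_backend: only switch the global
    # default device when GPU use is requested, enabled, and CUDA is present.
    if use_gpu and MOVE_TENSORS_TO_GPU and torch.cuda.is_available():
        device = torch.device("cuda")
        torch.set_default_device("cuda")
    else:
        device = torch.device("cpu")
        torch.set_default_device("cpu")
    return device

print(select_default_device(use_gpu=True))  # prints "cpu" while the flag is off
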
gempy_engine/core/backend_tensor.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index 7176aab..f6d32c2 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -110,13 +110,13 @@ def _change_backend(cls, engine_backend: AvailableBackends, use_pykeops: bool = # Check if CUDA is available if not pytorch_copy.cuda.is_available(): raise RuntimeError("GPU requested but CUDA is not available in PyTorch") - - # Set default device to CUDA - cls.device = pytorch_copy.device("cuda") - pytorch_copy.set_default_device("cuda") - print(f"GPU enabled. Using device: {cls.device}") - print(f"GPU device count: {pytorch_copy.cuda.device_count()}") - print(f"Current GPU device: {pytorch_copy.cuda.current_device()}") + if False: + # Set default device to CUDA + cls.device = pytorch_copy.device("cuda") + pytorch_copy.set_default_device("cuda") + print(f"GPU enabled. Using device: {cls.device}") + print(f"GPU device count: {pytorch_copy.cuda.device_count()}") + print(f"Current GPU device: {pytorch_copy.cuda.current_device()}") else: cls.use_gpu = False cls.device = pytorch_copy.device("cpu") From c6925dd556c052352a72aa0d2e647e0542b87185 Mon Sep 17 00:00:00 2001 From: MigueldelaVarga Date: Wed, 24 Sep 2025 14:31:15 +0200 Subject: [PATCH 8/8] [BUG] --- gempy_engine/core/backend_tensor.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/gempy_engine/core/backend_tensor.py b/gempy_engine/core/backend_tensor.py index f6d32c2..f427c69 100644 --- a/gempy_engine/core/backend_tensor.py +++ b/gempy_engine/core/backend_tensor.py @@ -145,7 +145,7 @@ def describe_conf(cls): @classmethod def _wrap_pytorch_functions(cls): - from torch import sum, repeat_interleave, flip + from torch import sum, repeat_interleave, isclose import torch def _sum(tensor, axis=None, dtype=None, keepdims=False): @@ -265,6 +265,14 @@ def _to_numpy(tensor): # Not a torch tensor, return as-is return tensor + def _fill_diagonal(tensor, value): + """Fill the diagonal of a 2D tensor with the given value""" + if tensor.dim() != 2: + raise ValueError("fill_diagonal only supports 2D tensors") + diagonal_indices = torch.arange(min(tensor.size(0), tensor.size(1))) + tensor[diagonal_indices, diagonal_indices] = value + return tensor + cls.tfnp.sum = _sum cls.tfnp.repeat = _repeat cls.tfnp.expand_dims = lambda tensor, axis: tensor @@ -285,6 +293,14 @@ def _to_numpy(tensor): cls.tfnp.tile = lambda tensor, repeats: tensor.repeat(repeats) cls.tfnp.ravel = lambda tensor: tensor.flatten() cls.tfnp.packbits = _packbits + cls.tfnp.fill_diagonal = _fill_diagonal + cls.tfnp.isclose = lambda a, b, rtol=1e-05, atol=1e-08, equal_nan=False: isclose( + a, + torch.tensor(b, dtype=a.dtype, device=a.device), + rtol=rtol, + atol=atol, + equal_nan=equal_nan + ) @classmethod def _wrap_pykeops_functions(cls):
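
The two wrappers added above cover NumPy behaviours that torch does not expose under the same name: numpy.fill_diagonal has no free-function counterpart in torch (only the in-place Tensor.fill_diagonal_ method), and torch.isclose takes a tensor rather than a Python scalar as its second argument. A small standalone sketch of the same logic, restating the wrapper bodies rather than importing gempy_engine (like the wrapper, it only handles the 2-D case):

import torch

def fill_diagonal(tensor: torch.Tensor, value: float) -> torch.Tensor:
    # Same approach as the wrapper: index the main diagonal of a 2-D tensor
    # and assign the value in place.
    if tensor.dim() != 2:
        raise ValueError("fill_diagonal only supports 2D tensors")
    idx = torch.arange(min(tensor.size(0), tensor.size(1)))
    tensor[idx, idx] = value
    return tensor

m = fill_diagonal(torch.zeros(3, 4), 7.0)
print(m)  # the three diagonal entries are now 7.0

# The isclose wrapper promotes the scalar to a tensor on the same device and
# dtype before comparing, matching numpy.isclose(a, 2.0):
a = torch.tensor([1.0, 2.0, 2.0000001])
print(torch.isclose(a, torch.tensor(2.0, dtype=a.dtype, device=a.device)))
# tensor([False,  True,  True])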