8 changes: 5 additions & 3 deletions gempy_engine/API/dual_contouring/_dual_contouring.py
@@ -1,3 +1,4 @@
import numpy
import warnings
from typing import List

@@ -90,18 +91,19 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
tree_depth = dc_data_per_surface.tree_depth,
voxel_normals = voxel_normal
)
indices = np.vstack(indices)
indices = BackendTensor.t.concatenate(indices, axis=0)

# @on
vertices_numpy = BackendTensor.t.to_numpy(vertices)
indices_numpy = BackendTensor.t.to_numpy(indices)

if TRIMESH_LAST_PASS := True:
vertices_numpy, indices = _last_pass(vertices_numpy, indices)
vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)

stack_meshes.append(
DualContouringMesh(
vertices_numpy,
indices,
indices_numpy,
dc_data_per_stack
)
)
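To make the intent of the change above concrete, here is a minimal sketch (not part of the PR) of the pattern it adopts: keep the per-surface index arrays in the active backend and convert to NumPy once at the mesh boundary. `faces_a` and `faces_b` are illustrative stand-ins; `BackendTensor.tfnp.array`, `t.concatenate`, and `t.to_numpy` are used as they appear in the diff.

from gempy_engine.core.backend_tensor import BackendTensor

# Illustrative per-surface triangle index arrays (stand-ins for the real ones)
faces_a = BackendTensor.tfnp.array([[0, 1, 2], [2, 3, 0]])
faces_b = BackendTensor.tfnp.array([[4, 5, 6]])

# Concatenate in the active backend (NumPy or PyTorch) instead of np.vstack
indices = BackendTensor.t.concatenate([faces_a, faces_b], axis=0)

# Convert once at the boundary; under PyTorch this detaches (and leaves the GPU)
indices_numpy = BackendTensor.t.to_numpy(indices)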
40 changes: 39 additions & 1 deletion gempy_engine/API/interp_single/_octree_generation.py
@@ -58,7 +58,7 @@ def interpolate_on_octree(interpolation_input: InterpolationInput, options: Inte
return next_octree_level


def _generate_corners(regular_grid: RegularGrid, level=1) -> np.ndarray:
def _generate_corners_DEP(regular_grid: RegularGrid, level=1) -> np.ndarray:
if regular_grid is None: raise ValueError("Regular grid is None")

xyz_coord = regular_grid.values
@@ -79,3 +79,41 @@ def stack_left_right(a_edg, d_a):

new_xyz = np.stack((x, y, z)).T
return np.ascontiguousarray(new_xyz)

def _generate_corners(regular_grid: RegularGrid, level=1):
if regular_grid is None:
raise ValueError("Regular grid is None")

# Convert to backend tensors
# xyz_coord = BackendTensor.tfnp.array(regular_grid.values)
# dxdydz = BackendTensor.tfnp.array(regular_grid.dxdydz)

xyz_coord = regular_grid.values
dxdydz = regular_grid.dxdydz

x_coord, y_coord, z_coord = xyz_coord[:, 0], xyz_coord[:, 1], xyz_coord[:, 2]
dx, dy, dz = dxdydz[0], dxdydz[1], dxdydz[2]

def stack_left_right(a_edg, d_a):
left = a_edg - d_a / level / 2
right = a_edg + d_a / level / 2
return BackendTensor.tfnp.stack([left, right], axis=1)

x_ = BackendTensor.tfnp.repeat(stack_left_right(x_coord, dx), 4, axis=1)
x = BackendTensor.tfnp.ravel(x_)

y_temp = BackendTensor.tfnp.repeat(stack_left_right(y_coord, dy), 2, axis=1)
y_ = BackendTensor.tfnp.tile(y_temp, (1, 2))
y = BackendTensor.tfnp.ravel(y_)

z_ = BackendTensor.tfnp.tile(stack_left_right(z_coord, dz), (1, 4))
z = BackendTensor.tfnp.ravel(z_)

new_xyz = BackendTensor.tfnp.stack([x, y, z], axis=1)

# For PyTorch, ensure contiguous memory (equivalent to np.ascontiguousarray)
if BackendTensor.engine_backend == AvailableBackends.PYTORCH:
if hasattr(new_xyz, 'contiguous'):
new_xyz = new_xyz.contiguous()

return new_xyz
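A quick way to see what the repeat/tile pattern in the new `_generate_corners` produces: the same steps written in plain NumPy for a single voxel centred at the origin with spacing 2 (numbers purely illustrative) yield the 8 cell corners.

import numpy as np

centre = np.array([[0.0, 0.0, 0.0]])   # one voxel centre
dx = dy = dz = 2.0
level = 1

def stack_left_right(a, d):
    return np.stack([a - d / level / 2, a + d / level / 2], axis=1)

x = np.repeat(stack_left_right(centre[:, 0], dx), 4, axis=1).ravel()
y = np.tile(np.repeat(stack_left_right(centre[:, 1], dy), 2, axis=1), (1, 2)).ravel()
z = np.tile(stack_left_right(centre[:, 2], dz), (1, 4)).ravel()

corners = np.stack([x, y, z], axis=1)
# shape (8, 3): (-1,-1,-1), (-1,-1,1), (-1,1,-1), (-1,1,1), (1,-1,-1), (1,-1,1), (1,1,-1), (1,1,1)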
122 changes: 119 additions & 3 deletions gempy_engine/core/backend_tensor.py
@@ -107,9 +107,20 @@ def _change_backend(cls, engine_backend: AvailableBackends, use_pykeops: bool =
if (use_gpu):
cls.use_gpu = True
# cls.tensor_backend_pointer['active_backend'].set_default_device("cuda")
# Check if CUDA is available
if not pytorch_copy.cuda.is_available():
raise RuntimeError("GPU requested but CUDA is not available in PyTorch")
if False:
# Set default device to CUDA
cls.device = pytorch_copy.device("cuda")
pytorch_copy.set_default_device("cuda")
print(f"GPU enabled. Using device: {cls.device}")
print(f"GPU device count: {pytorch_copy.cuda.device_count()}")
print(f"Current GPU device: {pytorch_copy.cuda.current_device()}")
else:
cls.use_gpu = False

cls.device = pytorch_copy.device("cpu")
pytorch_copy.set_default_device("cpu")
case (_):
raise AttributeError(
f"Engine Backend: {engine_backend} cannot be used because the correspondent library"
@@ -134,7 +145,7 @@ def describe_conf(cls):

@classmethod
def _wrap_pytorch_functions(cls):
from torch import sum, repeat_interleave
from torch import sum, repeat_interleave, isclose
import torch

def _sum(tensor, axis=None, dtype=None, keepdims=False):
@@ -155,6 +166,11 @@ def _array(array_like, dtype=None):
if dtype is None: return array_like
else: return array_like.type(dtype)
else:
# Ensure numpy arrays are contiguous before converting to torch tensor
if isinstance(array_like, numpy.ndarray):
if not array_like.flags.c_contiguous:
array_like = numpy.ascontiguousarray(array_like)

return torch.tensor(array_like, dtype=dtype)

def _concatenate(tensors, axis=0, dtype=None):
@@ -167,6 +183,95 @@ def _concatenate(tensors, axis=0, dtype=None):

def _transpose(tensor, axes=None):
return tensor.transpose(axes[0], axes[1])


def _packbits(tensor, axis=None, bitorder="big"):
"""
Pack boolean values into uint8 bytes along the specified axis.
For a (4, n) tensor with axis=0, this packs every 4 bits into nibbles,
then pads to create full bytes.
"""
# Convert to uint8 if boolean
if tensor.dtype == torch.bool:
tensor = tensor.to(torch.uint8)

if axis == 0:
# Pack along axis 0 (rows)
n_rows, n_cols = tensor.shape
n_output_rows = (n_rows + 7) // 8 # Round up to nearest byte boundary

# Pad with zeros if we don't have multiples of 8 rows
if n_rows % 8 != 0:
padding_rows = 8 - (n_rows % 8)
padding = torch.zeros(padding_rows, n_cols, dtype=torch.uint8, device=tensor.device)
tensor = torch.cat([tensor, padding], dim=0)

# Reshape to group every 8 rows together: (n_output_rows, 8, n_cols)
tensor_reshaped = tensor.view(n_output_rows, 8, n_cols)

# Define bit positions (powers of 2)
if bitorder == "little":
# Little endian: LSB first [1, 2, 4, 8, 16, 32, 64, 128]
powers = torch.tensor([1, 2, 4, 8, 16, 32, 64, 128],
dtype=torch.uint8, device=tensor.device).view(1, 8, 1)
else:
# Big endian: MSB first [128, 64, 32, 16, 8, 4, 2, 1]
powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1],
dtype=torch.uint8, device=tensor.device).view(1, 8, 1)

# Pack bits: multiply each bit by its power and sum along the 8-bit dimension
packed = (tensor_reshaped * powers).sum(dim=1) # Shape: (n_output_rows, n_cols)

return packed

elif axis == 1:
# Pack along axis 1 (columns)
n_rows, n_cols = tensor.shape
n_output_cols = (n_cols + 7) // 8

# Pad with zeros if needed
if n_cols % 8 != 0:
padding_cols = 8 - (n_cols % 8)
padding = torch.zeros(n_rows, padding_cols, dtype=torch.uint8, device=tensor.device)
tensor = torch.cat([tensor, padding], dim=1)

# Reshape: (n_rows, n_output_cols, 8)
tensor_reshaped = tensor.view(n_rows, n_output_cols, 8)

# Define bit positions
if bitorder == "little":
powers = torch.tensor([1, 2, 4, 8, 16, 32, 64, 128],
dtype=torch.uint8, device=tensor.device).view(1, 1, 8)
else:
powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1],
dtype=torch.uint8, device=tensor.device).view(1, 1, 8)

packed = (tensor_reshaped * powers).sum(dim=2) # Shape: (n_rows, n_output_cols)
return packed

else:
raise NotImplementedError(f"packbits not implemented for axis={axis}")


def _to_numpy(tensor):
"""Convert tensor to numpy array, handling GPU tensors properly"""
if hasattr(tensor, 'device') and tensor.device.type == 'cuda':
# Move to CPU first, then detach and convert to numpy
return tensor.cpu().detach().numpy()
elif hasattr(tensor, 'detach'):
# CPU tensor, just detach and convert
return tensor.detach().numpy()
else:
# Not a torch tensor, return as-is
return tensor

def _fill_diagonal(tensor, value):
"""Fill the diagonal of a 2D tensor with the given value"""
if tensor.dim() != 2:
raise ValueError("fill_diagonal only supports 2D tensors")
diagonal_indices = torch.arange(min(tensor.size(0), tensor.size(1)))
tensor[diagonal_indices, diagonal_indices] = value
return tensor

cls.tfnp.sum = _sum
cls.tfnp.repeat = _repeat
@@ -175,7 +280,7 @@ def _transpose(tensor, axes=None):
cls.tfnp.flip = lambda tensor, axis: tensor.flip(axis)
cls.tfnp.hstack = lambda tensors: torch.concat(tensors, dim=1)
cls.tfnp.array = _array
cls.tfnp.to_numpy = lambda tensor: tensor.detach().numpy()
cls.tfnp.to_numpy = _to_numpy
cls.tfnp.min = lambda tensor, axis: tensor.min(axis=axis)[0]
cls.tfnp.max = lambda tensor, axis: tensor.max(axis=axis)[0]
cls.tfnp.rint = lambda tensor: tensor.round().type(torch.int32)
@@ -185,6 +290,17 @@ def _transpose(tensor, axes=None):
cls.tfnp.transpose = _transpose
cls.tfnp.geomspace = lambda start, stop, step: torch.logspace(start, stop, step, base=10)
cls.tfnp.abs = lambda tensor, dtype = None: tensor.abs().type(dtype) if dtype is not None else tensor.abs()
cls.tfnp.tile = lambda tensor, repeats: tensor.repeat(repeats)
cls.tfnp.ravel = lambda tensor: tensor.flatten()
cls.tfnp.packbits = _packbits
cls.tfnp.fill_diagonal = _fill_diagonal
cls.tfnp.isclose = lambda a, b, rtol=1e-05, atol=1e-08, equal_nan=False: isclose(
a,
torch.tensor(b, dtype=a.dtype, device=a.device),
rtol=rtol,
atol=atol,
equal_nan=equal_nan
)

@classmethod
def _wrap_pykeops_functions(cls):
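Most of the new surface in this file is the `_packbits` wrapper. As a sanity reference (not part of the PR), this is the NumPy behaviour it mirrors for the axis=0, big-endian case, with values chosen only for illustration:

import numpy as np

bits = np.array([[1, 0, 1],
                 [0, 1, 1],
                 [1, 1, 0],
                 [0, 0, 1]], dtype=bool)

# np.packbits pads the 4 rows to 8 with zeros and packs each column MSB-first,
# matching what the torch implementation above does for axis=0.
packed = np.packbits(bits, axis=0, bitorder="big")
assert packed.tolist() == [[160, 96, 208]]   # 0b10100000, 0b01100000, 0b11010000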
7 changes: 4 additions & 3 deletions gempy_engine/core/data/interp_output.py
@@ -127,8 +127,8 @@ def litho_faults_ids(self):
faults_ids = BackendTensor.t.rint(self.faults_block)

# Get the number of unique lithology IDs
multiplier = len(np.unique(litho_ids))

multiplier = len(BackendTensor.t.unique(litho_ids))
# Generate the unique IDs
unique_ids = litho_ids + faults_ids * multiplier
return unique_ids
@@ -154,6 +154,7 @@ def get_block_from_value_type(self, value_type: ValueType, slice_: slice):

match (BackendTensor.engine_backend):
case AvailableBackends.PYTORCH:
block = block.detach().numpy()
block = BackendTensor.t.to_numpy(block)
# block = block.to_numpy()

return block[slice_]
4 changes: 3 additions & 1 deletion gempy_engine/core/data/regular_grid.py
@@ -3,6 +3,7 @@

import numpy as np

from ..backend_tensor import BackendTensor
from ..utils import _check_and_convert_list_to_array, cast_type_inplace
from .kernel_classes.server.input_parser import GridSchema

@@ -27,7 +28,7 @@ def __post_init__(self):

self._create_regular_grid_3d()

self.original_values = self.values.copy()
self.original_values = BackendTensor.t.copy(self.values)

@property
def dx(self):
@@ -196,5 +197,6 @@ def _create_regular_grid_3d(self):
g = np.meshgrid(*coords, indexing="ij")
values = np.vstack(tuple(map(np.ravel, g))).T.astype("float64")
values = np.ascontiguousarray(values)
values = BackendTensor.tfnp.array(values, dtype=BackendTensor.dtype_obj)

self.values = values
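The copy in `__post_init__` changes because `values` is now a backend tensor rather than a plain `np.ndarray`, and torch tensors clone rather than copy. A minimal illustration of the distinction (assumes PyTorch is installed; `BackendTensor.t.copy` is expected to dispatch between the two):

import numpy as np
import torch

np_values = np.zeros((3, 3))
np_copy = np_values.copy()      # NumPy's own copy

t_values = torch.zeros((3, 3))
t_copy = t_values.clone()       # the torch equivalent of ndarray.copy()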
6 changes: 5 additions & 1 deletion gempy_engine/core/utils.py
@@ -12,7 +12,11 @@ def cast_type_inplace(data_instance: Any, requires_grad:bool = False):
case (gempy_engine.config.AvailableBackends.numpy):
data_instance.__dict__[key] = val.astype(BackendTensor.dtype)
case (gempy_engine.config.AvailableBackends.PYTORCH):
tensor = BackendTensor.t.from_numpy(val.astype(BackendTensor.dtype))
# tensor = BackendTensor.t.from_numpy(val.astype(BackendTensor.dtype))
# if (BackendTensor.use_gpu):
# tensor = tensor.cuda()

tensor = BackendTensor.tfnp.array(val, dtype=BackendTensor.dtype_obj)
tensor.requires_grad = requires_grad
data_instance.__dict__[key] = tensor
