gempy_engine/API/dual_contouring/multi_scalar_dual_contouring.py (99 changes: 67 additions & 32 deletions)
@@ -18,6 +18,7 @@
from ...core.data.interpolation_input import InterpolationInput
from ...core.data.octree_level import OctreeLevel
from ...core.utils import gempy_profiler_decorator
+from ...modules.dual_contouring._aux import _surface_slicer
from ...modules.dual_contouring.dual_contouring_interface import (find_intersection_on_edge, get_triangulation_codes,
                                                                  get_masked_codes, mask_generation, apply_faults_vertex_overlap)

@@ -68,7 +69,7 @@ def dual_contouring_multi_scalar(
)

# Process each scalar field
-    all_stack_intersection = []
+    all_surfaces_intersection = []
all_valid_edges = []
all_left_right_codes = []
# region Interp on edges
@@ -89,49 +90,53 @@
masking=mask
)

-        all_stack_intersection.append(intersection_xyz)
+        all_surfaces_intersection.append(intersection_xyz)
all_valid_edges.append(valid_edges)

# * 5) Interpolate on edges for all stacks
output_on_edges = _interp_on_edges(
-        all_stack_intersection, data_descriptor, dual_contouring_options, interpolation_input
+        all_surfaces_intersection, data_descriptor, dual_contouring_options, interpolation_input
)

# endregion

# region Vertex gen and triangulation
-    left_right_per_mesh = []
-    # Generate meshes for each scalar field
-    for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
-        output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
-        mask = all_mask_arrays[n_scalar_field]
-
-        dc_data = DualContouringData(
-            xyz_on_edge=all_stack_intersection[n_scalar_field],
-            valid_edges=all_valid_edges[n_scalar_field],
-            xyz_on_centers=(
-                octree_leaves.grid_centers.octree_grid.values if mask is None
-                else octree_leaves.grid_centers.octree_grid.values[mask]
-            ),
-            dxdydz=octree_leaves.grid_centers.octree_dxdydz,
-            gradients=output_on_edges[n_scalar_field],
-            n_surfaces_to_export=output.scalar_field_at_sp.shape[0],
-            tree_depth=options.number_octree_levels,
-        )
-
-        meshes: List[DualContouringMesh] = compute_dual_contouring(
-            dc_data_per_stack=dc_data,
-            left_right_codes=all_left_right_codes[n_scalar_field],
-            debug=options.debug
+    if LEGACY := False:
+        for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
+            _compute_meshes_legacy(all_left_right_codes, all_mask_arrays, all_meshes, all_surfaces_intersection, all_valid_edges, n_scalar_field, octree_leaves, options, output_on_edges)
+    else:
+        dc_data_per_surface_all = []
+        for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
+            output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
+            mask = all_mask_arrays[n_scalar_field]
+            n_surfaces_to_export = output.scalar_field_at_sp.shape[0]
+            for surface_i in range(n_surfaces_to_export):
+                valid_edges = all_valid_edges[n_scalar_field]
+                valid_edges_per_surface = valid_edges.reshape((n_surfaces_to_export, -1, 12))
+                slice_object = _surface_slicer(surface_i, valid_edges_per_surface)
+
+                dc_data_per_surface = DualContouringData(
+                    xyz_on_edge=all_surfaces_intersection[n_scalar_field][slice_object],
+                    valid_edges=valid_edges_per_surface[surface_i],
+                    xyz_on_centers=(
+                        octree_leaves.grid_centers.octree_grid.values if mask is None
+                        else octree_leaves.grid_centers.octree_grid.values[mask]
+                    ),
+                    dxdydz=octree_leaves.grid_centers.octree_dxdydz,
+                    left_right_codes=all_left_right_codes[n_scalar_field],
+                    gradients=output_on_edges[n_scalar_field][slice_object],
+                    n_surfaces_to_export=n_scalar_field,
+                    tree_depth=options.number_octree_levels
+                )
+
+                dc_data_per_surface_all.append(dc_data_per_surface)
+
+        from gempy_engine.modules.dual_contouring._dual_contouring_v2 import compute_dual_contouring_v2
+        all_meshes = compute_dual_contouring_v2(
+            dc_data_list=dc_data_per_surface_all,
+        )

-        for m in meshes:
-            left_right_per_mesh.append(m.left_right)
-
-        # TODO: If the order of the meshes does not match the order of scalar_field_at_surface points, reorder them here
-        if meshes is not None:
-            all_meshes.extend(meshes)

# endregion
if (options.debug or len(all_left_right_codes) > 1) and False:
apply_faults_vertex_overlap(all_meshes, data_descriptor.stack_structure, left_right_per_mesh)
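Note: `_surface_slicer` (imported from `modules/dual_contouring/_aux` above) is not shown in this diff. Below is a minimal sketch of what it plausibly computes, assuming `xyz_on_edge` stores one intersection point per valid edge in surface-major order; the function body is a reconstruction, not the PR's code:

```python
import numpy as np

def _surface_slicer_sketch(surface_i: int, valid_edges_per_surface: np.ndarray) -> slice:
    # valid_edges_per_surface: (n_surfaces, n_voxels, 12) boolean mask.
    # Each True edge contributes exactly one point to xyz_on_edge, so the
    # cumulative per-surface count of True edges gives the slice bounds.
    counts = valid_edges_per_surface.reshape(valid_edges_per_surface.shape[0], -1).sum(axis=1)
    start = int(counts[:surface_i].sum())
    return slice(start, start + int(counts[surface_i]))
```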
@@ -141,6 +146,36 @@
# ... existing code ...


+def _compute_meshes_legacy(all_left_right_codes: list[Any], all_mask_arrays: np.ndarray,
+                           all_meshes: list[DualContouringMesh], all_stack_intersection: list[Any],
+                           all_valid_edges: list[Any], n_scalar_field: int,
+                           octree_leaves: OctreeLevel, options: InterpolationOptions, output_on_edges: list[np.ndarray]):
+    output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
+    mask = all_mask_arrays[n_scalar_field]
+
+    dc_data = DualContouringData(
+        xyz_on_edge=all_stack_intersection[n_scalar_field],
+        valid_edges=all_valid_edges[n_scalar_field],
+        xyz_on_centers=(
+            octree_leaves.grid_centers.octree_grid.values if mask is None
+            else octree_leaves.grid_centers.octree_grid.values[mask]
+        ),
+        dxdydz=octree_leaves.grid_centers.octree_dxdydz,
+        gradients=output_on_edges[n_scalar_field],
+        n_surfaces_to_export=output.scalar_field_at_sp.shape[0],
+        tree_depth=options.number_octree_levels,
+    )
+
+    meshes: List[DualContouringMesh] = compute_dual_contouring(
+        dc_data_per_stack=dc_data,
+        left_right_codes=all_left_right_codes[n_scalar_field],
+        debug=options.debug
+    )
+
+    # TODO: If the order of the meshes does not match the order of scalar_field_at_surface points, reorder them here
+    if meshes is not None:
+        all_meshes.extend(meshes)


def _validate_stack_relations(data_descriptor: InputDataDescriptor, n_scalar_field: int) -> None:
    # ... existing code ...
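For concreteness, a shape walk-through of the per-surface reshape used in the new path above (illustrative sizes, not values from the PR):

```python
import numpy as np

# 2 surfaces x 100 voxels, 12 candidate edges per voxel, flattened surface-major:
valid_edges = np.zeros((2 * 100, 12), dtype=bool)
valid_edges_per_surface = valid_edges.reshape((2, -1, 12))  # -> shape (2, 100, 12)
assert valid_edges_per_surface[1].shape == (100, 12)        # edge mask of surface 1
```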
gempy_engine/core/data/dual_contouring_data.py (2 changes: 1 addition & 1 deletion)
@@ -12,11 +12,11 @@ class DualContouringData:
dxdydz: np.ndarray | tuple[float, float, float]

n_surfaces_to_export: int
+    left_right_codes: np.ndarray
gradients: np.ndarray = None

tree_depth: int = -1
# Water tight
mask: np.ndarray = None

bias_center_mass: np.ndarray = None # * Only for testing
bias_normals: np.ndarray = None # * Only for testing
    # ... existing code ...
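For orientation, an abridged sketch of the dataclass after this change; field names come from the hunk above and from the constructor calls elsewhere in the diff, and the testing-only bias fields are omitted:

```python
from dataclasses import dataclass
import numpy as np

@dataclass
class DualContouringData:  # abridged sketch, not the full class
    xyz_on_edge: np.ndarray
    valid_edges: np.ndarray
    xyz_on_centers: np.ndarray
    dxdydz: np.ndarray | tuple[float, float, float]
    n_surfaces_to_export: int
    left_right_codes: np.ndarray      # new required field introduced by this PR
    gradients: np.ndarray = None
    tree_depth: int = -1
    mask: np.ndarray = None           # water tight
```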
gempy_engine/core/data/dual_contouring_mesh.py (1 change: 0 additions & 1 deletion)
@@ -10,7 +10,6 @@ class DualContouringMesh:
vertices: np.ndarray
edges: np.ndarray
dc_data: Optional[DualContouringData] = None # * In principle we need this just for testing
-    left_right: Optional[np.ndarray] = None

def __repr__(self):
return f"DualContouringMesh({self.vertices.shape[0]} vertices, {self.edges.shape[0]} edges)"
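With `left_right` gone from the mesh, construction reduces to the remaining fields, matching the updated call sites in `_dual_contouring.py` and `_dual_contouring_v2.py` below (`vertices_numpy`, `indices_numpy`, and `dc_data` stand in for the locals at those sites):

```python
mesh = DualContouringMesh(vertices_numpy, indices_numpy, dc_data)  # no left_right kwarg anymore
```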
gempy_engine/modules/dual_contouring/_dual_contouring.py (8 changes: 3 additions & 5 deletions)
@@ -30,7 +30,7 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData,
use_parallel = _should_use_parallel_processing(dc_data_per_stack.n_surfaces_to_export, BackendTensor.engine_backend)
parallel_results = None

-    if use_parallel and False:  # ! (Miguel Sep 25) I do not see a speedup
+    if use_parallel and True:  # ! (Miguel Sep 25) I do not see a speedup
print(f"Using parallel processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
parallel_results = _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug)

@@ -70,8 +70,7 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData,
DualContouringMesh(
vertices_numpy,
indices_numpy,
-                dc_data_per_stack,
-                left_right=valid_left_right_codes
+                dc_data_per_stack
)
)
return stack_meshes
@@ -88,10 +87,9 @@ def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_w
'valid_edges' : dc_data_per_stack.valid_edges,
'xyz_on_centers' : dc_data_per_stack.xyz_on_centers,
'dxdydz' : dc_data_per_stack.dxdydz,
-        'exported_fields_on_edges': dc_data_per_stack.exported_fields_on_edges,
+        'gradients': dc_data_per_stack.gradients,
'n_surfaces_to_export' : dc_data_per_stack.n_surfaces_to_export,
'tree_depth' : dc_data_per_stack.tree_depth,
-        # 'gradients': getattr(dc_data_per_stack, 'gradients', None)
}

# Create surface index chunks
    # ... existing code ...
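The chunk construction collapsed above mirrors the one in the new v2 module below; a worked example of the scheme with `chunk_size=2`:

```python
surface_indices = list(range(5))
chunk_size = 2
chunks = [surface_indices[i:i + chunk_size] for i in range(0, len(surface_indices), chunk_size)]
# chunks == [[0, 1], [2, 3], [4]]; each chunk becomes one apply_async task
```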
gempy_engine/modules/dual_contouring/_dual_contouring_v2.py (164 changes: 164 additions & 0 deletions)
@@ -0,0 +1,164 @@
import os
from typing import List

from ._gen_vertices import _generate_vertices
from ._parallel_triangulation import _should_use_parallel_processing, _init_worker
from ._sequential_triangulation import _compute_triangulation
from ... import optional_dependencies
from ...core.backend_tensor import BackendTensor
from ...core.data.dual_contouring_data import DualContouringData
from ...core.data.dual_contouring_mesh import DualContouringMesh
from ...core.utils import gempy_profiler_decorator

# Multiprocessing imports
try:
import torch.multiprocessing as mp

MULTIPROCESSING_AVAILABLE = True
except ImportError:
import multiprocessing as mp

MULTIPROCESSING_AVAILABLE = False


@gempy_profiler_decorator
def compute_dual_contouring_v2(dc_data_list: list[DualContouringData]) -> List[DualContouringMesh]:
parallel_results = _parallel_process(dc_data_list)

if parallel_results is not None:
        return parallel_results

    # Fall back to sequential processing
print(f"Using sequential processing for {len(dc_data_list)} surfaces")
stack_meshes: List[DualContouringMesh] = []

for dc_data in dc_data_list:
mesh = _process_one_surface(dc_data, dc_data.left_right_codes)
stack_meshes.append(mesh)
return stack_meshes


def _parallel_process(dc_data_list: list[DualContouringData]):
# Check if we should use parallel processing
n_surfaces_to_export = len(dc_data_list)
use_parallel = _should_use_parallel_processing(n_surfaces_to_export, BackendTensor.engine_backend)
parallel_results = None

if use_parallel and False: # ! (Miguel Sep 25) I do not see a speedup
print(f"Using parallel processing for {n_surfaces_to_export} surfaces")
parallel_results = _parallel_process_surfaces_v2(dc_data_list)

return parallel_results


def _parallel_process_surfaces_v2(dc_data_list: list[DualContouringData], num_workers=None, chunk_size=2):
"""Process surfaces in parallel using multiprocessing."""
n_surfaces = len(dc_data_list)

if num_workers is None:
num_workers = max(1, min(os.cpu_count() // 2, n_surfaces // 2))
    num_workers = 3

# Prepare data for serialization - convert each DualContouringData to dict
dc_data_dicts = []
for dc_data in dc_data_list:
dc_data_dict = {
'xyz_on_edge' : dc_data.xyz_on_edge,
'valid_edges' : dc_data.valid_edges,
'xyz_on_centers' : dc_data.xyz_on_centers,
'dxdydz' : dc_data.dxdydz,
'gradients' : dc_data.gradients,
'left_right_codes' : dc_data.left_right_codes,
'n_surfaces_to_export': dc_data.n_surfaces_to_export,
'tree_depth' : dc_data.tree_depth
}
dc_data_dicts.append(dc_data_dict)

# Create surface index chunks
surface_indices = list(range(n_surfaces))
chunks = [surface_indices[i:i + chunk_size] for i in range(0, len(surface_indices), chunk_size)]

try:
        # Use the fork context when torch.multiprocessing is available; otherwise fall back to the stdlib default
        ctx = mp.get_context("fork") if MULTIPROCESSING_AVAILABLE else mp

with ctx.Pool(processes=num_workers, initializer=_init_worker) as pool:
# Submit all chunks
async_results = []
for chunk in chunks:
result = pool.apply_async(
_process_surface_batch_v2,
                    (chunk, dc_data_dicts)
)
async_results.append(result)

# Collect results
all_results = []
for async_result in async_results:
batch_results = async_result.get()
all_results.extend(batch_results)

return all_results

except Exception as e:
print(f"Parallel processing failed: {e}. Falling back to sequential processing.")
return None


def _process_surface_batch_v2(surface_indices, dc_data_dicts):
"""Process a batch of surfaces. This function runs in worker processes."""
results = []

for idx in surface_indices:
dc_data_dict = dc_data_dicts[idx]

# Reconstruct DualContouringData from dict
dc_data = DualContouringData(
xyz_on_edge=dc_data_dict['xyz_on_edge'],
valid_edges=dc_data_dict['valid_edges'],
xyz_on_centers=dc_data_dict['xyz_on_centers'],
dxdydz=dc_data_dict['dxdydz'],
gradients=dc_data_dict['gradients'],
left_right_codes=dc_data_dict['left_right_codes'],
n_surfaces_to_export=dc_data_dict['n_surfaces_to_export'],
tree_depth=dc_data_dict['tree_depth']
)
# Process the surface
mesh = _process_one_surface(dc_data, dc_data.left_right_codes)
results.append(mesh)

    return results


def _process_one_surface(dc_data: DualContouringData, left_right_codes) -> DualContouringMesh:
vertices = _generate_vertices(dc_data, False, None)

# * Average gradient for the edges
valid_edges = dc_data.valid_edges
edges_normals = BackendTensor.t.zeros((valid_edges.shape[0], 12, 3), dtype=BackendTensor.dtype_obj)
edges_normals[:] = 0
edges_normals[valid_edges] = dc_data.gradients

indices_numpy = _compute_triangulation(
dc_data_per_surface=dc_data,
left_right_codes=left_right_codes,
edges_normals=edges_normals,
vertex=vertices
)

vertices_numpy = BackendTensor.t.to_numpy(vertices)
if TRIMESH_LAST_PASS := True:
vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)

mesh = DualContouringMesh(vertices_numpy, indices_numpy, dc_data)
return mesh


def _last_pass(vertices, indices):
# Check if trimesh is available
try:
trimesh = optional_dependencies.require_trimesh()
mesh = trimesh.Trimesh(vertices=vertices, faces=indices)
mesh.fill_holes()
return mesh.vertices, mesh.faces
except ImportError:
return vertices, indices
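A minimal calling sketch for the new entry point, mirroring the per-surface construction loop in `multi_scalar_dual_contouring.py`. The arrays are synthetic stand-ins with illustrative shapes, the `left_right_codes` shape is assumed, and `n_surfaces_to_export=1` is an assumption for a single-surface payload (the PR itself passes the stack index):

```python
import numpy as np
from gempy_engine.core.data.dual_contouring_data import DualContouringData
from gempy_engine.modules.dual_contouring._dual_contouring_v2 import compute_dual_contouring_v2

n_voxels = 8
valid = np.zeros((n_voxels, 12), dtype=bool)
valid[0, 0] = True  # one active edge -> one intersection point and one gradient

dc = DualContouringData(
    xyz_on_edge=np.zeros((1, 3)),
    valid_edges=valid,
    xyz_on_centers=np.zeros((n_voxels, 3)),
    dxdydz=(1.0, 1.0, 1.0),
    gradients=np.zeros((1, 3)),
    left_right_codes=np.zeros(n_voxels, dtype=np.int64),  # shape assumed
    n_surfaces_to_export=1,
    tree_depth=3,
)
meshes = compute_dual_contouring_v2(dc_data_list=[dc])  # requires an initialized backend
```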
gempy_engine/modules/dual_contouring/_gen_vertices.py (12 changes: 8 additions & 4 deletions)
@@ -1,4 +1,4 @@
-from typing import Any
+from typing import Any, Optional

import numpy as np

@@ -40,13 +40,17 @@ def _generate_vertices(dc_data_per_surface: DualContouringData, debug: bool, sli
return vertices


-def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: slice, debug: bool = False):
+def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: Optional[slice] = None, debug: bool = False):
# @off
n_edges = dc_data_per_stack.n_valid_edges
valid_edges = dc_data_per_stack.valid_edges
valid_voxels = dc_data_per_stack.valid_voxels
-    xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface]
-    gradients = dc_data_per_stack.gradients[slice_surface]
+    if slice_surface is not None:
+        xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface]
+        gradients = dc_data_per_stack.gradients[slice_surface]
+    else:
+        xyz_on_edge = dc_data_per_stack.xyz_on_edge
+        gradients = dc_data_per_stack.gradients
# @on

    # * Coordinates for all possible edges (12) and 3 dummy edges_normals in the center
    # ... existing code ...
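With the new `Optional` default, both call styles below are valid; `dc_data` stands in for a populated `DualContouringData`, and the slice bounds are illustrative:

```python
verts_full = generate_dual_contouring_vertices(dc_data)                               # whole stack
verts_one = generate_dual_contouring_vertices(dc_data, slice_surface=slice(0, 128))   # one surface
```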