Commit b58188d

[REFACTOR] Move dual contouring code to modules directory and improve mesh consistency and water tight prot (#28)
# Refactoring Dual Contouring Implementation

This PR reorganizes the dual contouring code to improve maintainability and fix mesh generation issues:

- Moved `_dual_contouring.py` from the API to the modules directory to better reflect its role
- Refactored the dual contouring implementation into smaller, more focused files:
  - `_gen_vertices.py` for vertex generation
  - `_triangulate.py` for triangulation logic
  - `_sequential_triangulation.py` and `_parallel_triangulation.py` for processing surfaces
- Added functionality to detect and fix overlapping voxels across stacks
- Added a `left_right` property to `DualContouringMesh` to track triangulation codes
- Improved vertex visualization in the PyVista helper by increasing the point size
- Extracted common functionality into reusable functions such as `_surface_slicer`
- Moved the mask generation and triangulation functions to the interface module

These changes improve code organization and lay the groundwork for fixing mesh generation issues at stack boundaries.
2 parents a884678 + f9f497a commit b58188d
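
For downstream code, the visible change is the import path: `compute_dual_contouring` now lives in the modules package rather than under the API. A minimal sketch under that assumption (the `extract_meshes` wrapper is hypothetical; the import path and signature are taken from the diffs below):

```python
from typing import List

from gempy_engine.modules.dual_contouring._dual_contouring import compute_dual_contouring
from gempy_engine.core.data.dual_contouring_data import DualContouringData
from gempy_engine.core.data.dual_contouring_mesh import DualContouringMesh


def extract_meshes(dc_data_per_stack: DualContouringData) -> List[DualContouringMesh]:
    # left_right_codes is optional; passing None skips the stack-boundary bookkeeping
    return compute_dual_contouring(dc_data_per_stack, left_right_codes=None, debug=False)
```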

File tree

14 files changed: +617 −439 lines

gempy_engine/API/dual_contouring/_experimental_water_tight_DC_1.py

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from gempy_engine.API.dual_contouring._dual_contouring import compute_dual_contouring
+from gempy_engine.modules.dual_contouring._dual_contouring import compute_dual_contouring
 from gempy_engine.API.dual_contouring._interpolate_on_edges import interpolate_on_edges_for_dual_contouring
 from gempy_engine.core.data.dual_contouring_data import DualContouringData
 from gempy_engine.core.data.dual_contouring_mesh import DualContouringMesh

gempy_engine/API/dual_contouring/multi_scalar_dual_contouring.py

Lines changed: 113 additions & 146 deletions
Large diffs are not rendered by default.

gempy_engine/core/data/dual_contouring_mesh.py

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ class DualContouringMesh:
     vertices: np.ndarray
     edges: np.ndarray
     dc_data: Optional[DualContouringData] = None  # * In principle we need this just for testing
+    left_right: Optional[np.ndarray] = None
 
     def __repr__(self):
         return f"DualContouringMesh({self.vertices.shape[0]} vertices, {self.edges.shape[0]} edges)"

gempy_engine/API/dual_contouring/_dual_contouring.py renamed to gempy_engine/modules/dual_contouring/_dual_contouring.py

Lines changed: 34 additions & 28 deletions
@@ -6,33 +6,33 @@
 from ...core.data.dual_contouring_data import DualContouringData
 from ...core.data.dual_contouring_mesh import DualContouringMesh
 from ...core.utils import gempy_profiler_decorator
-from ...modules.dual_contouring._parallel_triangulation import _should_use_parallel_processing, _process_surface_batch, _init_worker
-from ...modules.dual_contouring._sequential_triangulation import _sequential_triangulation
+from ._parallel_triangulation import _should_use_parallel_processing, _process_surface_batch, _init_worker
+from ._sequential_triangulation import _sequential_triangulation
+from ._gen_vertices import _compute_vertices
 
 # Multiprocessing imports
 try:
     import torch.multiprocessing as mp
+
     MULTIPROCESSING_AVAILABLE = True
 except ImportError:
     import multiprocessing as mp
-    MULTIPROCESSING_AVAILABLE = False
-
-
 
+    MULTIPROCESSING_AVAILABLE = False
 
 
 @gempy_profiler_decorator
-def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_codes=None, debug: bool = False) -> List[DualContouringMesh]:
+def compute_dual_contouring(dc_data_per_stack: DualContouringData,
+                            left_right_codes=None, debug: bool = False) -> List[DualContouringMesh]:
     valid_edges_per_surface = dc_data_per_stack.valid_edges.reshape((dc_data_per_stack.n_surfaces_to_export, -1, 12))
 
     # Check if we should use parallel processing
     use_parallel = _should_use_parallel_processing(dc_data_per_stack.n_surfaces_to_export, BackendTensor.engine_backend)
     parallel_results = None
-
-    if use_parallel and False:  # ! (Miguel Sep 25) I do not see a speedup
+
+    if use_parallel and False:  # ! (Miguel Sep 25) I do not see a speedup
         print(f"Using parallel processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
         parallel_results = _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug)
-
 
     # Fall back to sequential processing
     print(f"Using sequential processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
@@ -41,24 +41,27 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
     for i in range(dc_data_per_stack.n_surfaces_to_export):
         # @off
         if parallel_results is not None:
-            _, vertices_numpy = _sequential_triangulation(
-                dc_data_per_stack,
-                debug,
-                i,
-                left_right_codes,
-                valid_edges_per_surface,
-                compute_indices=False
+            _, vertices_numpy = _compute_vertices(
+                dc_data_per_stack=dc_data_per_stack,
+                debug=debug,
+                surface_i=i,
+                valid_edges_per_surface=valid_edges_per_surface
             )
             indices_numpy = parallel_results[i]
         else:
-            indices_numpy, vertices_numpy = _sequential_triangulation(
-                dc_data_per_stack,
-                debug,
-                i,
-                left_right_codes,
-                valid_edges_per_surface,
-                compute_indices=True
+            dc_data_per_surface, indices_numpy, vertices_numpy = _sequential_triangulation(
+                dc_data_per_stack=dc_data_per_stack,
+                debug=debug,
+                i=i,
+                valid_edges_per_surface=valid_edges_per_surface,
+                left_right_codes=left_right_codes
             )
+
+        # Handle None left_right_codes case
+        if left_right_codes is not None:
+            valid_left_right_codes = left_right_codes[dc_data_per_surface.valid_voxels]
+        else:
+            valid_left_right_codes = None
 
         if TRIMESH_LAST_PASS := True:
             vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)
@@ -67,14 +70,13 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
             DualContouringMesh(
                 vertices_numpy,
                 indices_numpy,
-                dc_data_per_stack
+                dc_data_per_stack,
+                left_right=valid_left_right_codes
             )
         )
     return stack_meshes
 
 
-
-
 def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_workers=None, chunk_size=2):
     """Process surfaces in parallel using multiprocessing."""
     if num_workers is None:
@@ -96,6 +98,9 @@ def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_w
     surface_indices = list(range(dc_data_per_stack.n_surfaces_to_export))
     chunks = [surface_indices[i:i + chunk_size] for i in range(0, len(surface_indices), chunk_size)]
 
+    # Handle None left_right_codes case - ensure we pass a serializable value
+    serializable_left_right_codes = left_right_codes
+
     try:
         # Use spawn context for better PyTorch compatibility
         ctx = mp.get_context("spawn") if MULTIPROCESSING_AVAILABLE else mp
@@ -106,7 +111,7 @@ def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_w
         for chunk in chunks:
             result = pool.apply_async(
                 _process_surface_batch,
-                (chunk, dc_data_dict, left_right_codes, debug)
+                (chunk, dc_data_dict, serializable_left_right_codes, debug)
             )
             async_results.append(result)
 
@@ -122,6 +127,7 @@ def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_w
         print(f"Parallel processing failed: {e}. Falling back to sequential processing.")
         return None
 
+
 def _last_pass(vertices, indices):
     # Check if trimesh is available
     try:
@@ -130,4 +136,4 @@ def _last_pass(vertices, indices):
         mesh.fill_holes()
         return mesh.vertices, mesh.faces
     except ImportError:
-        return vertices, indices
+        return vertices, indices
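
The main behavioral addition above is the guard around `left_right_codes` before indexing with `valid_voxels`. A self-contained sketch of that masking step (array values are illustrative only):

```python
import numpy as np

# One triangulation code per voxel in the stack (values are made up)
left_right_codes = np.array([0b001, 0b010, 0b100, 0b011])
valid_voxels = np.array([True, False, True, True])

# Mirrors the guard added in compute_dual_contouring: only index when codes exist
if left_right_codes is not None:
    valid_left_right_codes = left_right_codes[valid_voxels]
else:
    valid_left_right_codes = None

print(valid_left_right_codes)  # [1 4 3]
```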
gempy_engine/modules/dual_contouring/_gen_vertices.py (new file)

Lines changed: 125 additions & 0 deletions

from typing import Any

import numpy as np

from ...config import AvailableBackends
from ...core.backend_tensor import BackendTensor
from ...core.data.dual_contouring_data import DualContouringData


def _compute_vertices(dc_data_per_stack: DualContouringData,
                      debug: bool,
                      surface_i: int,
                      valid_edges_per_surface) -> tuple[DualContouringData, Any]:
    """Compute vertices for a specific surface."""
    valid_edges: np.ndarray = valid_edges_per_surface[surface_i]
    next_surface_edge_idx: int = valid_edges_per_surface[:surface_i + 1].sum()
    if surface_i == 0:
        last_surface_edge_idx = 0
    else:
        last_surface_edge_idx: int = valid_edges_per_surface[:surface_i].sum()
    slice_object: slice = slice(last_surface_edge_idx, next_surface_edge_idx)

    dc_data_per_surface = DualContouringData(
        xyz_on_edge=dc_data_per_stack.xyz_on_edge,
        valid_edges=valid_edges,
        xyz_on_centers=dc_data_per_stack.xyz_on_centers,
        dxdydz=dc_data_per_stack.dxdydz,
        exported_fields_on_edges=dc_data_per_stack.exported_fields_on_edges,
        n_surfaces_to_export=dc_data_per_stack.n_surfaces_to_export,
        tree_depth=dc_data_per_stack.tree_depth
    )

    vertices_numpy = _generate_vertices(dc_data_per_surface, debug, slice_object)
    return dc_data_per_surface, vertices_numpy


def _generate_vertices(dc_data_per_surface: DualContouringData, debug: bool, slice_object: slice) -> Any:
    vertices: np.ndarray = generate_dual_contouring_vertices(
        dc_data_per_stack=dc_data_per_surface,
        slice_surface=slice_object,
        debug=debug
    )
    vertices_numpy = BackendTensor.t.to_numpy(vertices)
    return vertices_numpy


def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: slice, debug: bool = False):
    # @off
    n_edges = dc_data_per_stack.n_edges
    valid_edges = dc_data_per_stack.valid_edges
    valid_voxels = dc_data_per_stack.valid_voxels
    xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface]
    gradients = dc_data_per_stack.gradients[slice_surface]
    # @on

    # * Coordinates for all possible edges (12) and 3 dummy edges_normals in the center
    edges_xyz = BackendTensor.tfnp.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj)
    valid_edges = valid_edges > 0
    edges_xyz[:, :12][valid_edges] = xyz_on_edge

    # Normals
    edges_normals = BackendTensor.tfnp.zeros((n_edges, 15, 3), dtype=BackendTensor.dtype_obj)
    edges_normals[:, :12][valid_edges] = gradients

    if OLD_METHOD := False:
        # ! The Moureze model does not seem to work with the new method
        # ! This branch is all NaNs, at least with the ch1_1 model
        bias_xyz = BackendTensor.tfnp.copy(edges_xyz[:, :12])
        isclose = BackendTensor.tfnp.isclose(bias_xyz, 0)
        bias_xyz[isclose] = BackendTensor.tfnp.nan  # zero values to NaNs
        mass_points = BackendTensor.tfnp.nanmean(bias_xyz, axis=1)  # mean ignoring NaNs
    else:  # ? This is actually doing something
        bias_xyz = BackendTensor.tfnp.copy(edges_xyz[:, :12])
        if BackendTensor.engine_backend == AvailableBackends.PYTORCH:
            # PyTorch doesn't have masked arrays, so use NaNs instead
            mask = bias_xyz == 0
            # Replace zeros with NaN for the mean calculation
            bias_xyz_masked = BackendTensor.tfnp.where(mask, float('nan'), bias_xyz)
            mass_points = BackendTensor.tfnp.nanmean(bias_xyz_masked, axis=1)
        else:
            # NumPy approach with masked arrays
            bias_xyz = BackendTensor.tfnp.to_numpy(bias_xyz)
            import numpy as np
            mask = bias_xyz == 0
            masked_arr = np.ma.masked_array(bias_xyz, mask)
            mass_points = masked_arr.mean(axis=1)
            mass_points = BackendTensor.tfnp.array(mass_points)

    edges_xyz[:, 12] = mass_points
    edges_xyz[:, 13] = mass_points
    edges_xyz[:, 14] = mass_points

    BIAS_STRENGTH = 1

    bias_x = BackendTensor.tfnp.array([BIAS_STRENGTH, 0, 0], dtype=BackendTensor.dtype_obj)
    bias_y = BackendTensor.tfnp.array([0, BIAS_STRENGTH, 0], dtype=BackendTensor.dtype_obj)
    bias_z = BackendTensor.tfnp.array([0, 0, BIAS_STRENGTH], dtype=BackendTensor.dtype_obj)

    edges_normals[:, 12] = bias_x
    edges_normals[:, 13] = bias_y
    edges_normals[:, 14] = bias_z

    # Remove unused voxels
    edges_xyz = edges_xyz[valid_voxels]
    edges_normals = edges_normals[valid_voxels]

    # Compute the least-squares solve in all voxels at the same time
    A = edges_normals
    b = (A * edges_xyz).sum(axis=2)

    if BackendTensor.engine_backend == AvailableBackends.PYTORCH:
        transpose_shape = (2, 1, 0)  # For PyTorch: (batch, dim2, dim1)
    else:
        transpose_shape = (0, 2, 1)  # For NumPy: (batch, dim2, dim1)

    term1 = BackendTensor.tfnp.einsum("ijk, ilj->ikl", A, BackendTensor.tfnp.transpose(A, transpose_shape))
    term2 = BackendTensor.tfnp.linalg.inv(term1)
    term3 = BackendTensor.tfnp.einsum("ijk,ik->ij", BackendTensor.tfnp.transpose(A, transpose_shape), b)
    vertices = BackendTensor.tfnp.einsum("ijk, ij->ik", term2, term3)

    if debug:
        dc_data_per_stack.bias_center_mass = edges_xyz[:, 12:].reshape(-1, 3)
        dc_data_per_stack.bias_normals = edges_normals[:, 12:].reshape(-1, 3)

    return vertices
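
The einsum block at the end of `generate_dual_contouring_vertices` is a batched least-squares (QEF-style) solve: each voxel's vertex is the point that best satisfies n·x = n·p for its edge-crossing planes, with the three bias planes keeping the system well conditioned near the mass point. A NumPy-only sketch of the same normal-equations computation (shapes and data are assumptions; the einsum strings mirror the code above):

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(2, 15, 3))   # per-voxel plane normals (12 edges + 3 bias planes)
P = rng.uniform(size=(2, 15, 3))  # per-voxel points the planes pass through
b = (A * P).sum(axis=2)           # plane offsets: n_i . p_i

At = np.transpose(A, (0, 2, 1))             # batch transpose (NumPy axis order)
AtA = np.einsum("ijk, ilj->ikl", A, At)     # A^T A per voxel
Atb = np.einsum("ijk,ik->ij", At, b)        # A^T b per voxel
vertices = np.einsum("ijk, ij->ik", np.linalg.inv(AtA), Atb)  # solve per voxel
print(vertices.shape)  # (2, 3) -> one vertex per voxel
```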

gempy_engine/modules/dual_contouring/_parallel_triangulation.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 from gempy_engine.config import AvailableBackends
 from ...core.backend_tensor import BackendTensor
 from ...core.data.dual_contouring_data import DualContouringData
-from ...modules.dual_contouring.dual_contouring_interface import triangulate_dual_contouring
+from ._triangulate import triangulate_dual_contouring
 from ...modules.dual_contouring.fancy_triangulation import triangulate
 
 # Multiprocessing imports
# Multiprocessing imports
