Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 56 additions & 0 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
name: Tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Install dependencies
        run: |
          uv sync --extra test

      - name: Run ruff linter
        run: |
          uv run ruff check .

      - name: Run ruff formatter check
        run: |
          uv run ruff format --check .

      # Single pytest invocation with coverage enabled: the previous workflow
      # ran the full suite twice (a plain `pytest` step followed by this one),
      # doubling CI time for no extra signal.
      - name: Run tests with coverage
        run: |
          uv run pytest --cov=deeponet_acoustics --cov-report=xml --cov-report=term

      - name: Upload coverage to Codecov (optional)
        uses: codecov/codecov-action@v4
        # FIX: the matrix only contains "3.12", so the old guard
        # (matrix.python-version == '3.11') was never true and coverage
        # was silently never uploaded.
        if: matrix.python-version == '3.12'
        # Step-level key (a sibling of `uses:`/`with:`, not a `with:` input):
        # an upload failure must not fail the build.
        continue-on-error: true
        with:
          # `files` (plural) is the documented codecov-action v4 input;
          # `file` is the deprecated alias.
          files: ./coverage.xml
          fail_ci_if_error: false
38 changes: 3 additions & 35 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,8 @@ coverage.xml
.pytest_cache/
cover/

.claude/

# Translations
*.mo
*.pot
Expand Down Expand Up @@ -82,33 +84,6 @@ target/
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

Expand Down Expand Up @@ -150,11 +125,4 @@ dmypy.json
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
cython_debug/
9 changes: 7 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,19 @@ git clone git@github.com:dtu-act/deeponet-acoustic-wave-prop.git
cd deeponet-acoustic-wave-prop

# option 1 - development installation via uv (recommended)
uv sync
uv sync --extra test
pre-commit install

# run tests
uv run pytest tests/

# run ruff for formatting code
uv run ruff check . --fix && uv run ruff format .

# option 2 - development installation via pip
pip install -e .
pip install -e .[test]
pre-commit install

# run ruff for formatting code (pip install puts ruff on the PATH directly)
ruff check . --fix && ruff format .
```
Expand Down
2 changes: 1 addition & 1 deletion deeponet_acoustics/end2end/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def train(settings_dict: dict[str, Any]):
datasetInfo(
metadata, metadata_val, training.batch_size_coord, training.batch_size_branch
)

# setup network
input_example = next(iter(dataloader))[0][1]
in_tn = input_example.shape[-1]
Expand Down
14 changes: 7 additions & 7 deletions deeponet_acoustics/models/datastructures.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Callable, Dict, List, TypeAlias
from typing import Callable, TypeAlias

import numpy as np

Expand Down Expand Up @@ -121,10 +121,10 @@ class Physics:
class Domain:
spatial_dimension: int

Xbounds: List[List[float]]
Xbounds: list[list[float]]
tmax: float

nX: List[List[int]]
nX: list[list[int]]
nt: int

def __init__(self, Xbounds, tmax, dt, dx):
Expand Down Expand Up @@ -218,18 +218,18 @@ class ResNetArchitecture:
class EvaluationSettings:
model_dir: str
data_path: str
receiver_pos: List[object]
receiver_pos: np.ndarray
tmax: float
num_srcs: int

snap_to_grid: bool
source_position_override: List[object]
source_position_override: np.ndarray
write_full_wave_field: bool
write_ir_wav: bool
write_ir_plots: bool
write_ir_animations: bool

def __init__(self, settings: Dict, num_srcs: int = -1):
def __init__(self, settings: dict, num_srcs: int = -1):
key_recv_pos = "receiver_positions_all_sources"
key_recv_groups = "receiver_position_groups"
key_src_pos = "source_positions"
Expand Down Expand Up @@ -304,7 +304,7 @@ def __init__(self, settings: Dict, num_srcs: int = -1):
self.write_ir_plots = settings["write_ir_plots"]
self.write_ir_animations = settings["write_ir_animations"]

def parseReceiverGroups(self, receiver_keys: List, receivers: Dict) -> List[object]:
def parseReceiverGroups(self, receiver_keys: list, receivers: dict) -> np.ndarray:
receiver_pos = np.empty(len(receiver_keys), dtype=object)
for i_src in range(len(receiver_keys)):
# the receiver positions are located in another entry in the JSON file with the key inside 'recvs'
Expand Down
1 change: 1 addition & 0 deletions deeponet_acoustics/models/networks_flax.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ class MLP(nn.Module):
activation: Callable = jnp.sin
kernel_init: Callable = sinusoidal_init
tag: str = "<undef>"
network_type: NetworkArchitectureType = NetworkArchitectureType.MLP

@nn.compact
def __call__(self, inputs, output_layer_indx=-1):
Expand Down
37 changes: 25 additions & 12 deletions deeponet_acoustics/scripts/convertH5/assemble_2D_sources.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,16 @@
def assemble_2d_source_files(data_dir, header_filepath_in, filepath_out):
"""
Assemble multiple H5 files (one per source position) into a single H5 file.
When running MATLAB in parallel with multiple threads, each source position
is written to separate files. This function combines them into one file

When running MATLAB in parallel with multiple threads, each source position
is written to separate files. This function combines them into one file
for the 2D Python code to process.

Args:
data_dir: Directory containing individual H5 files per source position
header_filepath_in: Path to header H5 file containing mesh and metadata
filepath_out: Output path for assembled H5 file

Raises:
Exception: If no H5 files found in data_dir or if files don't exist
"""
Expand Down Expand Up @@ -122,18 +122,31 @@ def assemble_2d_source_files(data_dir, header_filepath_in, filepath_out):
upressure_new[i, :] = upressures[()].flatten()
x0_srcs_new[i, :] = x0_srcs[()]


if __name__ == "__main__":
"""
Example usage:
data_dir = "/work3/nibor/1TB/deeponet/input_1D_2D/Lshape3x3_freq_indep_ppw_2_4_2_5srcpos_val"
header_filepath_in = "/work3/nibor/1TB/deeponet/input_1D_2D/Lshape3x3_freq_indep_ppw_2_4_2_5srcpos_val/Lshape3x3_freq_indep_ppw_2_4_2_5srcpos_val_header.h5"
filepath_out = "/work3/nibor/1TB/deeponet/input_1D_2D/Lshape3x3_freq_indep_ppw_2_4_2_5srcpos_val.h5"
"""
parser = argparse.ArgumentParser(description='Assemble multiple H5 files (one per source) into a single H5 file')
parser.add_argument('--data_dir', required=True, help='Directory containing individual H5 files per source position')
parser.add_argument('--header_file', required=True, help='Path to header H5 file containing mesh and metadata')
parser.add_argument('--output_file', required=True, help='Output path for assembled H5 file')

parser = argparse.ArgumentParser(
description="Assemble multiple H5 files (one per source) into a single H5 file"
)
parser.add_argument(
"--data_dir",
required=True,
help="Directory containing individual H5 files per source position",
)
parser.add_argument(
"--header_file",
required=True,
help="Path to header H5 file containing mesh and metadata",
)
parser.add_argument(
"--output_file", required=True, help="Output path for assembled H5 file"
)

args = parser.parse_args()
assemble_2d_source_files(args.data_dir, args.header_file, args.output_file)

assemble_2d_source_files(args.data_dir, args.header_file, args.output_file)
17 changes: 11 additions & 6 deletions deeponet_acoustics/scripts/convertH5/convert_3D_dtype_compact.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ def convert_to_dtype_compact(
):
"""
Convert 3D H5 file data type and optionally downsample temporal resolution.

Converts pressure and upressure data to a more compact data type (typically float16)
to reduce file size. Can also downsample temporal resolution by skipping time steps.

Args:
path_data_in: Input H5 file path
path_data_out: Output H5 file path
Expand Down Expand Up @@ -54,14 +54,19 @@ def convert_to_dtype_compact(
pressures_new.attrs.create("time_steps", time_steps, dtype=np.float32)
umesh_new.attrs.create("umesh_shape", ushape, dtype=np.float32)


if __name__ == "__main__":
"""
Example usage:
base_path = "/work3/nibor/1TB/libP/bilbao_1000hz_p4_5ppw_srcpos5_val"
"""
parser = argparse.ArgumentParser(description='Convert 3D H5 files to compact data type (float16)')
parser.add_argument('--base_path', required=True, help='Directory containing H5 files to process')

parser = argparse.ArgumentParser(
description="Convert 3D H5 files to compact data type (float16)"
)
parser.add_argument(
"--base_path", required=True, help="Directory containing H5 files to process"
)

args = parser.parse_args()

filenamesH5 = IO.pathsToFileType(args.base_path, ".h5")
Expand All @@ -70,7 +75,7 @@ def convert_to_dtype_compact(
print(f"Number of files to process: {N}\n")

for i, filename in enumerate(filenamesH5):
print(f"Processing {i+1}/{N}")
print(f"Processing {i + 1}/{N}")
        print(f"Input: {filename}")

filename_out = pathlib.Path(filename).with_suffix("")
Expand Down
Loading
Loading