diff --git a/.github/workflows/pysplashsurf_CI.yml b/.github/workflows/pysplashsurf_CI.yml
new file mode 100644
index 00000000..fdc11ea1
--- /dev/null
+++ b/.github/workflows/pysplashsurf_CI.yml
@@ -0,0 +1,274 @@
+# This file is autogenerated by maturin v1.8.2
+# To update, run
+#
+#    maturin generate-ci github
+#
+name: PySplashsurf CI
+
+on:
+  push:
+  workflow_dispatch:
+  release:
+    types: [published]
+
+permissions:
+  contents: read
+
+jobs:
+  check_format:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Check formatting of PySplashsurf
+        run: cargo fmt -- --check
+        working-directory: pysplashsurf
+
+  generate-stub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: moonrepo/setup-rust@v1
+      - run: |
+          cargo run --bin stub_gen
+        working-directory: pysplashsurf
+      - name: Upload stub as artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: pysplashsurf.pyi
+          path: pysplashsurf/pysplashsurf/
+
+  linux:
+    needs: generate-stub
+    runs-on: ${{ matrix.platform.runner }}
+    strategy:
+      matrix:
+        platform:
+          - runner: ubuntu-24.04
+            target: x86_64
+          - runner: ubuntu-24.04
+            target: x86
+          - runner: ubuntu-24.04
+            target: aarch64
+          - runner: ubuntu-24.04
+            target: armv7
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download stub artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: pysplashsurf.pyi
+          path: pysplashsurf/pysplashsurf/
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - name: Build wheels
+        uses: PyO3/maturin-action@v1
+        with:
+          target: ${{ matrix.platform.target }}
+          args: --release --out dist -m pysplashsurf/Cargo.toml
+          # sccache: ${{ !startsWith(github.ref, 'refs/tags/') }}
+          manylinux: auto
+      - name: Upload wheels
+        uses: actions/upload-artifact@v4
+        with:
+          name: wheels-linux-${{ matrix.platform.target }}
+          path: dist
+
+  windows:
+    needs: generate-stub
+    runs-on: ${{ matrix.platform.runner }}
+    strategy:
+      matrix:
+        platform:
+          - runner: windows-latest
+            target: x64
+          - runner: windows-latest
+            target: x86
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download stub artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: pysplashsurf.pyi
+          path: pysplashsurf/pysplashsurf/
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+          architecture: ${{ matrix.platform.target }}
+      - name: Build wheels
+        uses: PyO3/maturin-action@v1
+        with:
+          target: ${{ matrix.platform.target }}
+          args: --release --out dist -m pysplashsurf\Cargo.toml
+          sccache: ${{ !startsWith(github.ref, 'refs/tags/') }}
+      - name: Upload wheels
+        uses: actions/upload-artifact@v4
+        with:
+          name: wheels-windows-${{ matrix.platform.target }}
+          path: dist
+
+  macos:
+    needs: generate-stub
+    runs-on: ${{ matrix.platform.runner }}
+    strategy:
+      matrix:
+        platform:
+          - runner: macos-13
+            target: x86_64
+          - runner: macos-14
+            target: aarch64
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download stub artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: pysplashsurf.pyi
+          path: pysplashsurf/pysplashsurf/
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - name: Build wheels
+        uses: PyO3/maturin-action@v1
+        with:
+          target: ${{ matrix.platform.target }}
+          args: --release --out dist -m pysplashsurf/Cargo.toml
+          sccache: ${{ !startsWith(github.ref, 'refs/tags/') }}
+      - name: Upload wheels
+        uses: actions/upload-artifact@v4
+        with:
+          name: wheels-macos-${{ matrix.platform.target }}
+          path: dist
+
+  sdist:
+    needs: generate-stub
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download stub artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: pysplashsurf.pyi
+          path: pysplashsurf/pysplashsurf/
+      - name: Build sdist
+        uses: PyO3/maturin-action@v1
+        with:
+          command: sdist
+          args: --out dist -m pysplashsurf/Cargo.toml
+      - name: Upload sdist
+        uses: actions/upload-artifact@v4
+        with:
+          name: wheels-sdist
+          path: dist
+
+  release:
+    name: Release
+    runs-on: ubuntu-latest
+    if: ${{ startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' }}
+    needs: [linux, windows, macos, sdist]
+    permissions:
+      # Used to sign the release artifacts
+      id-token: write
+      # Used to upload release artifacts
+      contents: write
+      # Used to generate artifact attestation
+      attestations: write
+    steps:
+      - uses: actions/download-artifact@v4
+      #- name: Generate artifact attestation
+      #  uses: actions/attest-build-provenance@v1
+      #  with:
+      #    subject-path: 'wheels-*/*'
+      - name: Publish to PyPI
+        #if: ${{ startsWith(github.ref, 'refs/tags/') }}
+        uses: PyO3/maturin-action@v1
+        env:
+          MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+        with:
+          command: upload
+          args: --non-interactive --skip-existing wheels-*/*
+
+  build_wheel:
+    runs-on: ubuntu-latest
+    outputs:
+      filename: ${{ steps.get_filename.outputs.file_name }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - name: Build wheels
+        uses: PyO3/maturin-action@v1
+        with:
+          target: x86_64
+          args: --release --out dist -m pysplashsurf/Cargo.toml
+          manylinux: auto
+      - name: Get wheel name
+        id: get_filename
+        run: |
+          FILE_NAME=$(ls dist)
+          echo "file_name=$FILE_NAME" >> $GITHUB_OUTPUT
+      - name: Upload wheel
+        uses: actions/upload-artifact@v4
+        with:
+          name: doc_tests_wheel
+          path: dist
+
+  docs:
+    needs: build_wheel
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - name: Download wheel
+        uses: actions/download-artifact@v4
+        with:
+          name: doc_tests_wheel
+          path: dist/
+      - name: Append wheel path to docs requirements
+        run: |
+          echo "./dist/${{ needs.build_wheel.outputs.filename }}" >> pysplashsurf/pysplashsurf/docs/requirements.txt
+      - uses: ammaraskar/sphinx-action@8.2.3
+        with:
+          docs-folder: "pysplashsurf/pysplashsurf/docs/"
+      - uses: actions/upload-artifact@v4
+        with:
+          name: DocumentationHTML
+          path: pysplashsurf/pysplashsurf/docs/build/html/
+
+  tests:
+    needs: build_wheel
+    defaults:
+      run:
+        shell: bash -l {0}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - uses: mamba-org/setup-micromamba@v2
+        with:
+          environment-file: pysplashsurf/python_environment.yaml
+          init-shell: >-
+            bash
+      - run: |
+          conda info
+          conda list
+      - name: Download wheel
+        uses: actions/download-artifact@v4
+        with:
+          name: doc_tests_wheel
+          path: dist/
+      - run: pip install dist/${{ needs.build_wheel.outputs.filename }}
+      - name: Install splashsurf CLI
+        run: cargo install splashsurf
+      - name: Run pytest
+        uses: pavelzw/pytest-action@v2
+        with:
+          verbose: true
+          emoji: false
+          job-summary: true
+          click-to-expand: true
+          report-title: 'Pytest Report'
diff --git a/pysplashsurf/.gitignore b/pysplashsurf/.gitignore
new file mode 100644
index 00000000..e9913575
--- /dev/null
+++ b/pysplashsurf/.gitignore
@@ -0,0 +1,12 @@
+dist/
+*.egg-info/
+target/
+**/__pycache__/
+*.so
+test.vtk
+test_bin.vtk
+env/
+envs/
+
+# Sphinx
+pysplashsurf/docs/build/
\ No newline at end of file
diff --git a/pysplashsurf/Cargo.lock b/pysplashsurf/Cargo.lock
new file mode
100644 index 00000000..e22f2f4b --- /dev/null +++ b/pysplashsurf/Cargo.lock @@ -0,0 +1,1286 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "bumpalo" +version = "3.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" + +[[package]] +name = "bytemuck" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" + +[[package]] +name = "bytemuck_derive" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" +dependencies = [ + "shlex", +] + 
+[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hash32" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" +dependencies = [ + "byteorder", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" + +[[package]] +name = "heapless" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" +dependencies = [ + "hash32", + "stable_deref_trait", +] + +[[package]] 
+name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown 0.15.3", +] + +[[package]] +name = "indoc" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" + +[[package]] +name = "inventory" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" +dependencies = [ + "rustversion", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.172" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "matrixmultiply" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" +dependencies = [ + "autocfg", + "rawpointer", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "nalgebra" +version = "0.33.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" +dependencies = [ + "approx", + "bytemuck", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational", + "num-traits", + "rand", + "rand_distr", + "simba", + "typenum", +] + +[[package]] +name = "nalgebra-macros" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "ndarray" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" +dependencies = [ + "matrixmultiply", + "num-complex", + "num-integer", + "num-traits", + "portable-atomic", + "portable-atomic-util", + "rawpointer", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "numeric_literals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "numpy" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f1dee9aa8d3f6f8e8b9af3803006101bb3653866ef056d530d53ae68587191" +dependencies = [ + "libc", + "ndarray", + "num-complex", + "num-integer", + "num-traits", + "pyo3", + "pyo3-build-config 0.25.0", + "rustc-hash", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pyo3" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f239d656363bcee73afef85277f1b281e8ac6212a1d42aa90e55b90ed43c47a4" +dependencies = [ + "indoc", + "libc", + "memoffset", + "once_cell", + "portable-atomic", + "pyo3-build-config 0.25.0", + "pyo3-ffi", + "pyo3-macros", + "unindent", +] + +[[package]] +name = "pyo3-build-config" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" +dependencies = [ + "once_cell", + "target-lexicon", +] + +[[package]] +name = "pyo3-build-config" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "755ea671a1c34044fa165247aaf6f419ca39caa6003aee791a0df2713d8f1b6d" +dependencies = [ + "once_cell", + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc95a2e67091e44791d4ea300ff744be5293f394f1bafd9f78c080814d35956e" +dependencies = [ + "libc", + "pyo3-build-config 0.25.0", +] + +[[package]] +name = "pyo3-macros" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a179641d1b93920829a62f15e87c0ed791b6c8db2271ba0fd7c2686090510214" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dff85ebcaab8c441b0e3f7ae40a6963ecea8a9f5e74f647e33fcf5ec9a1e89e" +dependencies = [ + "heck", + "proc-macro2", + "pyo3-build-config 0.25.0", + "quote", + "syn 2.0.101", +] + +[[package]] +name = 
"pyo3-stub-gen" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61ebbf208fc5d7af8f5365b74204202996c2ec4dca38ee811486561d0da5c260" +dependencies = [ + "anyhow", + "cargo_metadata", + "chrono", + "either", + "indexmap", + "inventory", + "itertools 0.13.0", + "log", + "maplit", + "num-complex", + "numpy", + "pyo3", + "pyo3-build-config 0.24.2", + "pyo3-stub-gen-derive", + "semver", + "serde", + "toml", +] + +[[package]] +name = "pyo3-stub-gen-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b582d0ff1c7c3bff13e8a04a1e41f783a6b8135531362c748ed655fbff14cb" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "pysplashsurf" +version = "0.1.0" +dependencies = [ + "anyhow", + "bytemuck", + "fxhash", + "log", + "ndarray", + "numpy", + "pyo3", + "pyo3-stub-gen", + "rayon", + "splashsurf_lib", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_distr" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rstar" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421400d13ccfd26dfa5858199c30a5d76f9c54e0dba7575273025b43c5175dbb" +dependencies = [ + "heapless", + "num-traits", + "smallvec", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "safe_arch" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "simba" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", + "wide", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "splashsurf_lib" +version = "0.11.0" +dependencies = [ + "anyhow", + "arrayvec", + "bitflags", + "bytemuck", + "bytemuck_derive", + "dashmap", + "fxhash", + "itertools 0.14.0", + "log", + "nalgebra", + "num-integer", + "num-traits", + "numeric_literals", + "parking_lot", + "rayon", + "rstar", + "thiserror", + "thread_local", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-lexicon" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "toml" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unindent" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" 
+version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wide" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" +dependencies = [ + "bytemuck", + "safe_arch", +] + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +dependencies = [ + "memchr", +] + +[[package]] +name = "zerocopy" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] diff --git a/pysplashsurf/Cargo.toml b/pysplashsurf/Cargo.toml new file mode 100644 index 00000000..859c6274 --- /dev/null +++ b/pysplashsurf/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "pysplashsurf" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +pyo3 = {version = "0.25.0"} +numpy = "0.25.0" +ndarray = "0.16.1" +splashsurf_lib = { path = "../splashsurf_lib" } +fxhash = "0.2.1" +bytemuck = { version = "1.23.0", features = ["extern_crate_alloc"] } +anyhow = "1.0.98" +rayon = "1.10.0" +log = "0.4.27" +pyo3-stub-gen = "0.9.0" + +[features] +extension-module = ["pyo3/extension-module", "pyo3/abi3-py37"] +default = ["extension-module"] + +[lib] +name = "pysplashsurf" +crate-type = ["cdylib", "rlib"] + +[workspace] diff --git a/pysplashsurf/MANIFEST.in b/pysplashsurf/MANIFEST.in new file mode 100644 index 00000000..30c28368 --- /dev/null +++ b/pysplashsurf/MANIFEST.in @@ -0,0 +1,3 @@ +include Cargo.toml +recursive-include src * + diff --git a/pysplashsurf/ParticleData_Fluid_5.vtk b/pysplashsurf/ParticleData_Fluid_5.vtk new file mode 100644 index 00000000..2e3abe86 Binary files /dev/null and b/pysplashsurf/ParticleData_Fluid_5.vtk differ diff --git 
a/pysplashsurf/ParticleData_Fluid_50.bgeo b/pysplashsurf/ParticleData_Fluid_50.bgeo
new file mode 100644
index 00000000..daf24396
Binary files /dev/null and b/pysplashsurf/ParticleData_Fluid_50.bgeo differ
diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md
new file mode 100644
index 00000000..916c672b
--- /dev/null
+++ b/pysplashsurf/README.md
@@ -0,0 +1,73 @@
+# Pysplashsurf
+
+![splashsurf logo](https://raw.githubusercontent.com/InteractiveComputerGraphics/splashsurf/main/logos/logo_small.svg "splashsurf")
+
+PySplashsurf provides Python bindings for `splashsurf`, an open source surface reconstruction library for particle data from SPH simulations.
+Detailed information on the surface reconstruction method, the library itself, and its API can be found on the [project website (splashsurf.physics-simulation.org)](https://splashsurf.physics-simulation.org/) or in the [main repository](https://github.com/InteractiveComputerGraphics/splashsurf).
+
+## Installation
+```
+pip install pysplashsurf
+```
+Requires Python 3.7+.
+
+To install pysplashsurf with meshio support (which adds some additional IO functionality), use
+```
+pip install pysplashsurf[meshio]
+```
+This adds support for the `.bgeo` file extension to meshio, so that particle data in the `BGEOV` format can be read using meshio.
+Meshio is also required for the `write_to_file` method of the bindings.
+The rest of the package still works even if meshio is not installed.
+
+## Usage
+Example that reconstructs the surface from an input file, applies some post-processing methods, and writes the result back to a file:
+```python
+import meshio
+import numpy as np
+import pysplashsurf
+
+mesh = meshio.read("input.vtk")
+particles = np.array(mesh.points, dtype=np.float64)
+
+mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline(
+    particles,
+    particle_radius=0.025,
+    rest_density=1000.0,
+    smoothing_length=2.0,
+    cube_size=0.5,
+    iso_surface_threshold=0.6,
+    mesh_smoothing_weights=True,
+    mesh_smoothing_weights_normalization=13.0,
+    mesh_smoothing_iters=25,
+    normals_smoothing_iters=10,
+    mesh_cleanup=True,
+    compute_normals=True,
+    subdomain_grid=True,
+    subdomain_num_cubes_per_dim=64,
+    output_mesh_smoothing_weights=True
+)
+
+pysplashsurf.write_to_file(mesh_with_data, "output.vtk")
+```
+The `reconstruction_pipeline` function accepts (mostly) the same arguments as the `splashsurf` binary CLI.
+It may be necessary to specify the `dtype` of a function input (as done for `particles` in the example) so that the bindings know which data type to use internally.
+The extension supports single (`np.float32`) and double precision floats (`np.float64`).
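+
+For example, to run just the basic surface reconstruction in single precision, the `reconstruct_surface` function can be called directly. The following is a minimal sketch with randomly generated particle positions (hypothetical input data); the keyword arguments are the ones documented in the bindings:
+```python
+import numpy as np
+import pysplashsurf
+
+# Hypothetical input: 1000 random particle positions in the unit cube
+particles = np.random.rand(1000, 3).astype(np.float32)
+
+# Single precision is selected automatically based on the float32 dtype
+reconstruction = pysplashsurf.reconstruct_surface(
+    particles,
+    particle_radius=0.025,
+    smoothing_length=2.0,
+    cube_size=0.5,
+    iso_surface_threshold=0.6,
+    enable_multi_threading=True,
+)
+```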
+
+## Build instructions
+You can also build the package manually from the source code:
+1. Clone the repository
+2. cd to the `pysplashsurf` directory
+3. Create an environment from `python_environment.yaml` and activate it
+    - I recommend creating it in a subfolder, e.g.
+      ```conda env create --prefix ./envs -f python_environment.yaml```
+    - Then activate it using `conda activate ./envs`
+4. Now, to build the project, use maturin: `maturin develop`
+    - Maturin automatically installs the resulting binary in your Python environment
+    - Set the release flag `-r` or `--release` to build an optimized binary; note that compilation will take slightly longer
+
+### Documentation Build
+To generate the Sphinx documentation, make sure that the package is installed, e.g., through maturin, and then run `make html` in the `pysplashsurf/pysplashsurf/docs` directory.
+The resulting HTML files will be in `pysplashsurf/pysplashsurf/docs/build/html`.
+
+### Stub File Generation
+To automatically generate a stub file for the package, run `cargo run --bin stub_gen` from the `pysplashsurf/` directory.
diff --git a/pysplashsurf/pyproject.toml b/pysplashsurf/pyproject.toml
new file mode 100644
index 00000000..8ca51b13
--- /dev/null
+++ b/pysplashsurf/pyproject.toml
@@ -0,0 +1,37 @@
+[build-system]
+requires = ["maturin>=1,<2"]
+build-backend = "maturin"
+
+[project]
+name = "pysplashsurf"
+version = "0.11.0.0rc1"
+description = "Python bindings for splashsurf, a surface reconstruction library for SPH simulations."
+keywords = ["surface reconstruction", "marching cubes", "sph", "fluid", "particles", "mesh", "splashsurf", "splishsplash"]
+readme = "README.md"
+license = "MIT"
+requires-python = ">=3.7"
+classifiers = [
+    "Private :: Do Not Upload",
+    "Programming Language :: Rust",
+    "Programming Language :: Python :: Implementation :: CPython",
+    "Programming Language :: Python :: Implementation :: PyPy",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Physics",
+    "Topic :: Scientific/Engineering :: Visualization",
+    "Topic :: Utilities",
+]
+dependencies = ["numpy"]
+
+[project.optional-dependencies]
+meshio = ["meshio"]
+
+[project.urls]
+homepage = "https://splashsurf.physics-simulation.org/"
+repository = "https://github.com/InteractiveComputerGraphics/splashsurf.git"
diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py
new file mode 100644
index 00000000..3ec613fa
--- /dev/null
+++ b/pysplashsurf/pysplashsurf/__init__.py
@@ -0,0 +1,638 @@
+from .pysplashsurf import *
+from . import bgeo
+import numpy as np
+
+def push_point_attribute(self, name: str, data: np.ndarray, real_type):
+    """Add a point attribute to the mesh"""
+    if data.ndim == 2:
+        return self.push_point_attribute_vector_real(name, data)
+
+    elif data.ndim == 1:
+        if data.dtype == np.uint64:
+            return self.push_point_attribute_scalar_u64(name, data)
+
+        elif data.dtype == real_type:
+            return self.push_point_attribute_scalar_real(name, data)
+
+        else:
+            raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64")
+
+    else:
+        raise ValueError("Not a valid data array")
+
+def push_cell_attribute(self, name: str, data: np.ndarray, real_type):
+    """Add a cell attribute to the mesh"""
+    if data.ndim == 2:
+        return self.push_cell_attribute_vector_real(name, data)
+
+    elif data.ndim == 1:
+        if data.dtype == np.uint64:
+            return self.push_cell_attribute_scalar_u64(name, data)
+
+        elif data.dtype == real_type:
+            return self.push_cell_attribute_scalar_real(name, data)
+
+        else:
+            raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64")
+
+    else:
+        raise ValueError("Not a valid data array")
+
+TriMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64)
+TriMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__
+TriMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32)
+TriMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__
+
+TriMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float64)
+TriMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__
+TriMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32)
+TriMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__
+
+MixedTriQuadMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64)
+MixedTriQuadMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__
+MixedTriQuadMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32)
+MixedTriQuadMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__
+
+MixedTriQuadMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float64)
+MixedTriQuadMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__
+MixedTriQuadMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32)
+MixedTriQuadMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__
+
+def write_to_file(mesh_with_data, filename, file_format=None, consume_object=False):
+    """Write the mesh and its attributes to a file using meshio
+
+    Parameters
+    ----------
+    mesh_with_data: TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32
+        Mesh with data object to write
+
+    filename: Any
+        File path for the output file
+
+    file_format: str | None
+        File format for the output file; if `None`, the format is derived from the filename
+
+    consume_object: bool
+        Whether the MeshWithData object may be consumed for faster execution.
+        Only the mesh field is consumed.
+    """
+    try:
+        import meshio
+    except ImportError:
+        raise ImportError("meshio is not installed, please install it with `pip install meshio` to use this function")
+
+    mesh = mesh_with_data.take_mesh() if consume_object else mesh_with_data.mesh
+
+    point_data = mesh_with_data.get_point_attributes()
+    cell_data = mesh_with_data.get_cell_attributes()
+
+    if type(mesh) is pysplashsurf.TriMesh3dF64 or type(mesh) is pysplashsurf.TriMesh3dF32:
+        verts, tris = mesh.take_vertices_and_triangles() if consume_object else (mesh.vertices, mesh.triangles)
+        meshio.write_points_cells(filename, verts, [("triangle", tris)], point_data=point_data, cell_data=cell_data, file_format=file_format)
+
+    else:
+        verts, cells = mesh.take_vertices_and_cells() if consume_object else (mesh.vertices, mesh.cells)
+        cells = [("triangle", list(filter(lambda x: len(x) == 3, cells))), ("quad", list(filter(lambda x: len(x) == 4, cells)))]
+        meshio.write_points_cells(filename, verts, cells, point_data=point_data, cell_data=cell_data, file_format=file_format)
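+
+# Hypothetical usage sketch for `write_to_file` (file names are placeholders).
+# `consume_object=True` moves the mesh data out of the Python object instead of
+# copying it:
+#
+#   write_to_file(mesh_with_data, "output.vtk", consume_object=True)
+#
+# With the `meshio` extra installed, the `bgeo` submodule imported above also
+# registers the `.bgeo` format with meshio (see the README), e.g.:
+#
+#   import meshio
+#   particle_mesh = meshio.read("particles.bgeo")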
+
+
+def create_mesh_with_data_object(mesh):
+    """Create the mesh-with-data object corresponding to a mesh object
+
+    Parameters
+    ----------
+    mesh: TriMesh3dF64 | TriMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMesh3dF32
+        Mesh object to convert
+
+    Returns
+    -------
+    TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32
+        Mesh with data object
+    """
+
+    if type(mesh) is TriMesh3dF64:
+        return TriMeshWithDataF64(mesh)
+    elif type(mesh) is TriMesh3dF32:
+        return TriMeshWithDataF32(mesh)
+    elif type(mesh) is MixedTriQuadMesh3dF64:
+        return MixedTriQuadMeshWithDataF64(mesh)
+    elif type(mesh) is MixedTriQuadMesh3dF32:
+        return MixedTriQuadMeshWithDataF32(mesh)
+    else:
+        raise ValueError("Invalid mesh type")
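+
+# Hypothetical sketch tying the helpers above together: wrap a reconstructed
+# triangle mesh and attach a per-vertex attribute before writing it out
+# (`mesh` is a placeholder for a `TriMesh3dF64` produced by a reconstruction):
+#
+#   mesh_with_data = create_mesh_with_data_object(mesh)
+#   weights = np.zeros(len(mesh.vertices), dtype=np.float64)
+#   mesh_with_data.push_point_attribute("weights", weights)
+#   write_to_file(mesh_with_data, "mesh_with_weights.vtk")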
+
+def create_sph_interpolator_object(particle_positions, particle_densities, particle_rest_mass, compact_support_radius):
+    """Create the SPH interpolator object corresponding to a set of particle data
+
+    Parameters
+    ----------
+    particle_positions: np.ndarray
+        2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...]
+
+    particle_densities: np.ndarray
+        1-dimensional array containing all particle densities
+
+    particle_rest_mass: float
+        Rest mass of the particles
+
+    compact_support_radius: float
+        Compact support radius of the SPH kernel
+
+    Returns
+    -------
+    SphInterpolatorF32 | SphInterpolatorF64
+        SphInterpolator object
+    """
+
+    if particle_positions.dtype == 'float32':
+        return SphInterpolatorF32(particle_positions, particle_densities, particle_rest_mass, compact_support_radius)
+    elif particle_positions.dtype == 'float64':
+        return SphInterpolatorF64(particle_positions, particle_densities, particle_rest_mass, compact_support_radius)
+    else:
+        raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particle_positions)")
+
+def create_aabb_object(aabb_min, aabb_max):
+    """Create the AABB object corresponding to a pair of min and max corners
+
+    Parameters
+    ----------
+    aabb_min: np.ndarray
+        Smallest corner of the axis-aligned bounding box
+
+    aabb_max: np.ndarray
+        Largest corner of the axis-aligned bounding box
+
+    Returns
+    -------
+    Aabb3dF32 | Aabb3dF64
+        Aabb object
+    """
+
+    if aabb_min.dtype == 'float32':
+        return Aabb3dF32(aabb_min, aabb_max)
+    elif aabb_min.dtype == 'float64':
+        return Aabb3dF64(aabb_min, aabb_max)
+    else:
+        raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for aabb_min and aabb_max)")
+
+def create_aabb_object_from_points(points):
+    """Create the AABB object corresponding to a set of points
+
+    Parameters
+    ----------
+    points: np.ndarray
+        2-dimensional array containing all point positions [[ax, ay, az], [bx, by, bz], ...]
+
+    Returns
+    -------
+    Aabb3dF32 | Aabb3dF64
+        Aabb object
+    """
+
+    if points.dtype == 'float32':
+        return Aabb3dF32.from_points(points)
+    elif points.dtype == 'float64':
+        return Aabb3dF64.from_points(points)
+    else:
+        raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)")
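+
+# Hypothetical sketch: build a bounding box from particle positions and run the
+# parallel neighborhood search defined further below (`particles` is a
+# placeholder `(N, 3)` float64 array; the search radius is an example value):
+#
+#   domain = create_aabb_object_from_points(particles)
+#   neighbors = neighborhood_search_spatial_hashing_parallel(domain, particles, 0.05)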
+
+def reconstruct_surface(
+    particles, *,
+    particle_radius: float = 0.025,
+    rest_density: float = 1000.0,
+    smoothing_length: float = 2.0,
+    cube_size: float = 0.5,
+    iso_surface_threshold: float = 0.6,
+    enable_multi_threading: bool = False,
+    global_neighborhood_list: bool = False,
+    subdomain_grid: bool = False,
+    subdomain_num_cubes_per_dim: int = 64,
+    aabb_min=None,
+    aabb_max=None,
+):
+    """Reconstruct the surface from only particle positions
+
+    Performs a marching cubes surface construction of the fluid represented by the given particle positions
+
+    Parameters
+    ----------
+    particles: np.ndarray
+        2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...]
+
+    particle_radius: float, optional (default=0.025)
+        Particle radius
+
+    rest_density: float
+        Rest density of the fluid
+
+    smoothing_length: float
+        Smoothing length of the SPH kernel (in multiples of the particle radius)
+
+    cube_size: float
+        Edge length of the cubes of the marching cubes grid (in multiples of the particle radius)
+
+    iso_surface_threshold: float
+        Threshold for the iso surface
+
+    enable_multi_threading: bool
+        Whether to enable multi-threading
+
+    global_neighborhood_list: bool
+        Whether to compute a global neighborhood list
+
+    subdomain_grid: bool
+        Enable spatial decomposition using a regular grid-based approach
+
+    subdomain_num_cubes_per_dim: int
+        Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis
+
+    aabb_min: np.ndarray
+        Smallest corner of the axis-aligned bounding box
+
+    aabb_max: np.ndarray
+        Largest corner of the axis-aligned bounding box
+
+    Returns
+    -------
+    SurfaceReconstructionF32 | SurfaceReconstructionF64
+        SurfaceReconstruction object containing the reconstructed mesh and the grid that was used
+    """
+
+    if particles.dtype == 'float32':
+        return reconstruct_surface_f32(particles, particle_radius=particle_radius, rest_density=rest_density,
+                                       smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold,
+                                       enable_multi_threading=enable_multi_threading, global_neighborhood_list=global_neighborhood_list,
+                                       use_custom_grid_decomposition=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim,
+                                       aabb_min=aabb_min, aabb_max=aabb_max)
+    elif particles.dtype == 'float64':
+        return reconstruct_surface_f64(particles, particle_radius=particle_radius, rest_density=rest_density,
+                                       smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold,
+                                       enable_multi_threading=enable_multi_threading, global_neighborhood_list=global_neighborhood_list,
+                                       use_custom_grid_decomposition=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim,
+                                       aabb_min=aabb_min, aabb_max=aabb_max)
+    else:
+        raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)")
+
+    Parameters
+    ----------
+    mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64
+        Mesh object to simplify
+
+    grid: UniformGridF32 | UniformGridF64
+        Uniform grid object that was used to construct the mesh
+
+    max_iter: int
+        Maximum number of iterations
+
+    keep_vertices: bool
+        Flag to keep any vertices without connectivity resulting from the cleanup
+
+    Returns
+    -------
+    list
+        Vertex connectivity list of the simplified mesh
+    """
+    if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32:
+        return marching_cubes_cleanup_f32(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices)
+
+    elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64:
+        return marching_cubes_cleanup_f64(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices)
+
+    else:
+        raise ValueError("Invalid mesh type")
+
+def decimation(
+    mesh,
+    keep_vertices: bool = False
+):
+    """Barnacle decimation
+
+    For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023).
+
+    Parameters
+    ----------
+    mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64
+        Mesh object to simplify
+
+    keep_vertices: bool
+        Flag to keep any vertices without connectivity resulting from the decimation
+
+    Returns
+    -------
+    list
+        Vertex connectivity list of the simplified mesh
+    """
+    if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32:
+        return decimation_f32(mesh, keep_vertices=keep_vertices)
+
+    elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64:
+        return decimation_f64(mesh, keep_vertices=keep_vertices)
+
+    else:
+        raise ValueError("Invalid mesh type")
+
+def par_laplacian_smoothing_inplace(
+    mesh,
+    vertex_connectivity: list[list[int]],
+    iterations: int,
+    beta: float,
+    weights: list[float]
+):
+    """Laplacian Smoothing with feature weights
+
+    Move each vertex towards the mean position of its neighbors.
+    The factor beta in [0;1] is proportional to the amount of smoothing (for beta=1 each vertex is placed at the mean position).
+    Additionally, feature weights can be specified to apply a varying amount of smoothing over the mesh.
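+
+    Illustrative usage (a sketch; assumes a TriMesh3dF32/F64 named `mesh`):
+    `conn = mesh.vertex_vertex_connectivity()` followed by
+    `par_laplacian_smoothing_inplace(mesh, conn, iterations=5, beta=1.0, weights=[1.0] * len(conn))`
+    smooths all vertices uniformly; per-vertex weights of 0.0 leave the corresponding vertices in place.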
+
+    Parameters
+    ----------
+    mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64
+        Mesh object to smooth
+
+    vertex_connectivity: list[list[int]]
+        Vertex connectivity list
+
+    iterations: int
+        Number of iterations
+
+    beta: float
+        Smoothing factor
+
+    weights: list[float]
+        Feature weights for the vertices
+    """
+
+    if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32:
+        par_laplacian_smoothing_inplace_f32(mesh, vertex_connectivity, iterations, beta, weights)
+
+    elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64:
+        par_laplacian_smoothing_inplace_f64(mesh, vertex_connectivity, iterations, beta, weights)
+
+    else:
+        raise ValueError("Invalid mesh type")
+
+def par_laplacian_smoothing_normals_inplace(
+    normals: np.ndarray,
+    vertex_connectivity: list[list[int]],
+    iterations: int
+):
+    """Laplacian Smoothing of the vertex normals
+
+    Parameters
+    ----------
+    normals: np.ndarray
+        2D-Array of vertex normals to smooth
+
+    vertex_connectivity: list[list[int]]
+        Vertex connectivity list
+
+    iterations: int
+        Number of iterations
+    """
+
+    if normals.dtype == 'float32':
+        par_laplacian_smoothing_normals_inplace_f32(normals, vertex_connectivity, iterations)
+
+    elif normals.dtype == 'float64':
+        par_laplacian_smoothing_normals_inplace_f64(normals, vertex_connectivity, iterations)
+
+    else:
+        raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for normals)")
+
+def neighborhood_search_spatial_hashing_parallel(
+    domain,
+    particle_positions: np.ndarray,
+    search_radius: float
+):
+    """Performs a neighborhood search (multi-threaded implementation)
+
+    Returns the indices of all neighboring particles within the given search radius per particle as a nested list (`Vec<Vec<usize>>` on the Rust side).
+
+    Parameters
+    ----------
+    domain: Aabb3dF32 | Aabb3dF64
+        Axis-aligned bounding box of the domain
+
+    particle_positions: np.ndarray
+        2D-Array of particle positions
+
+    search_radius: float
+        Search radius
+    """
+
+    if type(domain) is Aabb3dF32:
+        return neighborhood_search_spatial_hashing_parallel_f32(domain, particle_positions, search_radius)
+
+    elif type(domain) is Aabb3dF64:
+        return neighborhood_search_spatial_hashing_parallel_f64(domain, particle_positions, search_radius)
+
+    else:
+        raise ValueError("Invalid domain type")
+
+def check_mesh_consistency(
+    grid,
+    mesh, *,
+    check_closed: bool,
+    check_manifold: bool,
+    debug: bool,
+):
+    """Check mesh consistency
+
+    Parameters
+    ----------
+    grid: UniformGridF32 | UniformGridF64
+        Uniform grid object
+
+    mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64
+        Triangular mesh object
+
+    check_closed: bool
+        Flag to check for closed mesh
+
+    check_manifold: bool
+        Flag to check for manifold mesh
+
+    debug: bool
+        Flag to enable debug output
+    """
+
+    if type(grid) is UniformGridF32 and (type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32):
+        return check_mesh_consistency_f32(grid, mesh, check_closed=check_closed, check_manifold=check_manifold, debug=debug)
+
+    elif type(grid) is UniformGridF64 and (type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64):
+        return check_mesh_consistency_f64(grid, mesh, check_closed=check_closed, check_manifold=check_manifold, debug=debug)
+
+    else:
+        raise ValueError("Invalid grid or mesh type")
+
+def convert_tris_to_quads(
+    mesh, *,
+    non_squareness_limit: float,
+    normal_angle_limit_rad: float,
+    max_interior_angle: float,
+):
+    """Converts triangles to quads
+
+    Parameters
+    ----------
+    mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64
+        Triangular mesh
object + When called with a MeshWithData Object, the resulting MixedTriQuadMeshWithData won't inherit the cell attributes from the input. + + non_squareness_limit: float + Non-squareness limit + + normal_angle_limit_rad: float + Normal angle limit in radians + + max_interior_angle: float + Maximum interior angle in radians + + Returns + ------- + MixedTriQuadMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMeshWithDataF32 | MixedTriQuadMeshWithDataF64 + Mixed triangular and quadrilateral mesh object + """ + + if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: + return convert_tris_to_quads_f32(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) + + elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: + return convert_tris_to_quads_f64(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) + + else: + raise ValueError("Invalid mesh type") + + +def reconstruction_pipeline( + particles, *, attributes_to_interpolate={}, particle_radius, + rest_density=1000.0, smoothing_length=2.0, cube_size, + iso_surface_threshold=0.6, enable_multi_threading=True, mesh_smoothing_weights=False, sph_normals=False, + mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=None, normals_smoothing_iters=None, + mesh_cleanup=False, decimate_barnacles=False, keep_vertices=False, + compute_normals=False, output_raw_normals=False, output_mesh_smoothing_weights=False, mesh_aabb_clamp_vertices=False, + subdomain_grid=True, subdomain_num_cubes_per_dim=64, aabb_min=None, aabb_max=None, mesh_aabb_min=None, mesh_aabb_max=None +): + """Surface reconstruction based on particle positions and post-processing + + Parameters + ---------- + particles: np.ndarray + 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] + + attributes_to_interpolate: dict + Dictionary containing all attributes to interpolate. The keys are the attribute names and the values are the corresponding 1D/2D arrays. + The arrays must have the same length as the number of particles. + Supported array types are 2D float32/float64 arrays for vector attributes and 1D uint64/float32/float64 arrays for scalar attributes. + + particle_radius: float + Particle radius + + rest_density: float + Rest density of the fluid + + smoothing_length: float + Smoothing length of the fluid in multiples of the particle radius (compact support radius of SPH kernel will be twice the smoothing length) + + cube_size: float + Size of the cubes used for the marching cubes grid in multiples of the particle radius + + iso_surface_threshold: float + Threshold for the iso surface + + enable_multi_threading: bool + Multi-threading flag + + sph_normals: bool + Flag to compute normals using SPH interpolation instead of geometry-based normals. + + mesh_smoothing_weights: bool + Flag to compute mesh smoothing weights + This implements the method from “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). 
+ + mesh_smoothing_weights_normalization: float + Normalization factor for the mesh smoothing weights + + mesh_smoothing_iters: int + Number of iterations for the mesh smoothing + + normals_smoothing_iters: int + Number of iterations for the normal smoothing + + mesh_cleanup: bool + Flag to perform mesh cleanup + This implements the method from “Compact isocontours from sampled data” (Moore, Warren; 1992) + + decimate_barnacles: bool + Flag to perform barnacle decimation + For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). + + keep_vertices: bool + Flag to keep any vertices without connectivity resulting from mesh cleanup or decimation step + + compute_normals: bool + Flag to compute normals + If set to True, the normals will be computed and stored in the mesh. + + output_raw_normals: bool + Flag to output the raw normals in addition to smoothed normals if smoothing of normals is enabled + + output_mesh_smoothing_weights: bool + Flag to store the mesh smoothing weights if smoothing weights are computed. + + mesh_aabb_clamp_vertices: bool + Flag to clamp the vertices of the mesh to the AABB + + subdomain_grid: bool + Enables spatial decomposition using a regular grid-based approach + + subdomain_num_cubes_per_dim: int + Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis + + aabb_min: np.ndarray + Smallest corner of the axis-aligned bounding box + + aabb_max: np.ndarray + Largest corner of the axis-aligned bounding box + + mesh_aabb_min: np.ndarray + Smallest corner of the axis-aligned bounding box for the mesh + + mesh_aabb_max: np.ndarray + Largest corner of the axis-aligned bounding box for the mesh + + Returns + ------- + tuple[TriMeshWithDataF32 | TriMeshWithDataF64, SurfaceReconstructionF32 | SurfaceReconstructionF64] + Mesh with data object and SurfaceReconstruction object containing the reconstructed mesh and used grid + """ + if particles.dtype == 'float32': + return reconstruction_pipeline_f32(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, + smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, + aabb_min=aabb_min, aabb_max=aabb_max, enable_multi_threading=enable_multi_threading, + use_custom_grid_decomposition=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, + global_neighborhood_list=False, mesh_cleanup=mesh_cleanup, decimate_barnacles=decimate_barnacles, + keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, + mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, + output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices) + elif particles.dtype == 'float64': + return reconstruction_pipeline_f64(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, + smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, + aabb_min=aabb_min, aabb_max=aabb_max, enable_multi_threading=enable_multi_threading, + use_custom_grid_decomposition=subdomain_grid, 
subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, + global_neighborhood_list=False, mesh_cleanup=mesh_cleanup, decimate_barnacles=decimate_barnacles, + keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, + mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, + output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices) + else: + raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)") diff --git a/pysplashsurf/pysplashsurf/bgeo.py b/pysplashsurf/pysplashsurf/bgeo.py new file mode 100644 index 00000000..9e58072b --- /dev/null +++ b/pysplashsurf/pysplashsurf/bgeo.py @@ -0,0 +1,116 @@ +# The contents of this file are partially copied from the blender sequence loader (https://github.com/InteractiveComputerGraphics/blender-sequence-loader) + +try: + import gzip + import numpy as np + import meshio + + + def readbgeo_to_meshio(filepath): + with gzip.open(filepath, 'r') as file: + byte = file.read(5) + if byte != b"BgeoV": + raise Exception('not bgeo file format') + byte = file.read(4) + version = int.from_bytes(byte, byteorder="big") + if version != 5: + raise Exception('bgeo file not version 5') + + header = {} + point_attributes = {} + point_attributes_names = [] + point_attributes_sizes = [] + point_attributes_types = [] + position = None + + byte = file.read(4) + header['nPoints'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nPrims'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nPointGroups'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nPrimGroups'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nPointAttrib'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nVertexAttrib'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nPrimAttrib'] = int.from_bytes(byte, byteorder="big") + + byte = file.read(4) + header['nAttrib'] = int.from_bytes(byte, byteorder="big") + + particle_size = 4 + + for _ in range(header['nPointAttrib']): + byte = file.read(2) + namelength = int.from_bytes(byte, byteorder="big") + name_binary = file.read(namelength) + name = name_binary.decode('utf-8') + point_attributes_names.append(name) + + byte = file.read(2) + size = int.from_bytes(byte, byteorder="big") + point_attributes_sizes.append(size) + particle_size += size + + byte = file.read(4) + input_dtype = int.from_bytes(byte, byteorder="big") + if input_dtype == 0: + point_attributes_types.append('FLOAT') + # read default value + # not going to do anything about it + byte = file.read(size * 4) + elif input_dtype == 1: + point_attributes_types.append('INT') + # read default value + # not going to do anything about it + byte = file.read(size * 4) + elif input_dtype == 5: + point_attributes_types.append('VECTOR') + # read default value + # not going to do anything about it + byte = file.read(size * 4) + else: + raise Exception('input_dtype unknown/unsupported') + byte = file.read(particle_size * header['nPoints'] * 4) + # > means big endian + attribute_data = np.frombuffer(byte, dtype='>f') + attribute_data = np.reshape(attribute_data, 
(header['nPoints'], particle_size))
+            # the first 3 columns are the position data
+            position = attribute_data[:, :3]
+            # the 4th column is the homogeneous coordinate, which is all 1 and will be ignored
+
+            current_attribute_start_point = 4
+            for i in range(header['nPointAttrib']):
+                if point_attributes_types[i] == 'FLOAT':
+                    point_attributes[point_attributes_names[i]] = attribute_data[:, current_attribute_start_point]
+                    current_attribute_start_point += 1
+                elif point_attributes_types[i] == 'VECTOR':
+                    point_attributes[point_attributes_names[i]] = attribute_data[:, current_attribute_start_point:current_attribute_start_point + 3]
+                    current_attribute_start_point += 3
+                elif point_attributes_types[i] == 'INT':
+                    data = (attribute_data[:, current_attribute_start_point]).tobytes()
+                    # > means big endian
+                    point_attributes[point_attributes_names[i]] = np.frombuffer(data, dtype='>i')
+                    current_attribute_start_point += 1
+            remaining = file.read()
+            if remaining != b'\x00\xff':
+                raise Exception("bgeo file did not end with the expected end-of-file marker")
+            return meshio.Mesh(position, [('vertex', [])], point_data=point_attributes)
+
+
+    # no need for write function
+    meshio.register_format("bgeo", [".bgeo"], readbgeo_to_meshio, {".bgeo": None})
+
+except ImportError:
+    pass
\ No newline at end of file
diff --git a/pysplashsurf/pysplashsurf/docs/Makefile b/pysplashsurf/pysplashsurf/docs/Makefile
new file mode 100644
index 00000000..d0c3cbf1
--- /dev/null
+++ b/pysplashsurf/pysplashsurf/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/pysplashsurf/pysplashsurf/docs/make.bat b/pysplashsurf/pysplashsurf/docs/make.bat
new file mode 100644
index 00000000..6fcf05b4
--- /dev/null
+++ b/pysplashsurf/pysplashsurf/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/pysplashsurf/pysplashsurf/docs/requirements.txt b/pysplashsurf/pysplashsurf/docs/requirements.txt new file mode 100644 index 00000000..6b63755c --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/requirements.txt @@ -0,0 +1,6 @@ +pip==25.0.1 +numpy==2.2.3 +meshio==5.3.5 +furo +numpydoc==1.8.0 +myst-parser==4.0.1 diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst new file mode 100644 index 00000000..e8b95fa5 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -0,0 +1,36 @@ +API +=== + +.. currentmodule:: pysplashsurf + +Methods +------- + +.. autosummary:: + check_mesh_consistency + convert_tris_to_quads + create_aabb_object + create_aabb_object_from_points + create_mesh_with_data_object + create_sph_interpolator_object + decimation + marching_cubes_cleanup + neighborhood_search_spatial_hashing_parallel + par_laplacian_smoothing_inplace + par_laplacian_smoothing_normals_inplace + reconstruct_surface + reconstruction_pipeline + write_to_file + +Classes +------- + +.. autosummary:: + Aabb3dF32 + MixedTriQuadMesh3dF32 + MixedTriQuadMeshWithDataF32 + SphInterpolatorF32 + SurfaceReconstructionF32 + TriMesh3dF32 + TriMeshWithDataF32 + UniformGridF32 \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/classes.rst b/pysplashsurf/pysplashsurf/docs/source/classes.rst new file mode 100644 index 00000000..afb33aa8 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/classes.rst @@ -0,0 +1,24 @@ +Classes +======= + +Additionally, there exists a F64 version for every class which is otherwise identical to the F32 version. + +.. currentmodule:: pysplashsurf + +.. autoclass:: Aabb3dF32 + +.. autoclass:: MixedTriQuadMesh3dF32 + +.. autoclass:: MixedTriQuadMeshWithDataF32 + :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real + +.. autoclass:: SphInterpolatorF32 + +.. autoclass:: SurfaceReconstructionF32 + +.. autoclass:: TriMesh3dF32 + +.. autoclass:: TriMeshWithDataF32 + :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real + +.. autoclass:: UniformGridF32 \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/conf.py b/pysplashsurf/pysplashsurf/docs/source/conf.py new file mode 100644 index 00000000..7bd06548 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/conf.py @@ -0,0 +1,60 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) +import pysplashsurf + +# -- Project information ----------------------------------------------------- + +project = 'Pysplashsurf' +copyright = '2025, Interactive Computer Graphics' +author = 'Interactive Computer Graphics' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'numpydoc', + 'myst_parser', +] + +source_suffix = ['.rst', '.md'] + +numpydoc_class_members_toctree = False + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'furo' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/index.rst b/pysplashsurf/pysplashsurf/docs/source/index.rst new file mode 100644 index 00000000..62b8b9f9 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/index.rst @@ -0,0 +1,28 @@ +.. Pysplashsurf documentation master file, created by + sphinx-quickstart on Mon Mar 24 14:32:44 2025. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Pysplashsurf's documentation! +======================================== + +.. image:: https://raw.githubusercontent.com/InteractiveComputerGraphics/splashsurf/main/logos/logo_small.svg + +Check out the :doc:`Readme <./introduction>` for a brief overview of the package. + +.. toctree:: + :caption: Table of Contents + + introduction + methods + classes + api + + + +.. Indices and tables +.. ================== + +.. * :ref:`genindex` +.. * :ref:`modindex` +.. * :ref:`search` diff --git a/pysplashsurf/pysplashsurf/docs/source/introduction.md b/pysplashsurf/pysplashsurf/docs/source/introduction.md new file mode 100644 index 00000000..baba6872 --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/introduction.md @@ -0,0 +1,2 @@ +```{include} ../../../README.md +``` \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/methods.rst b/pysplashsurf/pysplashsurf/docs/source/methods.rst new file mode 100644 index 00000000..420f6a0b --- /dev/null +++ b/pysplashsurf/pysplashsurf/docs/source/methods.rst @@ -0,0 +1,34 @@ +Methods +======= + +All methods infer float precision based on the input (32bit or 64bit). + +.. currentmodule:: pysplashsurf + +.. autofunction:: check_mesh_consistency + +.. autofunction:: convert_tris_to_quads + +.. autofunction:: create_aabb_object + +.. autofunction:: create_aabb_object_from_points + +.. autofunction:: create_mesh_with_data_object + +.. autofunction:: create_sph_interpolator_object + +.. autofunction:: decimation + +.. 
autofunction:: marching_cubes_cleanup + +.. autofunction:: neighborhood_search_spatial_hashing_parallel + +.. autofunction:: par_laplacian_smoothing_inplace + +.. autofunction:: par_laplacian_smoothing_normals_inplace + +.. autofunction:: reconstruct_surface + +.. autofunction:: reconstruction_pipeline + +.. autofunction:: write_to_file \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/py.typed b/pysplashsurf/pysplashsurf/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi new file mode 100644 index 00000000..d1db6ece --- /dev/null +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -0,0 +1,746 @@ +# This file is automatically generated by pyo3_stub_gen +# ruff: noqa: E501, F401 + +import builtins +import numpy +import numpy.typing +import typing + +class Aabb3dF32: + r""" + Aabb3d wrapper + """ + def __new__(cls,min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]): ... + @staticmethod + def from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: + r""" + Constructs the smallest AABB fitting around all the given points + """ + ... + + @staticmethod + def par_from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: + r""" + Constructs the smallest AABB fitting around all the given points, parallel version + """ + ... + + @staticmethod + def zeros() -> Aabb3dF32: + r""" + Constructs a degenerate AABB with min and max set to zero + """ + ... + + @staticmethod + def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF32: + r""" + Constructs a degenerate AABB with zero extents centered at the given point + """ + ... + + def min(self) -> numpy.typing.NDArray[numpy.float32]: + r""" + Returns the min coordinate of the bounding box + """ + ... + + def max(self) -> numpy.typing.NDArray[numpy.float32]: + r""" + Returns the max coordinate of the bounding box + """ + ... + + def is_consistent(self) -> builtins.bool: + r""" + Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` + """ + ... + + def is_degenerate(self) -> builtins.bool: + r""" + Returns whether the AABB is degenerate in any dimension, i.e. `aabb.min()[i] == aabb.max()[i]` for any `i` + """ + ... + + def extents(self) -> numpy.typing.NDArray[numpy.float32]: + r""" + Returns the extents of the bounding box (vector connecting min and max point of the box) + """ + ... + + def min_extent(self) -> builtins.float: + r""" + Returns the smallest scalar extent of the AABB over all of its dimensions + """ + ... + + def max_extent(self) -> builtins.float: + r""" + Returns the largest scalar extent of the AABB over all of its dimensions + """ + ... + + def centroid(self) -> numpy.typing.NDArray[numpy.float32]: + r""" + Returns the geometric centroid of the AABB (mean of the corner points) + """ + ... + + def contains_aabb(self, other:Aabb3dF32) -> builtins.bool: + r""" + Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + """ + ... + + def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: + r""" + Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + """ + ... + + def translate(self, vector:typing.Sequence[builtins.float]) -> None: + r""" + Translates the AABB by the given vector + """ + ... 
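+
+    # Illustrative usage (a sketch with hypothetical names, not generated output):
+    #   box = Aabb3dF32([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])  # unit box
+    #   box.translate([0.5, 0.0, 0.0])                     # now spans [0.5, 0, 0] to [1.5, 1, 1]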
+ + def center_at_origin(self) -> None: + r""" + Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) + """ + ... + + def scale_uniformly(self, scaling:builtins.float) -> None: + r""" + Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) + """ + ... + + def join(self, other:Aabb3dF32) -> None: + r""" + Enlarges this AABB to the smallest AABB enclosing both itself and another AABB + """ + ... + + def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: + r""" + Enlarges this AABB to the smallest AABB enclosing both itself and another point + """ + ... + + def grow_uniformly(self, margin:builtins.float) -> None: + r""" + Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) + """ + ... + + def enclosing_cube(self) -> Aabb3dF32: + r""" + Returns the smallest cubical AABB with the same center that encloses this AABB + """ + ... + + +class Aabb3dF64: + r""" + Aabb3d wrapper + """ + def __new__(cls,min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]): ... + @staticmethod + def from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: + r""" + Constructs the smallest AABB fitting around all the given points + """ + ... + + @staticmethod + def par_from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: + r""" + Constructs the smallest AABB fitting around all the given points, parallel version + """ + ... + + @staticmethod + def zeros() -> Aabb3dF64: + r""" + Constructs a degenerate AABB with min and max set to zero + """ + ... + + @staticmethod + def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF64: + r""" + Constructs a degenerate AABB with zero extents centered at the given point + """ + ... + + def min(self) -> numpy.typing.NDArray[numpy.float64]: + r""" + Returns the min coordinate of the bounding box + """ + ... + + def max(self) -> numpy.typing.NDArray[numpy.float64]: + r""" + Returns the max coordinate of the bounding box + """ + ... + + def is_consistent(self) -> builtins.bool: + r""" + Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` + """ + ... + + def is_degenerate(self) -> builtins.bool: + r""" + Returns whether the AABB is degenerate in any dimension, i.e. `aabb.min()[i] == aabb.max()[i]` for any `i` + """ + ... + + def extents(self) -> numpy.typing.NDArray[numpy.float64]: + r""" + Returns the extents of the bounding box (vector connecting min and max point of the box) + """ + ... + + def min_extent(self) -> builtins.float: + r""" + Returns the smallest scalar extent of the AABB over all of its dimensions + """ + ... + + def max_extent(self) -> builtins.float: + r""" + Returns the largest scalar extent of the AABB over all of its dimensions + """ + ... + + def centroid(self) -> numpy.typing.NDArray[numpy.float64]: + r""" + Returns the geometric centroid of the AABB (mean of the corner points) + """ + ... + + def contains_aabb(self, other:Aabb3dF64) -> builtins.bool: + r""" + Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + """ + ... + + def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: + r""" + Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + """ + ... 
+ + def translate(self, vector:typing.Sequence[builtins.float]) -> None: + r""" + Translates the AABB by the given vector + """ + ... + + def center_at_origin(self) -> None: + r""" + Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) + """ + ... + + def scale_uniformly(self, scaling:builtins.float) -> None: + r""" + Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) + """ + ... + + def join(self, other:Aabb3dF64) -> None: + r""" + Enlarges this AABB to the smallest AABB enclosing both itself and another AABB + """ + ... + + def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: + r""" + Enlarges this AABB to the smallest AABB enclosing both itself and another point + """ + ... + + def grow_uniformly(self, margin:builtins.float) -> None: + r""" + Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) + """ + ... + + def enclosing_cube(self) -> Aabb3dF64: + r""" + Returns the smallest cubical AABB with the same center that encloses this AABB + """ + ... + + +class MixedTriQuadMesh3dF32: + r""" + MixedTriQuadMesh3d wrapper + """ + vertices: numpy.typing.NDArray[numpy.float32] + cells: builtins.list[builtins.list[builtins.int]] + def take_vertices_and_cells(self) -> tuple: + r""" + Returns a tuple of vertices and triangles without copying the data, removes the data in the class + """ + ... + + +class MixedTriQuadMesh3dF64: + r""" + MixedTriQuadMesh3d wrapper + """ + vertices: numpy.typing.NDArray[numpy.float64] + cells: builtins.list[builtins.list[builtins.int]] + def take_vertices_and_cells(self) -> tuple: + r""" + Returns a tuple of vertices and triangles without copying the data, removes the data in the class + """ + ... + + +class MixedTriQuadMeshWithDataF32: + r""" + MeshWithData wrapper + """ + mesh: MixedTriQuadMesh3dF32 + def __new__(cls,mesh:MixedTriQuadMesh3dF32): ... + def take_mesh(self) -> MixedTriQuadMesh3dF32: + r""" + Returns mesh without copying the mesh data, removes it from the object + """ + ... + + def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF32: + r""" + Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary + """ + ... + + def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... + + def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: + ... + + def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... + + def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: + ... + + def get_point_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh vertex attribute by name + """ + ... + + def get_cell_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh cell attribute by name + """ + ... + + def get_point_attributes(self) -> dict: + r""" + Get all point attributes in a python dictionary + """ + ... 
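+
+    # Illustrative sketch (hypothetical names): after m.push_point_attribute_scalar_real("density", values),
+    # m.get_point_attributes() returns a dict such as {"density": values}, keyed by attribute name.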
+
+    def get_cell_attributes(self) -> dict:
+        r"""
+        Get all cell attributes in a python dictionary
+        """
+        ...
+
+    def get_point_attribute_keys(self) -> list:
+        r"""
+        Get all registered point attribute names
+        """
+        ...
+
+    def get_cell_attribute_keys(self) -> list:
+        r"""
+        Get all registered cell attribute names
+        """
+        ...
+
+
+class MixedTriQuadMeshWithDataF64:
+    r"""
+    MeshWithData wrapper
+    """
+    mesh: MixedTriQuadMesh3dF64
+    def __new__(cls,mesh:MixedTriQuadMesh3dF64): ...
+    def take_mesh(self) -> MixedTriQuadMesh3dF64:
+        r"""
+        Returns mesh without copying the mesh data, removes it from the object
+        """
+        ...
+
+    def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF64:
+        r"""
+        Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary
+        """
+        ...
+
+    def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None:
+        ...
+
+    def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None:
+        ...
+
+    def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None:
+        ...
+
+    def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None:
+        ...
+
+    def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None:
+        ...
+
+    def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None:
+        ...
+
+    def get_point_attribute(self, name:builtins.str) -> typing.Any:
+        r"""
+        Get mesh vertex attribute by name
+        """
+        ...
+
+    def get_cell_attribute(self, name:builtins.str) -> typing.Any:
+        r"""
+        Get mesh cell attribute by name
+        """
+        ...
+
+    def get_point_attributes(self) -> dict:
+        r"""
+        Get all point attributes in a python dictionary
+        """
+        ...
+
+    def get_cell_attributes(self) -> dict:
+        r"""
+        Get all cell attributes in a python dictionary
+        """
+        ...
+
+    def get_point_attribute_keys(self) -> list:
+        r"""
+        Get all registered point attribute names
+        """
+        ...
+
+    def get_cell_attribute_keys(self) -> list:
+        r"""
+        Get all registered cell attribute names
+        """
+        ...
+
+
+class SphInterpolatorF32:
+    r"""
+    SphInterpolator wrapper
+    """
+    def __new__(cls,particle_positions:numpy.typing.NDArray[numpy.float32], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float): ...
+    def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> builtins.list[builtins.float]:
+        r"""
+        Interpolates a scalar per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+        """
+        ...
+
+    def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float32]) -> numpy.typing.NDArray[numpy.float32]:
+        r"""
+        Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation
+        """
+        ...
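+
+    # Illustrative sketch (assumed variable names): given outputs of a reconstruction,
+    #   interp = create_sph_interpolator_object(particle_positions, particle_densities, particle_rest_mass, compact_support_radius)
+    #   normals = interp.interpolate_normals(mesh_vertices)
+    # evaluates SPH surface normals at the mesh vertices.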
+
+    def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float32], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float32]:
+        r"""
+        Interpolates a vectorial per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+        """
+        ...
+
+
+class SphInterpolatorF64:
+    r"""
+    SphInterpolator wrapper
+    """
+    def __new__(cls,particle_positions:numpy.typing.NDArray[numpy.float64], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float): ...
+    def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> builtins.list[builtins.float]:
+        r"""
+        Interpolates a scalar per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+        """
+        ...
+
+    def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float64]) -> numpy.typing.NDArray[numpy.float64]:
+        r"""
+        Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation
+        """
+        ...
+
+    def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float64], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float64]:
+        r"""
+        Interpolates a vectorial per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+        """
+        ...
+
+
+class SurfaceReconstructionF32:
+    r"""
+    SurfaceReconstruction wrapper
+    """
+    mesh: TriMesh3dF32
+    grid: UniformGridF32
+    def particle_densities(self) -> builtins.list[builtins.float]:
+        r"""
+        Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this)
+        """
+        ...
+
+    def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]:
+        r"""
+        Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters)
+        """
+        ...
+
+
+class SurfaceReconstructionF64:
+    r"""
+    SurfaceReconstruction wrapper
+    """
+    mesh: TriMesh3dF64
+    grid: UniformGridF64
+    def particle_densities(self) -> builtins.list[builtins.float]:
+        r"""
+        Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this)
+        """
+        ...
+
+    def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]:
+        r"""
+        Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters)
+        """
+        ...
+
+
+class TriMesh3dF32:
+    r"""
+    TriMesh3d wrapper
+    """
+    vertices: numpy.typing.NDArray[numpy.float32]
+    triangles: numpy.typing.NDArray[numpy.uint64]
+    def take_vertices_and_triangles(self) -> tuple:
+        r"""
+        Returns a tuple of vertices and triangles without copying the data, removes the data in the class
+        """
+        ...
+
+    def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float32]:
+        r"""
+        Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version)
+        """
+        ...
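+
+    # Illustrative sketch: normals = mesh.par_vertex_normals() yields one normal per vertex; these can be
+    # smoothed further via par_laplacian_smoothing_normals_inplace(normals, mesh.vertex_vertex_connectivity(), iterations=2).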
+ + def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: + r""" + Returns a mapping of all mesh vertices to the set of their connected neighbor vertices + """ + ... + + +class TriMesh3dF64: + r""" + TriMesh3d wrapper + """ + vertices: numpy.typing.NDArray[numpy.float64] + triangles: numpy.typing.NDArray[numpy.uint64] + def take_vertices_and_triangles(self) -> tuple: + r""" + Returns a tuple of vertices and triangles without copying the data, removes the data in the class + """ + ... + + def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float64]: + r""" + Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) + """ + ... + + def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: + r""" + Returns a mapping of all mesh vertices to the set of their connected neighbor vertices + """ + ... + + +class TriMeshWithDataF32: + r""" + MeshWithData wrapper + """ + mesh: TriMesh3dF32 + def __new__(cls,mesh:TriMesh3dF32): ... + def take_mesh(self) -> TriMesh3dF32: + r""" + Returns mesh without copying the mesh data, removes it from the object + """ + ... + + def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF32: + r""" + Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary + """ + ... + + def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... + + def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: + ... + + def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... + + def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: + ... + + def get_point_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh vertex attribute by name + """ + ... + + def get_cell_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh cell attribute by name + """ + ... + + def get_point_attributes(self) -> dict: + r""" + Get all point attributes in a python dictionary + """ + ... + + def get_cell_attributes(self) -> dict: + r""" + Get all cell attributes in a python dictionary + """ + ... + + def get_point_attribute_keys(self) -> list: + r""" + Get all registered point attribute names + """ + ... + + def get_cell_attribute_keys(self) -> list: + r""" + Get all registered cell attribute names + """ + ... + + +class TriMeshWithDataF64: + r""" + MeshWithData wrapper + """ + mesh: TriMesh3dF64 + def __new__(cls,mesh:TriMesh3dF64): ... + def take_mesh(self) -> TriMesh3dF64: + r""" + Returns mesh without copying the mesh data, removes it from the object + """ + ... + + def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF64: + r""" + Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary + """ + ... + + def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... 
+ + def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: + ... + + def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: + ... + + def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: + ... + + def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: + ... + + def get_point_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh vertex attribute by name + """ + ... + + def get_cell_attribute(self, name:builtins.str) -> typing.Any: + r""" + Get mesh cell attribute by name + """ + ... + + def get_point_attributes(self) -> dict: + r""" + Get all point attributes in a python dictionary + """ + ... + + def get_cell_attributes(self) -> dict: + r""" + Get all cell attributes in a python dictionary + """ + ... + + def get_point_attribute_keys(self) -> list: + r""" + Get all registered point attribute names + """ + ... + + def get_cell_attribute_keys(self) -> list: + r""" + Get all registered cell attribute names + """ + ... + + +class UniformGridF32: + r""" + UniformGrid wrapper + """ + ... + +class UniformGridF64: + r""" + UniformGrid wrapper + """ + ... + diff --git a/pysplashsurf/python_environment.yaml b/pysplashsurf/python_environment.yaml new file mode 100644 index 00000000..be1b3f10 --- /dev/null +++ b/pysplashsurf/python_environment.yaml @@ -0,0 +1,22 @@ +# I recommend installing as a subdirectory: +# conda env create --prefix ./envs -f python_environment.yaml +# Then activate using: conda activate ./envs +name: pysplashsurf +channels: + - conda-forge +dependencies: + - maturin=1.8.2 + - meshio=5.3.5 + - numpy=2.2.3 + - pip=25.0.1 + - python=3.13.2 + - python_abi=3.13 + - sphinx=8.2.3 + - numpydoc=1.8.0 + - trimesh=4.6.5 + - rtree=1.4.0 + - scipy=1.15.2 + - furo + - myst-parser=4.0.1 + - pytest=8.3.5 + - pytest-md=0.2.0 diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs new file mode 100644 index 00000000..66723ffa --- /dev/null +++ b/pysplashsurf/src/aabb.rs @@ -0,0 +1,159 @@ +use numpy::{PyArray, PyArray1, PyArray2, PyReadonlyArray2}; +use pyo3::{prelude::*, PyResult}; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::{nalgebra::Vector3, Aabb3d}; + +macro_rules! 
create_aabb3d_interface { + ($name: ident, $type: ident) => { + /// Aabb3d wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: Aabb3d<$type>, + } + + impl $name { + pub fn new(data: Aabb3d<$type>) -> Self { + Self { inner: data } + } + } + + #[gen_stub_pymethods] + #[pymethods] + impl $name { + #[new] + fn py_new<'py>(min: [$type; 3], max: [$type; 3]) -> PyResult { + Ok($name::new(Aabb3d::<$type>::new( + Vector3::from_column_slice(&min), + Vector3::from_column_slice(&max), + ))) + } + + /// Constructs the smallest AABB fitting around all the given points + #[staticmethod] + fn from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { + let points: PyReadonlyArray2<$type> = points.extract()?; + let points = points.as_slice()?; + let points: &[Vector3<$type>] = bytemuck::cast_slice(points); + + Ok($name::new(Aabb3d::from_points(points))) + } + + /// Constructs the smallest AABB fitting around all the given points, parallel version + #[staticmethod] + fn par_from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { + let points: PyReadonlyArray2<$type> = points.extract()?; + let points = points.as_slice()?; + let points: &[Vector3<$type>] = bytemuck::cast_slice(points); + + Ok($name::new(Aabb3d::par_from_points(points))) + } + + /// Constructs a degenerate AABB with min and max set to zero + #[staticmethod] + fn zeros() -> $name { + $name::new(Aabb3d::zeros()) + } + + /// Constructs a degenerate AABB with zero extents centered at the given point + #[staticmethod] + fn from_point(point: [$type; 3]) -> Self { + $name::new(Aabb3d::from_point(Vector3::from_column_slice(&point))) + } + + /// Returns the min coordinate of the bounding box + fn min<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { + let min: &[$type] = self.inner.min().as_slice(); + PyArray::from_slice(py, min) + } + + /// Returns the max coordinate of the bounding box + fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { + let max: &[$type] = self.inner.max().as_slice(); + PyArray::from_slice(py, max) + } + + /// Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` + fn is_consistent(&self) -> bool { + self.inner.is_consistent() + } + + /// Returns whether the AABB is degenerate in any dimension, i.e. 
`aabb.min()[i] == aabb.max()[i]` for any `i` + fn is_degenerate(&self) -> bool { + self.inner.is_degenerate() + } + + /// Returns the extents of the bounding box (vector connecting min and max point of the box) + fn extents<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { + let extents = self.inner.extents(); + PyArray::from_slice(py, extents.as_slice()) + } + + /// Returns the smallest scalar extent of the AABB over all of its dimensions + fn min_extent(&self) -> $type { + self.inner.min_extent() + } + + /// Returns the largest scalar extent of the AABB over all of its dimensions + fn max_extent(&self) -> $type { + self.inner.max_extent() + } + + /// Returns the geometric centroid of the AABB (mean of the corner points) + fn centroid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { + let centroid = self.inner.centroid(); + PyArray::from_slice(py, centroid.as_slice()) + } + + /// Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + fn contains_aabb(&self, other: &$name) -> bool { + self.inner.contains_aabb(&other.inner) + } + + /// Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + fn contains_point(&self, point: [$type; 3]) -> bool { + self.inner + .contains_point(&Vector3::from_column_slice(&point)) + } + + /// Translates the AABB by the given vector + fn translate(&mut self, vector: [$type; 3]) { + self.inner.translate(&Vector3::from_column_slice(&vector)); + } + + /// Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) + fn center_at_origin(&mut self) { + self.inner.center_at_origin(); + } + + /// Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) + fn scale_uniformly(&mut self, scaling: $type) { + self.inner.scale_uniformly(scaling); + } + + /// Enlarges this AABB to the smallest AABB enclosing both itself and another AABB + fn join(&mut self, other: &$name) { + self.inner.join(&other.inner); + } + + /// Enlarges this AABB to the smallest AABB enclosing both itself and another point + fn join_with_point(&mut self, point: [$type; 3]) { + self.inner + .join_with_point(&Vector3::from_column_slice(&point)); + } + + /// Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) + fn grow_uniformly(&mut self, margin: $type) { + self.inner.grow_uniformly(margin); + } + + /// Returns the smallest cubical AABB with the same center that encloses this AABB + fn enclosing_cube(&self) -> $name { + $name::new(self.inner.enclosing_cube()) + } + } + }; +} + +create_aabb3d_interface!(Aabb3dF64, f64); +create_aabb3d_interface!(Aabb3dF32, f32); diff --git a/pysplashsurf/src/bin/stub_gen.rs b/pysplashsurf/src/bin/stub_gen.rs new file mode 100644 index 00000000..befaaa89 --- /dev/null +++ b/pysplashsurf/src/bin/stub_gen.rs @@ -0,0 +1,10 @@ +// Run `cargo run --bin stub_gen` to generate a stub file for the extension +use pyo3_stub_gen::Result; + +fn main() -> Result<()> { + // `stub_info` is a function defined by `define_stub_info_gatherer!` macro. 
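+    // It gathers the signatures and doc comments of all #[gen_stub_*]-annotated items.
+    // `generate()` writes `pysplashsurf.pyi` into the crate root; the rename below then
+    // moves the stub into the Python package directory so it ships with the wheel.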
+    let stub = pysplashsurf::stub_info()?;
+    stub.generate()?;
+    std::fs::rename("pysplashsurf.pyi", "pysplashsurf/pysplashsurf.pyi")?;
+    Ok(())
+}
diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs
new file mode 100644
index 00000000..1754694d
--- /dev/null
+++ b/pysplashsurf/src/lib.rs
@@ -0,0 +1,119 @@
+use pyo3::prelude::*;
+use pyo3_stub_gen::define_stub_info_gatherer;
+
+mod aabb;
+mod mesh;
+mod sph_interpolation;
+mod uniform_grid;
+
+mod marching_cubes;
+mod neighborhood_search;
+mod pipeline;
+mod post_processing;
+mod reconstruction;
+
+/// High-level bindings of the splashsurf surface reconstruction implementation.
+/// Supports reconstructing level-set surfaces from particle clouds or from regular grids.
+#[pymodule]
+fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> {
+    let _ = m.add_class::<mesh::TriMesh3dF64>()?;
+    let _ = m.add_class::<mesh::TriMesh3dF32>()?;
+    let _ = m.add_class::<mesh::MixedTriQuadMesh3dF64>()?;
+    let _ = m.add_class::<mesh::MixedTriQuadMesh3dF32>()?;
+
+    let _ = m.add_class::<mesh::TriMeshWithDataF64>()?;
+    let _ = m.add_class::<mesh::TriMeshWithDataF32>()?;
+    let _ = m.add_class::<mesh::MixedTriQuadMeshWithDataF64>()?;
+    let _ = m.add_class::<mesh::MixedTriQuadMeshWithDataF32>()?;
+
+    let _ = m.add_class::<aabb::Aabb3dF64>()?;
+    let _ = m.add_class::<aabb::Aabb3dF32>()?;
+
+    let _ = m.add_class::<uniform_grid::UniformGridF64>()?;
+    let _ = m.add_class::<uniform_grid::UniformGridF32>()?;
+
+    let _ = m.add_class::<sph_interpolation::SphInterpolatorF64>()?;
+    let _ = m.add_class::<sph_interpolation::SphInterpolatorF32>()?;
+
+    let _ = m.add_class::<reconstruction::SurfaceReconstructionF64>()?;
+    let _ = m.add_class::<reconstruction::SurfaceReconstructionF32>()?;
+
+    let _ = m.add_function(wrap_pyfunction!(
+        reconstruction::reconstruct_surface_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        reconstruction::reconstruct_surface_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::convert_tris_to_quads_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::convert_tris_to_quads_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::marching_cubes_cleanup_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::marching_cubes_cleanup_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        marching_cubes::check_mesh_consistency_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        marching_cubes::check_mesh_consistency_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?);
+    let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::par_laplacian_smoothing_inplace_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::par_laplacian_smoothing_inplace_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::par_laplacian_smoothing_normals_inplace_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        post_processing::par_laplacian_smoothing_normals_inplace_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f64,
+        m
+    )?);
+
+    let _ = m.add_function(wrap_pyfunction!(
+        pipeline::reconstruction_pipeline_py_f32,
+        m
+    )?);
+    let _ = m.add_function(wrap_pyfunction!(
+        pipeline::reconstruction_pipeline_py_f64,
+        m
+    )?);
+
+    Ok(())
+}
+
+define_stub_info_gatherer!(stub_info);
diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs
new file mode 100644
index 00000000..8dedd671
--- /dev/null
+++ b/pysplashsurf/src/marching_cubes.rs
@@ -0,0 +1,77 @@
+use pyo3::{
+    exceptions::{PyRuntimeError, PyValueError},
+    prelude::*,
+};
+
+use crate::{
+    mesh::{TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, TriMeshWithDataF64},
+    uniform_grid::{UniformGridF32, UniformGridF64},
+};
+
+#[pyfunction]
+#[pyo3(name = "check_mesh_consistency_f32")]
+#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))]
+pub fn check_mesh_consistency_py_f32<'py>(
+    py: Python,
+    grid: &UniformGridF32,
+    mesh: PyObject,
+    check_closed: bool,
+    check_manifold: bool,
+    debug: bool,
+) -> PyResult<()> {
+    if let Ok(mesh) = mesh.downcast_bound::<TriMesh3dF32>(py) {
+        splashsurf_lib::marching_cubes::check_mesh_consistency(
+            &grid.inner,
+            &mesh.borrow().inner,
+            check_closed,
+            check_manifold,
+            debug,
+        )
+        .map_err(|x| PyErr::new::<PyRuntimeError, _>(x))
+    } else if let Ok(mesh) = mesh.downcast_bound::<TriMeshWithDataF32>(py) {
+        splashsurf_lib::marching_cubes::check_mesh_consistency(
+            &grid.inner,
+            &mesh.borrow().inner.mesh,
+            check_closed,
+            check_manifold,
+            debug,
+        )
+        .map_err(|x| PyErr::new::<PyRuntimeError, _>(x))
+    } else {
+        Err(PyErr::new::<PyValueError, _>("Invalid mesh type"))
+    }
+}
+
+#[pyfunction]
+#[pyo3(name = "check_mesh_consistency_f64")]
+#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))]
+pub fn check_mesh_consistency_py_f64<'py>(
+    py: Python,
+    grid: &UniformGridF64,
+    mesh: PyObject,
+    check_closed: bool,
+    check_manifold: bool,
+    debug: bool,
+) -> PyResult<()> {
+    if let Ok(mesh) = mesh.downcast_bound::<TriMesh3dF64>(py) {
+        splashsurf_lib::marching_cubes::check_mesh_consistency(
+            &grid.inner,
+            &mesh.borrow().inner,
+            check_closed,
+            check_manifold,
+            debug,
+        )
+        .map_err(|x| PyErr::new::<PyRuntimeError, _>(x))
+    } else if let Ok(mesh) = mesh.downcast_bound::<TriMeshWithDataF64>(py) {
+        splashsurf_lib::marching_cubes::check_mesh_consistency(
+            &grid.inner,
+            &mesh.borrow().inner.mesh,
+            check_closed,
+            check_manifold,
+            debug,
+        )
+        .map_err(|x| PyErr::new::<PyRuntimeError, _>(x))
+    } else {
+        Err(PyErr::new::<PyValueError, _>("Invalid mesh type"))
+    }
+}
diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs
new file mode 100644
index 00000000..b3b47732
--- /dev/null
+++ b/pysplashsurf/src/mesh.rs
@@ -0,0 +1,432 @@
+use ndarray::{Array2, ArrayView, ArrayView2};
+use numpy::{Element, IntoPyArray, PyArray, PyArray2, PyArrayMethods, PyReadonlyArray2, ToPyArray};
+use pyo3::{
+    exceptions::PyValueError,
+    prelude::*,
+    types::{PyDict, PyList, PyTuple},
+    IntoPyObjectExt,
+};
+use pyo3_stub_gen::derive::*;
+use splashsurf_lib::{
+    mesh::{
+        AttributeData, Mesh3d, MeshAttribute, MeshWithData, MixedTriQuadMesh3d, TriMesh3d,
+        TriangleOrQuadCell,
+    },
+    nalgebra::{Unit, Vector3},
+    Real,
+};
+
+use crate::aabb::{Aabb3dF32, Aabb3dF64};
+
+fn get_attribute_with_name<'py, R: Real + Element>(
+    py: Python<'py>,
+    attrs: &[MeshAttribute<R>],
+    name: &str,
+) -> PyResult<PyObject>
+where
+    R: pyo3::IntoPyObject<'py>,
+{
+    let elem = attrs.iter().filter(|x| x.name == name).next();
+    match elem {
+        Some(attr) => match attr.data.clone() {
+            AttributeData::ScalarU64(res) => Ok(res.into_pyobject(py).unwrap().into()),
+            AttributeData::ScalarReal(res) => Ok(res.into_pyobject(py).unwrap().into()),
+            AttributeData::Vector3Real(res) => {
+                let flattened: Vec<R> = bytemuck::cast_vec(res);
+                let res: Array2<R> =
+                    Array2::from_shape_vec((flattened.len() / 3, 3), flattened).unwrap();
+                Ok(res.into_pyarray(py).into_bound_py_any(py).unwrap().into())
+            }
+        },
+        None => Err(PyErr::new::<PyValueError, _>(format!(
+            "Attribute with name {} doesn't exist",
+            name
+        ))),
+    }
+}
+
+fn add_attribute_with_name<'py, R: Real + Element>(
+    attrs: &mut Vec<MeshAttribute<R>>,
+    attribute: MeshAttribute<R>,
+) -> PyResult<()> {
+    let elem = attrs.iter().filter(|x| x.name == attribute.name).next();
+    match elem {
+        None => {
+            attrs.push(attribute);
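+            // First attribute registered under this name: take ownership and store it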
Ok(()) + } + _ => Err(PyErr::new::(format!( + "Attribute with name {} already exists", + attribute.name + ))), + } +} + +macro_rules! create_mesh_data_interface { + ($name: ident, $type: ident, $mesh_class: ident, $pymesh_class: ident, $aabb_class: ident) => { + /// MeshWithData wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: MeshWithData<$type, $mesh_class<$type>>, + } + + impl $name { + pub fn new(data: MeshWithData<$type, $mesh_class<$type>>) -> Self { + Self { inner: data } + } + } + + #[gen_stub_pymethods] + #[pymethods] + impl $name { + #[new] + fn py_new(mesh: &$pymesh_class) -> PyResult { + let meshdata = MeshWithData::new(mesh.inner.clone()); + Ok($name::new(meshdata)) + } + + /// Clone of the contained mesh + #[getter] + fn mesh(&self) -> $pymesh_class { + $pymesh_class::new(self.inner.mesh.clone()) + } + + /// Returns mesh without copying the mesh data, removes it from the object + fn take_mesh(&mut self) -> $pymesh_class { + let mesh = std::mem::take(&mut self.inner.mesh); + $pymesh_class::new(mesh) + } + + /// Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary + fn par_clamp_with_aabb( + &self, + aabb: &$aabb_class, + clamp_vertices: bool, + keep_vertices: bool, + ) -> $name { + $name::new(self.inner.par_clamp_with_aabb( + &aabb.inner, + clamp_vertices, + keep_vertices, + )) + } + + fn push_point_attribute_scalar_u64<'py>( + &mut self, + name: &str, + data: Vec, + ) -> PyResult<()> { + add_attribute_with_name::<$type>( + &mut self.inner.point_attributes, + MeshAttribute::new(name, AttributeData::ScalarU64(data)), + ) + } + + fn push_point_attribute_scalar_real<'py>( + &mut self, + name: &str, + data: Vec<$type>, + ) -> PyResult<()> { + add_attribute_with_name::<$type>( + &mut self.inner.point_attributes, + MeshAttribute::new(name, AttributeData::ScalarReal(data)), + ) + } + + fn push_point_attribute_vector_real<'py>( + &mut self, + name: &str, + data: &Bound<'py, PyArray2<$type>>, + ) -> PyResult<()> { + let data: PyReadonlyArray2<$type> = data.extract().unwrap(); + let data = data.as_slice().unwrap(); + let data: &[Vector3<$type>] = bytemuck::cast_slice(data); + + add_attribute_with_name::<$type>( + &mut self.inner.point_attributes, + MeshAttribute::new(name, AttributeData::Vector3Real(data.to_vec())), + ) + } + + fn push_cell_attribute_scalar_u64<'py>( + &mut self, + name: &str, + data: Vec, + ) -> PyResult<()> { + add_attribute_with_name::<$type>( + &mut self.inner.cell_attributes, + MeshAttribute::new(name, AttributeData::ScalarU64(data)), + ) + } + + fn push_cell_attribute_scalar_real<'py>( + &mut self, + name: &str, + data: Vec<$type>, + ) -> PyResult<()> { + add_attribute_with_name::<$type>( + &mut self.inner.cell_attributes, + MeshAttribute::new(name, AttributeData::ScalarReal(data)), + ) + } + + fn push_cell_attribute_vector_real<'py>( + &mut self, + name: &str, + data: &Bound<'py, PyArray2<$type>>, + ) -> PyResult<()> { + let data: PyReadonlyArray2<$type> = data.extract().unwrap(); + let data = data.as_slice().unwrap(); + let data: &[Vector3<$type>] = bytemuck::cast_slice(data); + + add_attribute_with_name::<$type>( + &mut self.inner.cell_attributes, + MeshAttribute::new(name, AttributeData::Vector3Real(data.to_vec())), + ) + } + + /// Get mesh vertex attribute by name + fn get_point_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { + get_attribute_with_name::<$type>(py, self.inner.point_attributes.as_slice(), name) + } + + /// Get mesh cell attribute 
by name + fn get_cell_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { + get_attribute_with_name::<$type>(py, self.inner.cell_attributes.as_slice(), name) + } + + /// Get all point attributes in a python dictionary + fn get_point_attributes<'py>(&self, py: Python<'py>) -> Bound<'py, PyDict> { + let res = PyDict::new(py); + + for attr in self.inner.point_attributes.iter() { + let data = get_attribute_with_name::<$type>( + py, + self.inner.point_attributes.as_slice(), + &attr.name, + ); + match data { + Ok(data) => res.set_item(&attr.name, data).unwrap(), + Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), + } + } + + res + } + + /// Get all cell attributes in a python dictionary + fn get_cell_attributes<'py>(&self, py: Python<'py>) -> Bound<'py, PyDict> { + let res = PyDict::new(py); + + for attr in self.inner.cell_attributes.iter() { + let data = get_attribute_with_name::<$type>( + py, + self.inner.cell_attributes.as_slice(), + &attr.name, + ); + match data { + Ok(data) => res.set_item(&attr.name, data).unwrap(), + Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), + } + } + + res + } + + /// Get all registered point attribute names + fn get_point_attribute_keys<'py>(&self, py: Python<'py>) -> Bound<'py, PyList> { + let mut res: Vec<&str> = vec![]; + + for attr in self.inner.point_attributes.iter() { + res.push(&attr.name); + } + + PyList::new(py, res).unwrap() + } + + /// Get all registered cell attribute names + fn get_cell_attribute_keys<'py>(&self, py: Python<'py>) -> Bound<'py, PyList> { + let mut res: Vec<&str> = vec![]; + + for attr in self.inner.cell_attributes.iter() { + res.push(&attr.name); + } + + PyList::new(py, res).unwrap() + } + } + }; +} + +macro_rules! create_mesh_interface { + ($name: ident, $type: ident) => { + /// TriMesh3d wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: TriMesh3d<$type>, + } + + impl $name { + pub fn new(data: TriMesh3d<$type>) -> Self { + Self { inner: data } + } + } + + #[gen_stub_pymethods] + #[pymethods] + impl $name { + /// nx3 array of vertex positions, copies the data + #[getter] + fn vertices<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray2<$type>> { + let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); + let vertices: ArrayView2<$type> = + ArrayView::from_shape((self.inner.vertices.len(), 3), points).unwrap(); + vertices.to_pyarray(py) // seems like at least one copy is necessary here (to_pyarray copies the data) + } + + /// nx3 array of the vertex indices that make up a triangle, copies the data + #[getter] + fn triangles<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray2> { + let tris: &[u64] = bytemuck::cast_slice(&self.inner.triangles); + let triangles: ArrayView2 = + ArrayView::from_shape((self.inner.triangles.len(), 3), tris).unwrap(); + triangles.to_pyarray(py) + } + + /// Returns a tuple of vertices and triangles without copying the data, removes the data in the class + fn take_vertices_and_triangles<'py>(&mut self, py: Python<'py>) -> Bound<'py, PyTuple> { + let vertices = std::mem::take(&mut self.inner.vertices); + let triangles = std::mem::take(&mut self.inner.triangles); + + let n = vertices.len(); + let m = triangles.len(); + + let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); + let vertices_array = PyArray::from_vec(py, vertices_scalar) + .reshape([n, 3]) + .unwrap(); + + let triangles_scalar: Vec = bytemuck::cast_vec(triangles); + let triangles_array = PyArray::from_vec(py, triangles_scalar) + .reshape([m, 
3]) + .unwrap(); + + let tup = (vertices_array, triangles_array); + tup.into_pyobject(py).unwrap() + } + + /// Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) + fn par_vertex_normals<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray2<$type>> { + let normals_vec = self.inner.par_vertex_normals(); + let normals_vec = + bytemuck::allocation::cast_vec::>, $type>(normals_vec); + + let normals: &[$type] = normals_vec.as_slice(); + let normals: ArrayView2<$type> = + ArrayView::from_shape((normals.len() / 3, 3), normals).unwrap(); + + normals.to_pyarray(py) + } + + /// Returns a mapping of all mesh vertices to the set of their connected neighbor vertices + fn vertex_vertex_connectivity(&self) -> Vec> { + self.inner.vertex_vertex_connectivity() + } + } + }; +} + +macro_rules! create_tri_quad_mesh_interface { + ($name: ident, $type: ident) => { + /// MixedTriQuadMesh3d wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: MixedTriQuadMesh3d<$type>, + } + + impl $name { + pub fn new(data: MixedTriQuadMesh3d<$type>) -> Self { + Self { inner: data } + } + } + + #[gen_stub_pymethods] + #[pymethods] + impl $name { + /// nx3 array of vertex positions, copies data + #[getter] + fn vertices<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray2<$type>> { + let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); + let vertices: ArrayView2<$type> = + ArrayView::from_shape((self.inner.vertices.len(), 3), points).unwrap(); + vertices.to_pyarray(py) + } + + /// 2D list specifying the vertex indices either for a triangle or a quad + #[getter] + fn cells(&self) -> Vec> { + let cells: Vec> = self + .inner + .cells + .iter() + .map(|c| match c { + TriangleOrQuadCell::Tri(v) => v.to_vec(), + TriangleOrQuadCell::Quad(v) => v.to_vec(), + }) + .collect(); + cells + } + + /// Returns a tuple of vertices and triangles without copying the data, removes the data in the class + fn take_vertices_and_cells<'py>(&mut self, py: Python<'py>) -> Bound<'py, PyTuple> { + let vertices = std::mem::take(&mut self.inner.vertices); + let cells = std::mem::take(&mut self.inner.cells); + + let n = vertices.len(); + + let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); + let vertices_array = PyArray::from_vec(py, vertices_scalar) + .reshape([n, 3]) + .unwrap(); + + let cells_list: Vec> = cells + .into_iter() + .map(|c| match c { + TriangleOrQuadCell::Tri(v) => v.into(), + TriangleOrQuadCell::Quad(v) => v.into(), + }) + .collect(); + + let tup = (vertices_array, cells_list); + tup.into_pyobject(py).unwrap() + } + } + }; +} + +create_mesh_interface!(TriMesh3dF64, f64); +create_mesh_interface!(TriMesh3dF32, f32); + +create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF64, f64); +create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF32, f32); + +create_mesh_data_interface!(TriMeshWithDataF64, f64, TriMesh3d, TriMesh3dF64, Aabb3dF64); +create_mesh_data_interface!(TriMeshWithDataF32, f32, TriMesh3d, TriMesh3dF32, Aabb3dF32); + +create_mesh_data_interface!( + MixedTriQuadMeshWithDataF64, + f64, + MixedTriQuadMesh3d, + MixedTriQuadMesh3dF64, + Aabb3dF64 +); +create_mesh_data_interface!( + MixedTriQuadMeshWithDataF32, + f32, + MixedTriQuadMesh3d, + MixedTriQuadMesh3dF32, + Aabb3dF32 +); diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs new file mode 100644 index 00000000..d0e20281 --- /dev/null +++ b/pysplashsurf/src/neighborhood_search.rs @@ -0,0 +1,53 @@ +use numpy::{PyArray2, 
PyReadonlyArray2}; +use pyo3::prelude::*; +use splashsurf_lib::{nalgebra::Vector3, neighborhood_search::*}; + +use crate::aabb::{Aabb3dF32, Aabb3dF64}; + +#[pyfunction] +#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f64")] +#[pyo3(signature = (domain, particle_positions, search_radius))] +pub fn neighborhood_search_spatial_hashing_parallel_py_f64<'py>( + domain: &Aabb3dF64, + particle_positions: &Bound<'py, PyArray2>, + search_radius: f64, +) -> PyResult>> { + let mut nl: Vec> = Vec::new(); + + let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; + let particle_positions = particle_positions.as_slice()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + neighborhood_search_spatial_hashing_parallel::( + &domain.inner, + particle_positions, + search_radius, + &mut nl, + ); + + Ok(nl) +} + +#[pyfunction] +#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f32")] +#[pyo3(signature = (domain, particle_positions, search_radius))] +pub fn neighborhood_search_spatial_hashing_parallel_py_f32<'py>( + domain: &Aabb3dF32, + particle_positions: &Bound<'py, PyArray2>, + search_radius: f32, +) -> PyResult>> { + let mut nl: Vec> = Vec::new(); + + let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; + let particle_positions = particle_positions.as_slice()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + neighborhood_search_spatial_hashing_parallel::( + &domain.inner, + particle_positions, + search_radius, + &mut nl, + ); + + Ok(nl) +} diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs new file mode 100644 index 00000000..11e8a075 --- /dev/null +++ b/pysplashsurf/src/pipeline.rs @@ -0,0 +1,626 @@ +use anyhow::anyhow; +use log::info; +use numpy::{Element, PyArray1, PyArray2, PyReadonlyArray1, PyReadonlyArray2}; +use pyo3::{ + prelude::*, + types::{PyDict, PyString}, +}; +use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use splashsurf_lib::{ + mesh::{AttributeData, Mesh3d, MeshAttribute, MeshWithData, TriMesh3d}, + nalgebra::{Unit, Vector3}, + profile, + sph_interpolation::SphInterpolator, + Aabb3d, Index, Real, SurfaceReconstruction, +}; +use std::borrow::Cow; + +use crate::{ + mesh::{TriMeshWithDataF32, TriMeshWithDataF64}, + reconstruction::{reconstruct_surface_py, SurfaceReconstructionF32, SurfaceReconstructionF64}, +}; + +fn reconstruction_pipeline_generic( + particles: &[Vector3], + attributes: Vec>, + particle_radius: R, + rest_density: R, + smoothing_length: R, + cube_size: R, + iso_surface_threshold: R, + aabb_min: Option<[R; 3]>, + aabb_max: Option<[R; 3]>, + enable_multi_threading: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + global_neighborhood_list: bool, + // check_mesh_closed: bool, + // check_mesh_manifold: bool, + // check_mesh_orientation: bool, + // check_mesh_debug: bool, + mesh_cleanup: bool, + decimate_barnacles: bool, + keep_vertices: bool, + compute_normals: bool, + sph_normals: bool, + normals_smoothing_iters: Option, + mesh_smoothing_iters: Option, + mesh_smoothing_weights: bool, + mesh_smoothing_weights_normalization: f64, + // generate_quads: bool, + // quad_max_edge_diag_ratio: f64, + // quad_max_normal_angle: f64, + // quad_max_interior_angle: f64, + output_mesh_smoothing_weights: bool, + output_raw_normals: bool, + mesh_aabb_min: Option<[R; 3]>, + mesh_aabb_max: Option<[R; 3]>, + mesh_aabb_clamp_vertices: bool, +) -> Result<(MeshWithData>, 
SurfaceReconstruction), anyhow::Error> { + profile!("surface reconstruction"); + + let compact_support_radius = R::from_f64(2.0).unwrap() * smoothing_length * particle_radius; + + // Perform the surface reconstruction + let reconstruction = reconstruct_surface_py::( + particles, + particle_radius, + rest_density, + smoothing_length, + cube_size, + iso_surface_threshold, + enable_multi_threading, + global_neighborhood_list, + use_custom_grid_decomposition, + subdomain_num_cubes_per_dim, + aabb_min, + aabb_max, + ); + + // let grid = reconstruction.grid(); + let mut mesh_with_data: MeshWithData> = + MeshWithData::new(reconstruction.mesh().clone()); + + // Perform post-processing + { + profile!("postprocessing"); + let mut vertex_connectivity = None; + + if mesh_cleanup { + info!("Post-processing: Performing mesh cleanup"); + let tris_before = mesh_with_data.mesh.triangles.len(); + let verts_before = mesh_with_data.mesh.vertices.len(); + vertex_connectivity = Some(splashsurf_lib::postprocessing::marching_cubes_cleanup( + &mut mesh_with_data.mesh, + reconstruction.grid(), + 5, + keep_vertices, + )); + let tris_after = mesh_with_data.mesh.triangles.len(); + let verts_after = mesh_with_data.mesh.vertices.len(); + info!("Post-processing: Cleanup reduced number of vertices to {:.2}% and number of triangles to {:.2}% of original mesh.", (verts_after as f64 / verts_before as f64) * 100.0, (tris_after as f64 / tris_before as f64) * 100.0) + } + + // Decimate mesh if requested + if decimate_barnacles { + info!("Post-processing: Performing decimation"); + vertex_connectivity = Some(splashsurf_lib::postprocessing::decimation( + &mut mesh_with_data.mesh, + keep_vertices, + )); + } + + // Initialize SPH interpolator if required later + let interpolator_required = mesh_smoothing_weights || sph_normals; + + let interpolator = if interpolator_required { + profile!("initialize interpolator"); + info!("Post-processing: Initializing interpolator..."); + + info!( + "Constructing global acceleration structure for SPH interpolation to {} vertices...", + mesh_with_data.vertices().len() + ); + + let particle_rest_density = rest_density; + let particle_rest_volume = + R::from_f64((4.0 / 3.0) * std::f64::consts::PI).unwrap() * particle_radius.powi(3); + let particle_rest_mass = particle_rest_volume * particle_rest_density; + + let particle_densities = reconstruction + .particle_densities() + .ok_or_else(|| anyhow::anyhow!("Particle densities were not returned by surface reconstruction but are required for SPH normal computation"))? + .as_slice(); + assert_eq!( + particles.len(), + particle_densities.len(), + "There has to be one density value per particle" + ); + + Some(SphInterpolator::new( + &particles, + particle_densities, + particle_rest_mass, + compact_support_radius, + )) + } else { + None + }; + + // Compute mesh vertex-vertex connectivity map if required later + let vertex_connectivity_required = + normals_smoothing_iters.is_some() || mesh_smoothing_iters.is_some(); + if vertex_connectivity.is_none() && vertex_connectivity_required { + vertex_connectivity = Some(mesh_with_data.mesh.vertex_vertex_connectivity()); + } + + // Compute smoothing weights if requested + let smoothing_weights = if mesh_smoothing_weights { + profile!("compute smoothing weights"); + info!("Post-processing: Computing smoothing weights..."); + + // TODO: Switch between parallel/single threaded + // TODO: Re-use data from reconstruction? 
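+            // Approach (mirrors the code that follows): for every particle, accumulate a
+            // smoothly decaying contribution `1 - (d/r)^2` from each neighbor within the
+            // compact support radius, interpolate this "weighted neighbor count" to the
+            // mesh vertices via SPH, and map the result through a smooth-step function to
+            // obtain per-vertex smoothing weights in [0, 1] (small in sparse regions).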
+ + // Global neighborhood search + let nl = reconstruction + .particle_neighbors() + .map(Cow::Borrowed) + .unwrap_or_else(|| + { + let search_radius = compact_support_radius; + + let mut domain = Aabb3d::from_points(particles); + domain.grow_uniformly(search_radius); + + let mut nl = Vec::new(); + splashsurf_lib::neighborhood_search::neighborhood_search_spatial_hashing_parallel::( + &domain, + particles, + search_radius, + &mut nl, + ); + assert_eq!(nl.len(), particles.len()); + Cow::Owned(nl) + } + ); + + // Compute weighted neighbor count + let squared_r = compact_support_radius * compact_support_radius; + let weighted_ncounts = nl + .par_iter() + .enumerate() + .map(|(i, nl)| { + nl.iter() + .copied() + .map(|j| { + let dist = (particles[i] - particles[j]).norm_squared(); + + R::one() - (dist / squared_r).clamp(R::zero(), R::one()) + }) + .fold(R::zero(), R::add) + }) + .collect::>(); + + let vertex_weighted_num_neighbors = { + profile!("interpolate weighted neighbor counts"); + interpolator + .as_ref() + .expect("interpolator is required") + .interpolate_scalar_quantity( + weighted_ncounts.as_slice(), + mesh_with_data.vertices(), + true, + ) + }; + + let smoothing_weights = { + let offset = R::zero(); + let normalization = R::from_f64(mesh_smoothing_weights_normalization).expect( + "smoothing weight normalization value cannot be represented as Real type", + ) - offset; + + // Normalize number of neighbors + let smoothing_weights = vertex_weighted_num_neighbors + .par_iter() + .copied() + .map(|n| (n - offset).max(R::zero())) + .map(|n| (n / normalization).min(R::one())) + // Smooth-Step function + .map(|x| x.powi(5).times(6) - x.powi(4).times(15) + x.powi(3).times(10)) + .collect::>(); + + if output_mesh_smoothing_weights { + // Raw distance-weighted number of neighbors value per vertex (can be used to determine normalization value) + mesh_with_data.point_attributes.push(MeshAttribute::new( + "wnn".to_string(), + AttributeData::ScalarReal(vertex_weighted_num_neighbors), + )); + // Final smoothing weights per vertex + mesh_with_data.point_attributes.push(MeshAttribute::new( + "sw".to_string(), + AttributeData::ScalarReal(smoothing_weights.clone()), + )); + } + + smoothing_weights + }; + + Some(smoothing_weights) + } else { + None + }; + + // Perform smoothing if requested + if let Some(mesh_smoothing_iters) = mesh_smoothing_iters { + profile!("mesh smoothing"); + info!("Post-processing: Smoothing mesh..."); + + // TODO: Switch between parallel/single threaded + + let smoothing_weights = smoothing_weights + .unwrap_or_else(|| vec![R::one(); mesh_with_data.vertices().len()]); + + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh_with_data.mesh, + vertex_connectivity + .as_ref() + .expect("vertex connectivity is required"), + mesh_smoothing_iters, + R::one(), + &smoothing_weights, + ); + } + + // Add normals to mesh if requested + if compute_normals { + profile!("compute normals"); + info!("Post-processing: Computing surface normals..."); + + // Compute normals + let normals = if sph_normals { + info!("Using SPH interpolation to compute surface normals"); + + let sph_normals = interpolator + .as_ref() + .expect("interpolator is required") + .interpolate_normals(mesh_with_data.vertices()); + bytemuck::allocation::cast_vec::>, Vector3>(sph_normals) + } else { + info!("Using area weighted triangle normals for surface normals"); + profile!("mesh.par_vertex_normals"); + let tri_normals = mesh_with_data.mesh.par_vertex_normals(); + + // Convert unit vectors to plain 
vectors
+                bytemuck::allocation::cast_vec::<Unit<Vector3<R>>, Vector3<R>>(tri_normals)
+            };
+
+            // Smooth normals
+            if let Some(smoothing_iters) = normals_smoothing_iters {
+                info!("Post-processing: Smoothing normals...");
+
+                let mut smoothed_normals = normals.clone();
+                splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace(
+                    &mut smoothed_normals,
+                    vertex_connectivity
+                        .as_ref()
+                        .expect("vertex connectivity is required"),
+                    smoothing_iters,
+                );
+
+                mesh_with_data.point_attributes.push(MeshAttribute::new(
+                    "normals".to_string(),
+                    AttributeData::Vector3Real(smoothed_normals),
+                ));
+                if output_raw_normals {
+                    mesh_with_data.point_attributes.push(MeshAttribute::new(
+                        "raw_normals".to_string(),
+                        AttributeData::Vector3Real(normals),
+                    ));
+                }
+            } else {
+                mesh_with_data.point_attributes.push(MeshAttribute::new(
+                    "normals".to_string(),
+                    AttributeData::Vector3Real(normals),
+                ));
+            }
+        }
+
+        // Interpolate attributes if requested
+        if !attributes.is_empty() {
+            profile!("interpolate attributes");
+            info!("Post-processing: Interpolating attributes...");
+            let interpolator = interpolator.as_ref().expect("interpolator is required");
+
+            for attribute in attributes.into_iter() {
+                info!("Interpolating attribute \"{}\"...", attribute.name);
+
+                match attribute.data {
+                    AttributeData::ScalarReal(values) => {
+                        let interpolated_values = interpolator.interpolate_scalar_quantity(
+                            values.as_slice(),
+                            mesh_with_data.vertices(),
+                            true,
+                        );
+                        mesh_with_data.point_attributes.push(MeshAttribute::new(
+                            attribute.name,
+                            AttributeData::ScalarReal(interpolated_values),
+                        ));
+                    }
+                    AttributeData::Vector3Real(values) => {
+                        let interpolated_values = interpolator.interpolate_vector_quantity(
+                            values.as_slice(),
+                            mesh_with_data.vertices(),
+                            true,
+                        );
+                        mesh_with_data.point_attributes.push(MeshAttribute::new(
+                            attribute.name,
+                            AttributeData::Vector3Real(interpolated_values),
+                        ));
+                    }
+                    _ => unimplemented!("Interpolation of this attribute type not implemented"),
+                }
+            }
+        }
+    }
+
+    // Remove and clamp cells outside of the mesh AABB
+    let mesh_aabb = if mesh_aabb_min.is_some() && mesh_aabb_max.is_some() {
+        Some(Aabb3d::new(
+            Vector3::from(mesh_aabb_min.unwrap()),
+            Vector3::from(mesh_aabb_max.unwrap()),
+        ))
+    } else {
+        None
+    };
+
+    let mesh_with_data = if let Some(mesh_aabb) = &mesh_aabb {
+        profile!("clamp mesh to aabb");
+        info!("Post-processing: Clamping mesh to AABB...");
+
+        mesh_with_data.par_clamp_with_aabb(
+            &mesh_aabb
+                .try_convert()
+                .ok_or_else(|| anyhow!("Failed to convert mesh AABB"))?,
+            mesh_aabb_clamp_vertices,
+            keep_vertices,
+        )
+    } else {
+        mesh_with_data
+    };
+
+    // Convert triangles to quads
+    // let (tri_mesh, tri_quad_mesh) = if generate_quads {
+    //     info!("Post-processing: Convert triangles to quads...");
+    //     let non_squareness_limit = R::from_f64(quad_max_edge_diag_ratio).unwrap();
+    //     let normal_angle_limit_rad =
+    //         R::from_f64(quad_max_normal_angle.to_radians()).unwrap();
+    //     let max_interior_angle =
+    //         R::from_f64(quad_max_interior_angle.to_radians()).unwrap();
+
+    //     let tri_quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads(
+    //         &mesh_with_data.mesh,
+    //         non_squareness_limit,
+    //         normal_angle_limit_rad,
+    //         max_interior_angle,
+    //     );
+
+    //     (None, Some(mesh_with_data.with_mesh(tri_quad_mesh)))
+    // } else {
+    //     (Some(mesh_with_data), None)
+    // };
+    Ok((mesh_with_data, reconstruction))
+}
+
+fn attrs_conversion<'py, R: Real + Element>(
+    attributes_to_interpolate: Bound<'py, PyDict>,
+) -> Vec<MeshAttribute<R>> {
+    let mut attrs: Vec<MeshAttribute<R>> = Vec::new();
+    for (key, value) in
attributes_to_interpolate.iter() { + let key_str: String = key + .downcast::() + .expect("Key wasn't a string") + .extract() + .unwrap(); + + if let Ok(value) = value.downcast::>() { + let value: Vec = value + .extract::>() + .unwrap() + .as_slice() + .unwrap() + .to_vec(); + let mesh_attr = MeshAttribute::new(key_str, AttributeData::ScalarU64(value)); + attrs.push(mesh_attr); + } else if let Ok(value) = value.downcast::>() { + let value: Vec = value + .extract::>() + .unwrap() + .as_slice() + .unwrap() + .to_vec(); + let mesh_attr = MeshAttribute::new(key_str, AttributeData::ScalarReal(value)); + attrs.push(mesh_attr); + } else if let Ok(value) = value.downcast::>() { + let value: PyReadonlyArray2 = value.extract().unwrap(); + + let value_slice = value.as_slice().unwrap(); + let value_slice: &[Vector3] = bytemuck::cast_slice(value_slice); + + let mesh_attr = + MeshAttribute::new(key_str, AttributeData::Vector3Real(value_slice.to_vec())); + attrs.push(mesh_attr); + } else { + println!("Couldnt downcast attribute {} to valid type", &key_str); + } + } + attrs +} + +#[pyfunction] +#[pyo3(name = "reconstruction_pipeline_f32")] +#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, + smoothing_length, cube_size, iso_surface_threshold, + aabb_min = None, aabb_max = None, enable_multi_threading=false, + use_custom_grid_decomposition=false, subdomain_num_cubes_per_dim=64, global_neighborhood_list=false, + mesh_cleanup, decimate_barnacles, keep_vertices, compute_normals, sph_normals, + normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, + mesh_smoothing_weights_normalization, output_mesh_smoothing_weights, + output_raw_normals, mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices +))] +pub fn reconstruction_pipeline_py_f32<'py>( + particles: &Bound<'py, PyArray2>, + attributes_to_interpolate: Bound<'py, PyDict>, + particle_radius: f32, + rest_density: f32, + smoothing_length: f32, + cube_size: f32, + iso_surface_threshold: f32, + aabb_min: Option<[f32; 3]>, + aabb_max: Option<[f32; 3]>, + enable_multi_threading: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + global_neighborhood_list: bool, + mesh_cleanup: bool, + decimate_barnacles: bool, + keep_vertices: bool, + compute_normals: bool, + sph_normals: bool, + normals_smoothing_iters: Option, + mesh_smoothing_iters: Option, + mesh_smoothing_weights: bool, + mesh_smoothing_weights_normalization: f64, + output_mesh_smoothing_weights: bool, + output_raw_normals: bool, + mesh_aabb_min: Option<[f32; 3]>, + mesh_aabb_max: Option<[f32; 3]>, + mesh_aabb_clamp_vertices: bool, +) -> (TriMeshWithDataF32, SurfaceReconstructionF32) { + let particles: PyReadonlyArray2 = particles.extract().unwrap(); + + let particle_positions = particles.as_slice().unwrap(); + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + let attrs = attrs_conversion(attributes_to_interpolate); + + let (mesh, reconstruction) = reconstruction_pipeline_generic::( + particle_positions, + attrs, + particle_radius, + rest_density, + smoothing_length, + cube_size, + iso_surface_threshold, + aabb_min, + aabb_max, + enable_multi_threading, + use_custom_grid_decomposition, + subdomain_num_cubes_per_dim, + global_neighborhood_list, + mesh_cleanup, + decimate_barnacles, + keep_vertices, + compute_normals, + sph_normals, + normals_smoothing_iters, + mesh_smoothing_iters, + mesh_smoothing_weights, + mesh_smoothing_weights_normalization, + output_mesh_smoothing_weights, + 
output_raw_normals, + mesh_aabb_min, + mesh_aabb_max, + mesh_aabb_clamp_vertices, + ) + .unwrap(); + + ( + TriMeshWithDataF32::new(mesh), + SurfaceReconstructionF32::new(reconstruction), + ) +} + +#[pyfunction] +#[pyo3(name = "reconstruction_pipeline_f64")] +#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, + smoothing_length, cube_size, iso_surface_threshold, + aabb_min = None, aabb_max = None, enable_multi_threading=false, + use_custom_grid_decomposition=false, subdomain_num_cubes_per_dim=64, global_neighborhood_list=false, + mesh_cleanup, decimate_barnacles, keep_vertices, compute_normals, sph_normals, + normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, + mesh_smoothing_weights_normalization, output_mesh_smoothing_weights, + output_raw_normals, mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices +))] +pub fn reconstruction_pipeline_py_f64<'py>( + particles: &Bound<'py, PyArray2>, + attributes_to_interpolate: Bound<'py, PyDict>, + particle_radius: f64, + rest_density: f64, + smoothing_length: f64, + cube_size: f64, + iso_surface_threshold: f64, + aabb_min: Option<[f64; 3]>, + aabb_max: Option<[f64; 3]>, + enable_multi_threading: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + global_neighborhood_list: bool, + mesh_cleanup: bool, + decimate_barnacles: bool, + keep_vertices: bool, + compute_normals: bool, + sph_normals: bool, + normals_smoothing_iters: Option, + mesh_smoothing_iters: Option, + mesh_smoothing_weights: bool, + mesh_smoothing_weights_normalization: f64, + output_mesh_smoothing_weights: bool, + output_raw_normals: bool, + mesh_aabb_min: Option<[f64; 3]>, + mesh_aabb_max: Option<[f64; 3]>, + mesh_aabb_clamp_vertices: bool, +) -> (TriMeshWithDataF64, SurfaceReconstructionF64) { + let particles: PyReadonlyArray2 = particles.extract().unwrap(); + + let particle_positions = particles.as_slice().unwrap(); + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + let attrs = attrs_conversion(attributes_to_interpolate); + + let (mesh, reconstruction) = reconstruction_pipeline_generic::( + particle_positions, + attrs, + particle_radius, + rest_density, + smoothing_length, + cube_size, + iso_surface_threshold, + aabb_min, + aabb_max, + enable_multi_threading, + use_custom_grid_decomposition, + subdomain_num_cubes_per_dim, + global_neighborhood_list, + mesh_cleanup, + decimate_barnacles, + keep_vertices, + compute_normals, + sph_normals, + normals_smoothing_iters, + mesh_smoothing_iters, + mesh_smoothing_weights, + mesh_smoothing_weights_normalization, + output_mesh_smoothing_weights, + output_raw_normals, + mesh_aabb_min, + mesh_aabb_max, + mesh_aabb_clamp_vertices, + ) + .unwrap(); + + ( + TriMeshWithDataF64::new(mesh), + SurfaceReconstructionF64::new(reconstruction), + ) +} diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs new file mode 100644 index 00000000..470dd310 --- /dev/null +++ b/pysplashsurf/src/post_processing.rs @@ -0,0 +1,317 @@ +use ndarray::ArrayViewMut2; +use numpy::{PyArray2, PyArrayMethods}; +use pyo3::{exceptions::PyValueError, prelude::*}; +use splashsurf_lib::nalgebra::Vector3; + +use crate::{ + mesh::{ + MixedTriQuadMesh3dF32, MixedTriQuadMesh3dF64, MixedTriQuadMeshWithDataF32, + MixedTriQuadMeshWithDataF64, TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, + TriMeshWithDataF64, + }, + uniform_grid::{UniformGridF32, UniformGridF64}, +}; + +#[pyfunction] +#[pyo3(name = "convert_tris_to_quads_f64")] 
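+// Parameter meaning (as suggested by the defaults used in the pipeline and test code:
+// edge/diagonal ratio 1.75, angles converted with `math.radians`): `non_squareness_limit`
+// bounds how far from square a merged quad may be, while `normal_angle_limit_rad` and
+// `max_interior_angle` are angle thresholds expected in radians.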
+#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] +pub fn convert_tris_to_quads_py_f64<'py>( + mesh: PyObject, + py: Python<'py>, + non_squareness_limit: f64, + normal_angle_limit_rad: f64, + max_interior_angle: f64, +) -> PyResult { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + let quad_mesh = + MixedTriQuadMesh3dF64::new(splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.borrow().inner, + non_squareness_limit, + normal_angle_limit_rad, + max_interior_angle, + )); + Ok(quad_mesh.into_pyobject(py).unwrap().into()) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + let mut quad_mesh = + MixedTriQuadMeshWithDataF64::new(splashsurf_lib::mesh::MeshWithData::new( + splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.borrow().inner.mesh, + non_squareness_limit, + normal_angle_limit_rad, + max_interior_angle, + ), + )); + + quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); + + Ok(quad_mesh.into_pyobject(py).unwrap().into()) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "convert_tris_to_quads_f32")] +#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] +pub fn convert_tris_to_quads_py_f32<'py>( + py: Python<'py>, + mesh: PyObject, + non_squareness_limit: f32, + normal_angle_limit_rad: f32, + max_interior_angle: f32, +) -> PyResult { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + let quad_mesh = + MixedTriQuadMesh3dF32::new(splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.borrow().inner, + non_squareness_limit, + normal_angle_limit_rad, + max_interior_angle, + )); + Ok(quad_mesh.into_pyobject(py).unwrap().into()) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + let mut quad_mesh = + MixedTriQuadMeshWithDataF32::new(splashsurf_lib::mesh::MeshWithData::new( + splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.borrow().inner.mesh, + non_squareness_limit, + normal_angle_limit_rad, + max_interior_angle, + ), + )); + + quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); + + Ok(quad_mesh.into_pyobject(py).unwrap().into()) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "par_laplacian_smoothing_inplace_f64")] +#[pyo3(signature = (mesh, vertex_connectivity, iterations, beta, weights))] +pub fn par_laplacian_smoothing_inplace_py_f64<'py>( + py: Python, + mesh: PyObject, + vertex_connectivity: Vec>, // ToDo: only take reference to data here + iterations: usize, + beta: f64, + weights: Vec, // ToDo: Same here +) -> PyResult<()> { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.borrow_mut().inner, + &vertex_connectivity, + iterations, + beta, + &weights, + ); + Ok(()) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.borrow_mut().inner.mesh, + &vertex_connectivity, + iterations, + beta, + &weights, + ); + Ok(()) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "par_laplacian_smoothing_inplace_f32")] +#[pyo3(signature = (mesh, vertex_connectivity, iterations, 
beta, weights))] +pub fn par_laplacian_smoothing_inplace_py_f32<'py>( + py: Python, + mesh: PyObject, + vertex_connectivity: Vec>, // ToDo: only take reference to data here + iterations: usize, + beta: f32, + weights: Vec, // ToDo: Same here +) -> PyResult<()> { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.borrow_mut().inner, + &vertex_connectivity, + iterations, + beta, + &weights, + ); + Ok(()) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.borrow_mut().inner.mesh, + &vertex_connectivity, + iterations, + beta, + &weights, + ); + Ok(()) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f32")] +#[pyo3(signature = (normals, vertex_connectivity, iterations))] +pub fn par_laplacian_smoothing_normals_inplace_py_f32<'py>( + normals: &Bound<'py, PyArray2>, + vertex_connectivity: Vec>, + iterations: usize, +) { + let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; + let mut normals_vec: Vec> = + bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // Copies data temporarily into a vec + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + &mut normals_vec, + &vertex_connectivity, + iterations, + ); + normals + .as_slice_mut() + .unwrap() + .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array +} + +#[pyfunction] +#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f64")] +#[pyo3(signature = (normals, vertex_connectivity, iterations))] +pub fn par_laplacian_smoothing_normals_inplace_py_f64<'py>( + normals: &Bound<'py, PyArray2>, + vertex_connectivity: Vec>, + iterations: usize, +) { + let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; + let mut normals_vec: Vec> = + bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // Copies data temporarily into a vec + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + &mut normals_vec, + &vertex_connectivity, + iterations, + ); + normals + .as_slice_mut() + .unwrap() + .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array +} + +#[pyfunction] +#[pyo3(name = "decimation_f64")] +#[pyo3(signature = (mesh, *, keep_vertices))] +pub fn decimation_py_f64<'py>( + py: Python, + mesh: PyObject, + keep_vertices: bool, +) -> PyResult>> { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + Ok(splashsurf_lib::postprocessing::decimation( + &mut mesh.borrow_mut().inner, + keep_vertices, + )) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + Ok(splashsurf_lib::postprocessing::decimation( + &mut mesh.borrow_mut().inner.mesh, + keep_vertices, + )) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "decimation_f32")] +#[pyo3(signature = (mesh, *, keep_vertices))] +pub fn decimation_py_f32<'py>( + py: Python, + mesh: PyObject, + keep_vertices: bool, +) -> PyResult>> { + if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + Ok(splashsurf_lib::postprocessing::decimation( + &mut mesh.borrow_mut().inner, + keep_vertices, + )) + } else if mesh.downcast_bound::(py).is_ok() { + let mesh = mesh.downcast_bound::(py).unwrap(); + 
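+        // Same dispatch pattern as the f64 variant above: the untyped `PyObject` is probed
+        // against both wrapper classes; for a mesh with attached data, decimation runs on
+        // the contained `TriMesh3d` only, leaving the point/cell attributes untouched.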
Ok(splashsurf_lib::postprocessing::decimation( + &mut mesh.borrow_mut().inner.mesh, + keep_vertices, + )) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "marching_cubes_cleanup_f64")] +#[pyo3(signature = (mesh, grid, *, max_iter, keep_vertices))] +pub fn marching_cubes_cleanup_py_f64<'py>( + py: Python, + mesh: PyObject, + grid: &UniformGridF64, + max_iter: usize, + keep_vertices: bool, +) -> PyResult>> { + if let Ok(mesh) = mesh.downcast_bound::(py) { + Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( + &mut mesh.borrow_mut().inner, + &grid.inner, + max_iter, + keep_vertices, + )) + } else if let Ok(mesh) = mesh.downcast_bound::(py) { + Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( + &mut mesh.borrow_mut().inner.mesh, + &grid.inner, + max_iter, + keep_vertices, + )) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} + +#[pyfunction] +#[pyo3(name = "marching_cubes_cleanup_f32")] +#[pyo3(signature = (mesh, grid, *, max_iter, keep_vertices))] +pub fn marching_cubes_cleanup_py_f32<'py>( + py: Python, + mesh: PyObject, + grid: &UniformGridF32, + max_iter: usize, + keep_vertices: bool, +) -> PyResult>> { + if let Ok(mesh) = mesh.downcast_bound::(py) { + Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( + &mut mesh.borrow_mut().inner, + &grid.inner, + max_iter, + keep_vertices, + )) + } else if let Ok(mesh) = mesh.downcast_bound::(py) { + Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( + &mut mesh.borrow_mut().inner.mesh, + &grid.inner, + max_iter, + keep_vertices, + )) + } else { + Err(PyErr::new::("Invalid mesh type")) + } +} diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs new file mode 100644 index 00000000..dd84aaec --- /dev/null +++ b/pysplashsurf/src/reconstruction.rs @@ -0,0 +1,210 @@ +use numpy::{PyArray2, PyReadonlyArray2}; +use pyo3::{prelude::*, Bound}; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::{ + nalgebra::Vector3, reconstruct_surface, Aabb3d, GridDecompositionParameters, Index, Real, + SpatialDecomposition, SurfaceReconstruction, +}; + +use crate::{ + mesh::{TriMesh3dF32, TriMesh3dF64}, + uniform_grid::{UniformGridF32, UniformGridF64}, +}; + +macro_rules! 
create_reconstruction_interface { + ($name: ident, $type: ident, $mesh_class: ident, $grid_class: ident) => { + /// SurfaceReconstruction wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: SurfaceReconstruction, + } + + impl $name { + pub fn new(data: SurfaceReconstruction) -> Self { + Self { inner: data } + } + } + + #[gen_stub_pymethods] + #[pymethods] + impl $name { + /// PyTrimesh3d clone of the contained mesh + #[getter] + fn mesh(&self) -> $mesh_class { + $mesh_class::new(self.inner.mesh().clone()) + } + + /// PyUniformGrid clone of the contained grid + #[getter] + fn grid(&self) -> $grid_class { + $grid_class::new(self.inner.grid().clone()) + } + + // Doesn't work because SurfaceReconstruction.mesh() only returns an immutable reference + // /// Returns PyTrimesh3dF32/F64 without copying the mesh data, removes the mesh from the object + // fn take_mesh(&mut self) -> $mesh_class { + // let mesh = std::mem::take(&mut self.inner.mesh()); + // $mesh_class::new(mesh) + // } + + /// Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) + fn particle_densities(&self) -> &Vec<$type> { + self.inner + .particle_densities() + .ok_or_else(|| { + anyhow::anyhow!("Surface Reconstruction did not return particle densities") + }) + .unwrap() + } + + /// Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) + fn particle_neighbors(&self) -> Option<&Vec>> { + self.inner.particle_neighbors() + } + } + }; +} + +create_reconstruction_interface!(SurfaceReconstructionF64, f64, TriMesh3dF64, UniformGridF64); +create_reconstruction_interface!(SurfaceReconstructionF32, f32, TriMesh3dF32, UniformGridF32); + +/// Reconstruct the surface from only particle positions +pub fn reconstruct_surface_py( + particles: &[Vector3], + particle_radius: R, + rest_density: R, + smoothing_length: R, + cube_size: R, + iso_surface_threshold: R, + enable_multi_threading: bool, + global_neighborhood_list: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + aabb_min: Option<[R; 3]>, + aabb_max: Option<[R; 3]>, +) -> SurfaceReconstruction { + let aabb; + if let (Some(aabb_min), Some(aabb_max)) = (aabb_min, aabb_max) { + // Convert the min and max arrays to Vector3 + aabb = Some(Aabb3d::new( + Vector3::from(aabb_min), + Vector3::from(aabb_max), + )); + } else { + aabb = None; + } + + let spatial_decomposition; + if use_custom_grid_decomposition { + let mut grid_params = GridDecompositionParameters::default(); + grid_params.subdomain_num_cubes_per_dim = subdomain_num_cubes_per_dim; + spatial_decomposition = Some(SpatialDecomposition::UniformGrid(grid_params)); + } else { + spatial_decomposition = None; + } + + let params = splashsurf_lib::Parameters { + particle_radius, + rest_density, + compact_support_radius: (smoothing_length * particle_radius).times_f64(2.0), + cube_size: cube_size * particle_radius, + iso_surface_threshold, + particle_aabb: aabb, + enable_multi_threading, + spatial_decomposition, + global_neighborhood_list, + }; + + let surface = reconstruct_surface(&particles, ¶ms).unwrap(); + + surface +} + +#[pyfunction] +#[pyo3(name = "reconstruct_surface_f32")] +#[pyo3(signature = (particles, *, particle_radius, rest_density, + smoothing_length, cube_size, iso_surface_threshold, enable_multi_threading=false, + global_neighborhood_list=false, 
use_custom_grid_decomposition=false, subdomain_num_cubes_per_dim=64, + aabb_min = None, aabb_max = None +))] +pub fn reconstruct_surface_py_f32<'py>( + particles: &Bound<'py, PyArray2>, + particle_radius: f32, + rest_density: f32, + smoothing_length: f32, + cube_size: f32, + iso_surface_threshold: f32, + enable_multi_threading: bool, + global_neighborhood_list: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + aabb_min: Option<[f32; 3]>, + aabb_max: Option<[f32; 3]>, +) -> PyResult { + let particles: PyReadonlyArray2 = particles.extract()?; + + let particle_positions = particles.as_slice()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + let reconstruction = reconstruct_surface_py::( + particle_positions, + particle_radius, + rest_density, + smoothing_length, + cube_size, + iso_surface_threshold, + enable_multi_threading, + global_neighborhood_list, + use_custom_grid_decomposition, + subdomain_num_cubes_per_dim, + aabb_min, + aabb_max, + ); + + Ok(SurfaceReconstructionF32::new(reconstruction.to_owned())) +} + +#[pyfunction] +#[pyo3(name = "reconstruct_surface_f64")] +#[pyo3(signature = (particles, *, particle_radius, rest_density, + smoothing_length, cube_size, iso_surface_threshold, enable_multi_threading=false, + global_neighborhood_list=false, use_custom_grid_decomposition=false, subdomain_num_cubes_per_dim=64, + aabb_min = None, aabb_max = None +))] +pub fn reconstruct_surface_py_f64<'py>( + particles: &Bound<'py, PyArray2>, + particle_radius: f64, + rest_density: f64, + smoothing_length: f64, + cube_size: f64, + iso_surface_threshold: f64, + enable_multi_threading: bool, + global_neighborhood_list: bool, + use_custom_grid_decomposition: bool, + subdomain_num_cubes_per_dim: u32, + aabb_min: Option<[f64; 3]>, + aabb_max: Option<[f64; 3]>, +) -> PyResult { + let particles: PyReadonlyArray2 = particles.extract()?; + + let particle_positions = particles.as_slice()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + let reconstruction = reconstruct_surface_py::( + particle_positions, + particle_radius, + rest_density, + smoothing_length, + cube_size, + iso_surface_threshold, + enable_multi_threading, + global_neighborhood_list, + use_custom_grid_decomposition, + subdomain_num_cubes_per_dim, + aabb_min, + aabb_max, + ); + + Ok(SurfaceReconstructionF64::new(reconstruction.to_owned())) +} diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs new file mode 100644 index 00000000..5ef4f350 --- /dev/null +++ b/pysplashsurf/src/sph_interpolation.rs @@ -0,0 +1,128 @@ +use ndarray::{ArrayView, ArrayView2}; +use numpy::{PyArray2, PyReadonlyArray2, ToPyArray}; +use pyo3::{prelude::*, PyResult}; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::{ + nalgebra::{Unit, Vector3}, + sph_interpolation::SphInterpolator, +}; + +macro_rules! 
create_sph_interpolator_interface {
+    ($name: ident, $type: ident) => {
+        /// SphInterpolator wrapper
+        #[gen_stub_pyclass]
+        #[pyclass]
+        pub struct $name {
+            pub inner: SphInterpolator<$type>,
+        }
+
+        impl $name {
+            pub fn new(data: SphInterpolator<$type>) -> Self {
+                Self { inner: data }
+            }
+        }
+
+        #[gen_stub_pymethods]
+        #[pymethods]
+        impl $name {
+            #[new]
+            fn py_new<'py>(
+                particle_positions: &Bound<'py, PyArray2<$type>>,
+                particle_densities: Vec<$type>,
+                particle_rest_mass: $type,
+                compact_support_radius: $type,
+            ) -> PyResult<Self> {
+                let particle_positions: PyReadonlyArray2<$type> =
+                    particle_positions.extract().unwrap();
+                let particle_positions = particle_positions.as_slice().unwrap();
+                let particle_positions: &[Vector3<$type>] =
+                    bytemuck::cast_slice(particle_positions);
+
+                Ok($name::new(SphInterpolator::new(
+                    particle_positions,
+                    particle_densities.as_slice(),
+                    particle_rest_mass,
+                    compact_support_radius,
+                )))
+            }
+
+            /// Interpolates a scalar per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+            fn interpolate_scalar_quantity<'py>(
+                &self,
+                particle_quantity: Vec<$type>,
+                interpolation_points: &Bound<'py, PyArray2<$type>>,
+                first_order_correction: bool,
+            ) -> PyResult<Vec<$type>> {
+                let interpolation_points: PyReadonlyArray2<$type> =
+                    interpolation_points.extract()?;
+                let interpolation_points = interpolation_points.as_slice()?;
+                let interpolation_points: &[Vector3<$type>] =
+                    bytemuck::cast_slice(interpolation_points);
+
+                Ok(self.inner.interpolate_scalar_quantity(
+                    particle_quantity.as_slice(),
+                    interpolation_points,
+                    first_order_correction,
+                ))
+            }
+
+            /// Interpolates surface normals (i.e. the normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation
+            fn interpolate_normals<'py>(
+                &self,
+                py: Python<'py>,
+                interpolation_points: &Bound<'py, PyArray2<$type>>,
+            ) -> PyResult<Bound<'py, PyArray2<$type>>> {
+                let interpolation_points: PyReadonlyArray2<$type> =
+                    interpolation_points.extract()?;
+                let interpolation_points = interpolation_points.as_slice()?;
+                let interpolation_points: &[Vector3<$type>] =
+                    bytemuck::cast_slice(interpolation_points);
+
+                let normals_vec = self.inner.interpolate_normals(interpolation_points);
+                let normals_vec =
+                    bytemuck::allocation::cast_vec::<Unit<Vector3<$type>>, $type>(normals_vec);
+
+                let normals: &[$type] = normals_vec.as_slice();
+                let normals: ArrayView2<$type> =
+                    ArrayView::from_shape((normals.len() / 3, 3), normals).unwrap();
+
+                Ok(normals.to_pyarray(py))
+            }
+
+            /// Interpolates a vectorial per-particle quantity to the given points; panics if there are fewer per-particle values than particles
+            fn interpolate_vector_quantity<'py>(
+                &self,
+                py: Python<'py>,
+                particle_quantity: &Bound<'py, PyArray2<$type>>,
+                interpolation_points: &Bound<'py, PyArray2<$type>>,
+                first_order_correction: bool,
+            ) -> PyResult<Bound<'py, PyArray2<$type>>> {
+                let interpolation_points: PyReadonlyArray2<$type> =
+                    interpolation_points.extract()?;
+                let interpolation_points = interpolation_points.as_slice()?;
+                let interpolation_points: &[Vector3<$type>] =
+                    bytemuck::cast_slice(interpolation_points);
+
+                let particle_quantity: PyReadonlyArray2<$type> = particle_quantity.extract()?;
+                let particle_quantity = particle_quantity.as_slice()?;
+                let particle_quantity: &[Vector3<$type>] = bytemuck::cast_slice(particle_quantity);
+
+                let res_vec = self.inner.interpolate_vector_quantity(
+                    particle_quantity,
+                    interpolation_points,
+                    first_order_correction,
+                );
+                let res_vec = bytemuck::allocation::cast_vec::<Vector3<$type>,
$type>(res_vec); + + let res: &[$type] = res_vec.as_slice(); + let res: ArrayView2<$type> = + ArrayView::from_shape((res.len() / 3, 3), res).unwrap(); + + Ok(res.to_pyarray(py)) + } + } + }; +} + +create_sph_interpolator_interface!(SphInterpolatorF64, f64); +create_sph_interpolator_interface!(SphInterpolatorF32, f32); diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs new file mode 100644 index 00000000..b33b116c --- /dev/null +++ b/pysplashsurf/src/uniform_grid.rs @@ -0,0 +1,23 @@ +use pyo3::prelude::*; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::UniformGrid; + +macro_rules! create_grid_interface { + ($name: ident, $type: ident) => { + /// UniformGrid wrapper + #[gen_stub_pyclass] + #[pyclass] + pub struct $name { + pub inner: UniformGrid, + } + + impl $name { + pub fn new(data: UniformGrid) -> Self { + Self { inner: data } + } + } + }; +} + +create_grid_interface!(UniformGridF64, f64); +create_grid_interface!(UniformGridF32, f32); diff --git a/pysplashsurf/tests/main.rs b/pysplashsurf/tests/main.rs new file mode 100644 index 00000000..31e1bb20 --- /dev/null +++ b/pysplashsurf/tests/main.rs @@ -0,0 +1,7 @@ +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} diff --git a/pysplashsurf/tests/test_calling.py b/pysplashsurf/tests/test_calling.py new file mode 100644 index 00000000..eee399d8 --- /dev/null +++ b/pysplashsurf/tests/test_calling.py @@ -0,0 +1,216 @@ +import pysplashsurf +import numpy as np +import math +import meshio +import subprocess +import time +import trimesh +import pathlib + +BINARY_PATH = "splashsurf" +DIR = pathlib.Path(__file__).parent.resolve() +BGEO_PATH = DIR.joinpath("../ParticleData_Fluid_50.bgeo") +VTK_PATH = DIR.joinpath("../ParticleData_Fluid_5.vtk") + + +def test_bgeo(): + particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) + + assert(len(particles) == 4732) + +def test_aabb_class(): + print("\nTesting AABB class") + + aabb = pysplashsurf.Aabb3dF64.par_from_points(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]])) + + assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() + assert(aabb.max() == np.array([2.0, 1.0, 4.2])).all() + + aabb.join_with_point([3.0, 2.0, 1.0]) + + assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() + assert(aabb.max() == np.array([3.0, 2.0, 4.2])).all() + + assert(aabb.contains_point([1.0, 1.0, 4.1])) + assert(aabb.contains_point([0.0, 0.0, 0.0])) + assert(not aabb.contains_point([4.0, 2.0, 1.0])) + assert(not aabb.contains_point([1.0, -1.0, 5.0])) + +def test_marching_cubes_calls(): + print("\nTesting marching cubes calls") + + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + reconstruction = pysplashsurf.reconstruct_surface(particles, enable_multi_threading=True, particle_radius=0.025, + rest_density=1000.0, smoothing_length=2.0, cube_size=0.5, + iso_surface_threshold=0.6) + mesh = reconstruction.mesh + verts_before = len(mesh.vertices) + print("# of vertices before:", verts_before) + + mesh_with_data = pysplashsurf.create_mesh_with_data_object(mesh) + pysplashsurf.marching_cubes_cleanup(mesh_with_data, reconstruction.grid) + + verts_after = len(mesh_with_data.mesh.vertices) + print("# of vertices after:", verts_after) + assert(verts_after < verts_before) + +def test_memory_access(): + print("\nTesting memory copy vs take") + + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float64) + reconstruction = pysplashsurf.reconstruct_surface(particles, enable_multi_threading=True, particle_radius=0.025, + 
+def test_memory_access():
+    print("\nTesting memory copy vs take")
+
+    particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float64)
+    reconstruction = pysplashsurf.reconstruct_surface(particles, enable_multi_threading=True, particle_radius=0.025,
+                                                      rest_density=1000.0, smoothing_length=2.0, cube_size=0.5,
+                                                      iso_surface_threshold=0.6, aabb_min=np.array([0.0, 0.0, 0.0]), aabb_max=np.array([2.0, 2.0, 2.0]))
+    mesh = reconstruction.mesh
+
+    start = time.time()
+    triangles_copy = mesh.triangles
+    vertices_copy = mesh.vertices
+    copy_time = time.time() - start
+    print("Copy time:", copy_time)
+
+    start = time.time()
+    vertices, triangles = mesh.take_vertices_and_triangles()
+    take_time = time.time() - start
+    print("Take time:", take_time)
+
+    print("Copy time / Take time (Speedup):", copy_time / take_time)
+
+    assert(np.allclose(vertices, vertices_copy))
+    assert(np.allclose(triangles, triangles_copy))
+
+def reconstruction_pipeline(input_file, output_file, *, attributes_to_interpolate=[], enable_multi_threading=True, particle_radius=0.025,
+                            rest_density=1000.0, smoothing_length=2.0, cube_size=0.5,
+                            iso_surface_threshold=0.6, mesh_smoothing_weights=False, output_mesh_smoothing_weights=False, sph_normals=False,
+                            mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=5, normals_smoothing_iters=5,
+                            mesh_aabb_min=None, mesh_aabb_max=None, mesh_cleanup=False, decimate_barnacles=False, keep_vertices=False,
+                            compute_normals=False, output_raw_normals=False, mesh_aabb_clamp_vertices=False,
+                            check_mesh_closed=False, check_mesh_manifold=False, check_mesh_debug=False,
+                            generate_quads=False, quad_max_edge_diag_ratio=1.75, quad_max_normal_angle=10.0, quad_max_interior_angle=135.0,
+                            subdomain_grid=False, subdomain_num_cubes_per_dim=64):
+
+    mesh = meshio.read(input_file)
+    particles = np.array(mesh.points, dtype=np.float64)
+
+    attrs = {}
+    for attr in attributes_to_interpolate:
+        if attr in mesh.point_data:
+            if mesh.point_data[attr].dtype.kind == 'f':
+                attrs[attr] = mesh.point_data[attr].astype(np.float64)
+            else:
+                attrs[attr] = mesh.point_data[attr].astype(np.int64)
+
+    mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline(particles, attributes_to_interpolate=attrs, enable_multi_threading=enable_multi_threading, particle_radius=particle_radius,
+                                                                          rest_density=rest_density, smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold,
+                                                                          mesh_smoothing_weights=mesh_smoothing_weights, sph_normals=sph_normals,
+                                                                          mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization,
+                                                                          mesh_smoothing_iters=mesh_smoothing_iters, normals_smoothing_iters=normals_smoothing_iters,
+                                                                          mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_cleanup=mesh_cleanup, decimate_barnacles=decimate_barnacles,
+                                                                          keep_vertices=keep_vertices, compute_normals=compute_normals, output_raw_normals=output_raw_normals,
+                                                                          mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, subdomain_grid=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, output_mesh_smoothing_weights=output_mesh_smoothing_weights)
+
+    # Convert triangles to quads
+    if generate_quads:
+        mesh_with_data = pysplashsurf.convert_tris_to_quads(mesh_with_data, non_squareness_limit=quad_max_edge_diag_ratio, normal_angle_limit_rad=math.radians(quad_max_normal_angle), max_interior_angle=math.radians(quad_max_interior_angle))
+
+    pysplashsurf.write_to_file(mesh_with_data, output_file, consume_object=False)
+
+    mesh = mesh_with_data.take_mesh()
+
+    if isinstance(mesh, (pysplashsurf.TriMesh3dF64, pysplashsurf.TriMesh3dF32)):
+        # Mesh checks
+        if check_mesh_closed or check_mesh_manifold:
+            pysplashsurf.check_mesh_consistency(reconstruction.grid, mesh, check_closed=check_mesh_closed,
+                                                check_manifold=check_mesh_manifold, debug=check_mesh_debug)
+
+    # Left out: Mesh orientation check
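+
+# A minimal call of the helper above, assuming "particles.vtk" holds particle
+# positions (hypothetical file names; the parameters mirror those used by the
+# tests below):
+#
+#   reconstruction_pipeline("particles.vtk", "surface.vtk", particle_radius=0.025,
+#                           smoothing_length=2.0, cube_size=0.5, iso_surface_threshold=0.6)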
+
+def test_no_post_processing():
+    start = time.time()
+    subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --mesh-cleanup=off --mesh-smoothing-weights=off --mesh-smoothing-iters=0 --normals=off --normals-smoothing-iters=0".split(), check=True)
+    print("Binary done in", time.time() - start)
+
+    start = time.time()
+    reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0),
+                            cube_size=np.float64(0.5), iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=True,
+                            mesh_smoothing_weights_normalization=np.float64(13.0), mesh_smoothing_iters=0, normals_smoothing_iters=0,
+                            generate_quads=False, mesh_cleanup=False, compute_normals=False, subdomain_grid=True)
+    print("Python done in", time.time() - start)
+
+    binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk"))
+    python_mesh = meshio.read(DIR.joinpath("test.vtk"))
+
+    binary_verts = np.array(binary_mesh.points, dtype=np.float64)
+    python_verts = np.array(python_mesh.points, dtype=np.float64)
+
+    print("# of vertices binary:", len(binary_verts))
+    print("# of vertices python:", len(python_verts))
+
+    assert(len(binary_verts) == len(python_verts))
+
+    binary_verts.sort(axis=0)
+    python_verts.sort(axis=0)
+
+    assert(np.allclose(binary_verts, python_verts))
+
+def test_with_post_processing():
+    start = time.time()
+    subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --interpolate_attribute velocity --decimate-barnacles=on --mesh-cleanup=on --mesh-smoothing-weights=on --mesh-smoothing-iters=25 --normals=on --normals-smoothing-iters=10 --output-smoothing-weights=on --generate-quads=off".split(), check=True)
+    print("Binary done in", time.time() - start)
+
+    start = time.time()
+    reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), attributes_to_interpolate=["velocity"], particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0),
+                            cube_size=np.float64(0.5), iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=True,
+                            mesh_smoothing_weights_normalization=np.float64(13.0), mesh_smoothing_iters=25, normals_smoothing_iters=10,
+                            generate_quads=False, mesh_cleanup=True, compute_normals=True, subdomain_grid=True, decimate_barnacles=True,
+                            output_mesh_smoothing_weights=True, output_raw_normals=True)
+    print("Python done in", time.time() - start)
+
+    binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk"))
+    python_mesh = meshio.read(DIR.joinpath("test.vtk"))
+
+    # Compare number of vertices
+    binary_verts = np.array(binary_mesh.points, dtype=np.float64)
+    python_verts = np.array(python_mesh.points, dtype=np.float64)
+
+    print("# of vertices binary:", len(binary_verts))
+    print("# of vertices python:", len(python_verts))
+
+    assert(len(binary_verts) == len(python_verts))
+
+    # Compare interpolated attribute
+    binary_vels = binary_mesh.point_data["velocity"]
+    python_vels = python_mesh.point_data["velocity"]
+
+    binary_vels.sort(axis=0)
+    python_vels.sort(axis=0)
+
+    assert(np.allclose(binary_vels, python_vels))
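+
+    # The two closest_point() queries below compute a symmetric (two-sided) mean
+    # surface distance: each mesh's vertices are projected onto the other mesh
+    # and the per-vertex distances are averaged over both directions.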
+    # Trimesh similarity test
+    binary_mesh = trimesh.load_mesh(DIR.joinpath("test_bin.vtk"), "vtk")
+    python_mesh = trimesh.load_mesh(DIR.joinpath("test.vtk"), "vtk")
+
+    (_, distance_bin, _) = trimesh.proximity.closest_point(binary_mesh, python_verts)
+    (_, distance_py, _) = trimesh.proximity.closest_point(python_mesh, binary_verts)
+    distance = (np.sum(distance_bin) + np.sum(distance_py)) / (len(distance_bin) + len(distance_py))
+    print("Distance:", distance)
+    assert(distance < 1e-5)
+
+    # Naïve similarity test
+    binary_verts.sort(axis=0)
+    python_verts.sort(axis=0)
+
+    print("Binary verts:", binary_verts)
+    print("Python verts:", python_verts)
+
+    assert(np.allclose(binary_verts, python_verts))
+
+# test_bgeo()
+# test_aabb_class()
+# test_marching_cubes_calls()
+# test_memory_access()
+# test_with_post_processing()
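+
+# To run the suite directly (assuming pytest is installed and the splashsurf
+# binary is on PATH, since the comparison tests invoke it via subprocess):
+#
+#   pytest pysplashsurf/tests/test_calling.py -v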