diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 0f8ad475db8..344b8eacc3a 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,3 +1,5 @@ +env: + steps: - label: "CUDA Julia {{matrix.version}}" matrix: @@ -7,12 +9,13 @@ steps: plugins: - JuliaCI/julia#v1: version: "{{matrix.version}}" - command: | - true + - JuliaCI/julia-test#v1: ~ + env: + TRIXI_TEST: "CUDA" agents: queue: "juliagpu" cuda: "*" if: build.message !~ /\[skip ci\]/ timeout_in_minutes: 60 soft_fail: - - exit_status: 3 \ No newline at end of file + - exit_status: 3 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..4a244f68d69 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1 @@ +2bc1cc68c4fed509a2a3e71d02b84e0be5c5565b diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml index 2b926562fc5..321df202738 100644 --- a/.github/workflows/Documenter.yml +++ b/.github/workflows/Documenter.yml @@ -33,7 +33,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/setup-julia@v2 with: version: '1.10' diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index 1becd0390c5..bffb0e744ab 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -68,7 +68,7 @@ jobs: # - mpi - threaded steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml index 4770fcdfe54..1c2923f30d1 100644 --- a/.github/workflows/FormatCheck.yml +++ b/.github/workflows/FormatCheck.yml @@ -11,7 +11,7 @@ jobs: format-suggestions: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/julia-format@v4 with: version: "1.0.60" diff --git a/.github/workflows/GPUCompat.yml 
b/.github/workflows/GPUCompat.yml deleted file mode 100644 index 335e1c83c4c..00000000000 --- a/.github/workflows/GPUCompat.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: GPU Package Compatibility - -on: - pull_request: - paths-ignore: - - 'AUTHORS.md' - - 'CITATION.bib' - - 'CONTRIBUTING.md' - - 'LICENSE.md' - - 'NEWS.md' - - 'README.md' - - '.zenodo.json' - - '.github/workflows/benchmark.yml' - - '.github/workflows/CompatHelper.yml' - - '.github/workflows/TagBot.yml' - - 'benchmark/**' - - 'docs/**' - - 'utils/**' - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test: - if: "!contains(github.event.head_commit.message, 'skip ci')" - name: ${{ matrix.os }} - ${{ matrix.arch }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - version: '1.10' - os: ubuntu-latest - arch: x64 - - version: '1.10' - os: windows-latest - arch: x64 - # CUDA.jl only supports 64-bit Linux and Windows, see https://github.com/JuliaGPU/CUDA.jl?tab=readme-ov-file#requirements - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Julia - uses: julia-actions/setup-julia@v2 - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - - name: Display version info - run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)' - - - name: Cache Julia packages - uses: julia-actions/cache@v2 - - - name: Build project - uses: julia-actions/julia-buildpkg@v1 - - # Only CUDA.jl is needed for GPU compatibility test now - - name: Add CUDA.jl to environment - run: | - julia --project=. -e ' - using Pkg; - Pkg.activate(temp=true); - Pkg.develop(PackageSpec(path=pwd())); - Pkg.add("CUDA"); - Pkg.update()' - - # - name: Add Metal.jl to environment - # run: | - # julia --project=. 
-e ' - # using Pkg; - # Pkg.activate(temp=true); - # Pkg.develop(PackageSpec(path=pwd())); - # Pkg.add("Metal"); - # Pkg.update()' - - # - name: Add AMDGPU.jl to environment - # run: | - # julia --project=. -e ' - # using Pkg; - # Pkg.activate(temp=true); - # Pkg.develop(PackageSpec(path=pwd())); - # Pkg.add("AMDGPU"); - # Pkg.update()' diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml index b2d34cbc856..85c029f8392 100644 --- a/.github/workflows/Invalidations.yml +++ b/.github/workflows/Invalidations.yml @@ -19,12 +19,12 @@ jobs: - uses: julia-actions/setup-julia@v2 with: version: '1' - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-invalidations@v1 id: invs_pr - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ github.event.repository.default_branch }} - uses: julia-actions/julia-buildpkg@v1 diff --git a/.github/workflows/ReviewChecklist.yml b/.github/workflows/ReviewChecklist.yml index d8854411804..22ecc7158a1 100644 --- a/.github/workflows/ReviewChecklist.yml +++ b/.github/workflows/ReviewChecklist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Add review checklist uses: trixi-framework/add-pr-review-checklist@v1 with: diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index 546be01ad74..172991d9f12 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -8,6 +8,6 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Actions Repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Check spelling - uses: crate-ci/typos@v1.34.0 + uses: crate-ci/typos@v1.35.7 diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 62382b4b2c9..39d447d6b53 100644 --- a/.github/workflows/benchmark.yml +++ 
b/.github/workflows/benchmark.yml @@ -15,7 +15,7 @@ jobs: arch: - x64 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index df47ce4b73f..b8b08bca6c4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -103,7 +103,7 @@ jobs: arch: x64 trixi_test: threaded steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} @@ -176,8 +176,8 @@ jobs: # Instead, we use the more tedious approach described above. # At first, we check out the repository and download all artifacts # (and list files for debugging). - - uses: actions/checkout@v4 - - uses: actions/download-artifact@v4 + - uses: actions/checkout@v5 + - uses: actions/download-artifact@v5 - run: ls -R # Next, we merge the individual coverage files and upload # the combined results to Coveralls. diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index f6603468639..f12816c43f9 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -64,7 +64,7 @@ jobs: - TrixiShallowWater.jl - TrixiAtmo.jl steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} @@ -76,7 +76,7 @@ jobs: # Note: we retrieve the current `main` branch of the downstream package to ensure # that compatibility errors we make in Trixi.jl are detected already here # See also https://github.com/trixi-framework/Trixi.jl/pull/1707#discussion_r1382938895 - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: repository: trixi-framework/${{ matrix.package }} path: downstream diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000000..921e21a9555 --- /dev/null +++ b/.mailmap @@ -0,0 +1,19 @@ +Hendrik Ranocha +Michael Schlottke-Lakemper +Jesse Chan <1156048+jlchan@users.noreply.github.com> +Gregor 
Gassner +Gregor Gassner +Erik Faulhaber +Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> +Erik Faulhaber <44124897+erik-f@users.noreply.github.com> +Valentin Churavy +Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> +Lars Christmann +Felipe Santillan <72009492+FelipeSantillan@users.noreply.github.com> +Benedict Geihe <135045760+benegee@users.noreply.github.com> +Benedict Geihe <135045760+benegee@users.noreply.github.com> <135045760+bgeihe@users.noreply.github.com> +JuliaOd <71124291+JuliaOd@users.noreply.github.com> +Daniel Doehring +Christof Czernik +Felipe Santillan <72009492+FelipeSantillan@users.noreply.github.com> +Warisa Roongaraya <81345089+warisa-r@users.noreply.github.com> diff --git a/NEWS.md b/NEWS.md index 72f572a74fc..b87a369b042 100644 --- a/NEWS.md +++ b/NEWS.md @@ -5,6 +5,37 @@ Trixi.jl follows the interpretation of used in the Julia ecosystem. Notable changes will be documented in this file for human readability. + +## Changes when updating to v0.13 from v0.12.x + +#### Changed + +- The `polyester` preference got merged with the `native_threading` preference and the `Trixi.set_polyester!` + function got renamed to `Trixi.set_threading_backend!` ([#2476]). +- Default wave-speed estimate used within `flux_lax_friedrichs` changed from `max_abs_speed_naive` to + `max_abs_speed` which is less diffusive. + In v0.13, `flux_lax_friedrichs = FluxLaxFriedrichs(max_abs_speed = max_abs_speed)` + instead of the previous default + `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)` ([#2458]). +- The signature of the `VisualizationCallback` constructor changed. + In the new version, it is mandatory to pass the semidiscretization `semi` to + determine the default plotting type (1D for 1D simulations, 2D for 2D and 3D simulations). + This can further be customized via the keyword argument `plot_data_creator`, which had + the default value `plot_data_creator = PlotData2D` before the change ([#2468]). 
+ +#### Removed + +- Deprecations introduced in earlier versions of Trixi.jl have been removed. + + +## Changes in the v0.12 lifecycle + +#### Added +- Initial support for adapting data-structures between different storage arrays was added. This enables future work to support GPU with Trixi ([#2212]). + +#### Deprecated + + ## Changes when updating to v0.12 from v0.11.x #### Added @@ -26,10 +57,10 @@ for human readability. #### Removed -- The shallow-water equation types `ShallowWaterEquations1D`, `ShallowWaterEquations2D`, and - `ShallowWaterEquationsQuasi1D` have been removed from Trixi.jl and are now available via - [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl/). - This also affects the related functions `hydrostatic_reconstruction_audusse_etal`, +- The shallow-water equation types `ShallowWaterEquations1D`, `ShallowWaterEquations2D`, and + `ShallowWaterEquationsQuasi1D` have been removed from Trixi.jl and are now available via + [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl/). + This also affects the related functions `hydrostatic_reconstruction_audusse_etal`, `flux_nonconservative_audusse_etal`, and `FluxHydrostaticReconstruction`. ([#2379]) - The additional `ìnitial_cache` entries in the caches of `SemidiscretizationHyperbolic` and `SemidiscretizationHyperbolicParabolic`, and the corresponding keyword arguments of @@ -39,7 +70,7 @@ for human readability. #### Added -- Added symmetry plane/reflective wall velocity+stress boundary conditions for the compressible Navier-Stokes equations in 2D and 3D. +- Added symmetry plane/reflective wall velocity+stress boundary conditions for the compressible Navier-Stokes equations in 2D and 3D. Currently available only for the `P4estMesh` mesh type, `GradientVariablesPrimitive`, and `Adiabatic` heat boundary condition ([#2416]). - Added `LaplaceDiffusionEntropyVariables1D`, `LaplaceDiffusionEntropyVariables2D`, and `LaplaceDiffusionEntropyVariables3D`. 
These add scalar diffusion to each equation of a system, but apply diffusion in terms of the entropy variables, which symmetrizes the viscous formulation and ensures semi-discrete entropy dissipation ([#2406]). @@ -52,8 +83,8 @@ for human readability. #### Deprecated -- The (2D) aerodynamic coefficients - `DragCoefficientPressure, LiftCoefficientPressure, DragCoefficientShearStress, LiftCoefficientShearStress` have been renamed to +- The (2D) aerodynamic coefficients + `DragCoefficientPressure, LiftCoefficientPressure, DragCoefficientShearStress, LiftCoefficientShearStress` have been renamed to `DragCoefficientPressure2D, LiftCoefficientPressure2D, DragCoefficientShearStress2D, LiftCoefficientShearStress2D`. ([#2375]) ## Changes when updating to v0.11 from v0.10.x diff --git a/Project.toml b/Project.toml index 83bfc35f982..fbddc63a651 100644 --- a/Project.toml +++ b/Project.toml @@ -1,10 +1,11 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan ", "Andrés Rueda-Ramírez "] -version = "0.12.7-DEV" +version = "0.13.7-DEV" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" @@ -16,6 +17,7 @@ EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" +KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890" @@ -52,24 +54,30 @@ TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [weakdeps] +CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" Convex = "f65535da-76fb-5f13-bab9-19810c17039a" ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199" Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" +SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" [extensions] +TrixiCUDAExt = "CUDA" TrixiConvexECOSExt = ["Convex", "ECOS"] TrixiMakieExt = "Makie" TrixiNLsolveExt = "NLsolve" +TrixiSparseConnectivityTracerExt = "SparseConnectivityTracer" [compat] Accessors = "0.1.36" -CodeTracking = "1.0.5" +Adapt = "4" +CUDA = "5.8" +CodeTracking = "1.0.5, 2" ConstructionBase = "1.5" Convex = "0.16" -DataStructures = "0.18.15" +DataStructures = "0.18.15, 0.19" DelimitedFiles = "1" -DiffEqBase = "6.154" +DiffEqBase = "6.155.2" DiffEqCallbacks = "2.35, 3, 4" Downloads = "1.6" ECOS = "1.1.2" @@ -77,6 +85,7 @@ EllipsisNotation = "1.0" FillArrays = "1.9" ForwardDiff = "0.10.36, 1" HDF5 = "0.16.10, 0.17" +KernelAbstractions = "0.9.36" LinearAlgebra = "1" LinearMaps = "2.7, 3.0" LoopVectorization = 
"0.12.171" @@ -94,23 +103,24 @@ Printf = "1" RecipesBase = "1.3.4" RecursiveArrayTools = "3.31.1" Reexport = "1.2" -Requires = "1.1" +Requires = "1.3" SciMLBase = "2.67.0" SimpleUnPack = "1.1" SparseArrays = "1" +SparseConnectivityTracer = "1.0.1" StableRNGs = "1.0.2" StartUpDG = "1.1.5" Static = "1.1.1" StaticArrayInterface = "1.5.1" StaticArrays = "1.9" StrideArrays = "0.1.29" -StructArrays = "0.6.18, 0.7" +StructArrays = "0.6.20, 0.7" SummationByPartsOperators = "0.5.52" T8code = "0.7.4" -TimerOutputs = "0.5.23" -Triangulate = "2.2" +TimerOutputs = "0.5.25" +Triangulate = "2.2, 3" TriplotBase = "0.1" TriplotRecipes = "0.1" -TrixiBase = "0.1.3" +TrixiBase = "0.1.6" UUIDs = "1.6" julia = "1.10" diff --git a/README.md b/README.md index abbfe97d9ea..b6cf57073da 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,12 @@ installation and postprocessing procedures. Its features include: * [Explicit low-storage Runge-Kutta time integration](https://diffeq.sciml.ai/latest/solvers/ode_solve/#Low-Storage-Methods) * [Strong stability preserving methods](https://diffeq.sciml.ai/latest/solvers/ode_solve/#Explicit-Strong-Stability-Preserving-Runge-Kutta-Methods-for-Hyperbolic-PDEs-(Conservation-Laws)) * CFL-based and error-based time step control +* Custom explicit time integration schemes + * Maximized linear stability via paired explicit Runge-Kutta methods + * Relaxation Runge-Kutta methods for entropy-conservative time integration * Native support for differentiable programming * Forward mode automatic differentiation via [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) + * Automatic Jacobian sparsity detection via [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl) * Periodic and weakly-enforced boundary conditions * Multiple governing equations: * Compressible Euler equations diff --git a/benchmark/Project.toml b/benchmark/Project.toml index 84c53b01f29..ab28877c57f 100644 --- a/benchmark/Project.toml +++ 
b/benchmark/Project.toml @@ -8,4 +8,4 @@ Trixi = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" BenchmarkTools = "0.5, 0.7, 1.0" OrdinaryDiffEq = "5.65, 6" PkgBenchmark = "0.2.10" -Trixi = "0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11, 0.12" +Trixi = "0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11, 0.12, 0.13" diff --git a/docs/Project.toml b/docs/Project.toml index f2623d6c1ba..895e1254eb5 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,4 +1,6 @@ [deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Changelog = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e" Convex = "f65535da-76fb-5f13-bab9-19810c17039a" @@ -12,14 +14,23 @@ Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" OrdinaryDiffEqLowOrderRK = "1344f307-1e59-4825-a18e-ace9aa3fa4c6" OrdinaryDiffEqLowStorageRK = "b0944070-b475-4768-8dec-fb6eb410534d" +OrdinaryDiffEqSDIRK = "2d112036-d095-4a1e-ab9a-08536f3ecdbf" OrdinaryDiffEqSSPRK = "669c94d9-1f4b-4b64-b377-1aa079aa2388" OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" +SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Trixi = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" +[sources.Trixi] +path = ".." 
+ [compat] +Adapt = "4" +ADTypes = "1.11" CairoMakie = "0.12, 0.13, 0.14, 0.15" Changelog = "1.1" Convex = "0.16" @@ -33,9 +44,12 @@ Measurements = "2.5" NLsolve = "4.5.1" OrdinaryDiffEqLowOrderRK = "1.2" OrdinaryDiffEqLowStorageRK = "1.2" +OrdinaryDiffEqSDIRK = "1.1" OrdinaryDiffEqSSPRK = "1.2" OrdinaryDiffEqTsit5 = "1.1" Plots = "1.9" +SparseConnectivityTracer = "1.0.1" +SparseMatrixColorings = "0.4.21" Test = "1" Trixi2Vtk = "0.3.16" TrixiBase = "0.1.1" diff --git a/docs/literate/src/files/differentiable_programming.jl b/docs/literate/src/files/differentiable_programming.jl index 10760d476ce..60c661967ea 100644 --- a/docs/literate/src/files/differentiable_programming.jl +++ b/docs/literate/src/files/differentiable_programming.jl @@ -401,6 +401,94 @@ relative_difference = norm(J_fd - J_ad) / size(J_fd, 1) # This discrepancy is of the expected order of magnitude for central finite difference approximations. +# ## Automatic Jacobian sparsity detection and coloring + +# When solving large sparse nonlinear ODE systems originating from spatial discretizations +# with compact stencils such as the DG method with implicit time integrators, +# exploiting the sparsity of the Jacobian can lead to significant speedups in the Newton-Raphson solver. +# Similarly, steady-state problems can also be solved faster. + +# [Trixi.jl](https://github.com/trixi-framework/Trixi.jl) supports efficient Jacobian computations by leveraging the +# [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl) +# and [SparseMatrixColorings.jl](https://github.com/gdalle/SparseMatrixColorings.jl) packages. +# These tools allow to detect the sparsity pattern of the Jacobian and compute the +# optional coloring vector for efficient Jacobian evaluations. +# These are then handed over to the ODE solver from [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). + +# Below is a minimal example in 1D, showing how to use these packages with Trixi.jl. 
+# First, we define the `equation` and `mesh` as for an ordinary simulation: + +using Trixi + +advection_velocity = 1.0 +equation = LinearScalarAdvectionEquation1D(advection_velocity) + +mesh = TreeMesh((-1.0,), (1.0,), initial_refinement_level = 4, n_cells_max = 10^4) + +# We define the basic floating point type used for the actual simulation +# and construct the solver: +float_type = Float64 +solver = DGSEM(polydeg = 3, surface_flux = flux_godunov, RealT = float_type) + +# Next, we set up the sparsity detection. For this we need +using SparseConnectivityTracer # For Jacobian sparsity pattern + +# We use the [global `TracerSparsityDetector()`](https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/global_vs_local/) here. +jac_detector = TracerSparsityDetector() + +# Next, we retrieve the right element type corresponding to `float_type` for the Jacobian sparsity detection. +# For more details, see the API documentation of +# [`jacobian_eltype`](https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/api/#SparseConnectivityTracer.jacobian_eltype). +jac_eltype = jacobian_eltype(float_type, jac_detector) + +# Now we can construct the semidiscretization for sparsity detection with `jac_eltype` as the +# datatype for the working arrays and helper datastructures. +semi_jac_type = SemidiscretizationHyperbolic(mesh, equation, + initial_condition_convergence_test, solver, + uEltype = jac_eltype) # Supply sparsity detection datatype here + +tspan = (0.0, 1.0) # Re-used later in `rhs!` evaluation +ode_jac_type = semidiscretize(semi_jac_type, tspan) +u0_ode = ode_jac_type.u0 +du_ode = similar(u0_ode) + +# Wrap the RHS for sparsity detection to match the expected signature `f!(du, u)` required by +# [`jacobian_sparsity`](https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/api/#ADTypes.jacobian_sparsity). +rhs_wrapped!
= (du, u) -> Trixi.rhs!(du, u, semi_jac_type, tspan[1]) +jac_prototype = jacobian_sparsity(rhs_wrapped!, du_ode, u0_ode, jac_detector) + +# Optionally, we can also compute the coloring vector to reduce Jacobian evaluations +# to `1 + maximum(coloring_vec)` for finite differencing and `maximum(coloring_vec)` for algorithmic differentiation. +# For this, we need +using SparseMatrixColorings + +# We partition by columns as we are using finite differencing here. +# One would also partition by columns if forward-based algorithmic differentiation were used, +# and only partition by rows if reverse-mode AD were used. +# See also [the documentation of the now deprecated SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl?tab=readme-ov-file#matrix-coloring) package, +# the predecessor in spirit to SparseConnectivityTracer.jl and SparseMatrixColorings.jl, for more information. +coloring_prob = ColoringProblem(; structure = :nonsymmetric, partition = :column) +coloring_alg = GreedyColoringAlgorithm(; decompression = :direct) +coloring_result = coloring(jac_prototype, coloring_prob, coloring_alg) +coloring_vec = column_colors(coloring_result) + +# Now, set up the actual semidiscretization for the simulation. +# The datatype is automatically retrieved from the solver (in this case `float_type = Float64`). +semi_float_type = SemidiscretizationHyperbolic(mesh, equation, + initial_condition_convergence_test, solver) +# Supply the sparse Jacobian prototype and the optional coloring vector. +# Internally, an [`ODEFunction`](https://docs.sciml.ai/DiffEqDocs/stable/types/ode_types/#SciMLBase.ODEFunction) +# with `jac_prototype = jac_prototype` and `colorvec = coloring_vec` is created. +ode_jac_sparse = semidiscretize(semi_float_type, tspan, + jac_prototype = jac_prototype, + colorvec = coloring_vec) + +# You can now solve the ODE problem efficiently with an implicit solver. +# Currently we are bound to finite differencing here. 
+using OrdinaryDiffEqSDIRK, ADTypes +sol = solve(ode_jac_sparse, TRBDF2(; autodiff = AutoFiniteDiff()), dt = 0.1, + save_everystep = false) + # ## Linear systems # When a linear PDE is discretized using a linear scheme such as a standard DG method, diff --git a/docs/make.jl b/docs/make.jl index 7111b66ab94..0301f5ba64e 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -163,7 +163,8 @@ makedocs( "Style guide" => "styleguide.md", "Testing" => "testing.md", "Performance" => "performance.md", - "Parallelization" => "parallelization.md" + "Parallelization" => "parallelization.md", + "Heterogeneous" => "heterogeneous.md" ], "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ diff --git a/docs/src/conventions.md b/docs/src/conventions.md index ec812e31e18..bf46892575e 100644 --- a/docs/src/conventions.md +++ b/docs/src/conventions.md @@ -76,6 +76,24 @@ set via the keywords documented with a docstring (but maybe with comments using `#`). +## Structure of the `solver` directory + +If some functionality is shared by multiple combinations of meshes/solvers, +it is defined in the directory of the most basic mesh and solver type. +An example for this is the `rhs!` function, which lays out the sequence of functions +that compose the overall right-hand-side function provided to the ODE integrator. +Since this general "recipe" can be unified for different meshes of a certain dimension, +a shared implementation is used to minimize code duplication. + +The most basic (in the sense that it is most tested and developed) solver type in Trixi.jl is +[`DGSEM`](@ref) due to historic reasons and background of the main contributors. +We consider the [`TreeMesh`](@ref) to be the most basic mesh type since it is Cartesian +and was the first mesh in Trixi.jl. 
+Thus, shared implementations for more advanced meshes such as the [`P4estMesh`](@ref) can be found in +the `src/solvers/dgsem_tree/` directory, while only necessary specifics are actually placed in +`src/solvers/dgsem_p4est/`. + + ## Array types and wrapping To allow adaptive mesh refinement efficiently when using time integrators from diff --git a/docs/src/heterogeneous.md b/docs/src/heterogeneous.md new file mode 100644 index 00000000000..9d4dc50c181 --- /dev/null +++ b/docs/src/heterogeneous.md @@ -0,0 +1,159 @@ +# Heterogeneous computing + +Support for heterogeneous computing is currently being worked on. + +## The use of Adapt.jl + +[Adapt.jl](https://github.com/JuliaGPU/Adapt.jl) is a package in the +[JuliaGPU](https://github.com/JuliaGPU) family that allows for +the translation of nested data structures. The primary goal is to allow the substitution of `Array` +at the storage level with a GPU array like `CuArray` from [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl). + +To facilitate this, data structures must be parameterized, so instead of: + +```julia +struct Container <: Trixi.AbstractContainer + data::Array{Float64, 2} +end +``` + +They must be written as: + +```jldoctest adapt; output = false, setup=:(import Trixi) +struct Container{D<:AbstractArray} <: Trixi.AbstractContainer + data::D +end + +# output + +``` + +furthermore, we need to define a function that allows for the conversion of storage +of our types: + +```jldoctest adapt; output = false +using Adapt + +function Adapt.adapt_structure(to, C::Container) + return Container(adapt(to, C.data)) +end + +# output + +``` + +or simply + +```julia +Adapt.@adapt_structure(Container) +``` + +additionally, we must define `Adapt.parent_type`. + +```jldoctest adapt; output = false +function Adapt.parent_type(::Type{<:Container{D}}) where D + return D +end + +# output + +``` + +All together we can use this machinery to perform conversions of a container. 
+ +```jldoctest adapt +julia> C = Container(zeros(3)) +Container{Vector{Float64}}([0.0, 0.0, 0.0]) + +julia> Trixi.storage_type(C) +Array +``` + + +```julia-repl +julia> using CUDA + +julia> GPU_C = adapt(CuArray, C) +Container{CuArray{Float64, 1, CUDA.DeviceMemory}}([0.0, 0.0, 0.0]) + +julia> Trixi.storage_type(GPU_C) +CuArray +``` + +## Element-type conversion with `Trixi.trixi_adapt`. + +We can use [`Trixi.trixi_adapt`](@ref) to perform both an element-type and a storage-type adaptation: + +```jldoctest adapt +julia> C = Container(zeros(3)) +Container{Vector{Float64}}([0.0, 0.0, 0.0]) + +julia> Trixi.trixi_adapt(Array, Float32, C) +Container{Vector{Float32}}(Float32[0.0, 0.0, 0.0]) +``` + +```julia-repl +julia> Trixi.trixi_adapt(CuArray, Float32, C) +Container{CuArray{Float32, 1, CUDA.DeviceMemory}}(Float32[0.0, 0.0, 0.0]) +``` + +!!! note + `adapt(Array{Float32}, C)` is tempting, but it will do the wrong thing + in the presence of `SVector`s and similar arrays from StaticArrays.jl. + + +## Writing GPU kernels + +Offloading computations to the GPU is done with +[KernelAbstractions.jl](https://github.com/JuliaGPU/KernelAbstractions.jl), +allowing for vendor-agnostic GPU code. + +### Example + +Given the following Trixi.jl code, which would typically be called from within `rhs!`: + +```julia +function trixi_rhs_fct(mesh, equations, solver, cache, args) + @threaded for element in eachelement(solver, cache) + # code + end +end +``` + +1. Put the inner code in a new function `rhs_fct_per_element`. Besides the index + `element`, pass all required fields as arguments, but make sure to `@unpack` them from + their structs in advance. +2. Where `trixi_rhs_fct` is called, get the backend, i.e., the hardware we are currently + running on via `trixi_backend(x)`. + This will, e.g., work with `u_ode`. Internally, KernelAbstractions.jl's `get_backend` + will be called, i.e., KernelAbstractions.jl has to know the type of `x`. + ```julia + backend = trixi_backend(u_ode) + ``` +3.
Add a new argument `backend` to `trixi_rhs_fct` used for dispatch. + When `backend` is `nothing`, the legacy implementation should be used: + ```julia + function trixi_rhs_fct(backend::Nothing, mesh, equations, solver, cache, args) + @unpack unpacked_args = cache + @threaded for element in eachelement(solver, cache) + rhs_fct_per_element(element, unpacked_args, args) + end + end + ``` +4. When `backend` is a `Backend` (a type defined by KernelAbstractions.jl), write a + KernelAbstractions.jl kernel: + ```julia + function trixi_rhs_fct(backend::Backend, mesh, equations, solver, cache, args) + nelements(solver, cache) == 0 && return nothing # return early when there are no elements + @unpack unpacked_args = cache + kernel! = rhs_fct_kernel!(backend) + kernel!(unpacked_args, args, + ndrange = nelements(solver, cache)) + return nothing + end + + @kernel function rhs_fct_kernel!(unpacked_args, args) + element = @index(Global) + rhs_fct_per_element(element, unpacked_args, args) + end + ``` diff --git a/docs/src/index.md b/docs/src/index.md index 5efa605dad1..5b35112bf6f 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -42,8 +42,12 @@ installation and postprocessing procedures. 
Its features include: * [Explicit low-storage Runge-Kutta time integration](https://diffeq.sciml.ai/latest/solvers/ode_solve/#Low-Storage-Methods) * [Strong stability preserving methods](https://diffeq.sciml.ai/latest/solvers/ode_solve/#Explicit-Strong-Stability-Preserving-Runge-Kutta-Methods-for-Hyperbolic-PDEs-(Conservation-Laws)) * CFL-based and error-based time step control +* Custom explicit time integration schemes + * Maximized linear stability via paired explicit Runge-Kutta methods + * Relaxation Runge-Kutta methods for entropy-conservative time integration * Native support for differentiable programming * Forward mode automatic differentiation via [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) + * Automatic Jacobian sparsity detection via [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl) * Periodic and weakly-enforced boundary conditions * Multiple governing equations: * Compressible Euler equations diff --git a/docs/src/styleguide.md b/docs/src/styleguide.md index 27210f951c0..07f2d90cddc 100644 --- a/docs/src/styleguide.md +++ b/docs/src/styleguide.md @@ -20,6 +20,7 @@ conventions, we apply and enforce automated source code formatting * The main modified argument comes first. For example, if the right-hand side `du` is modified, it should come first. If only the `cache` is modified, e.g., in `prolong2interfaces!` and its siblings, put the `cache` first. + * Some internal functions take a "computational backend" argument, this should always be passed as the first argument. * Otherwise, use the order `mesh, equations, solver, cache`. * If something needs to be specified in more detail for dispatch, put the additional argument before the general one that is specified in more detail. 
For example, we use `have_nonconservative_terms(equations), equations` diff --git a/docs/src/time_integration.md b/docs/src/time_integration.md index 232d4652251..269d7feb813 100644 --- a/docs/src/time_integration.md +++ b/docs/src/time_integration.md @@ -30,7 +30,7 @@ are the following. Further documentation can be found in the from Trixi.jl. - If you start Julia with multiple threads and want to use them also in the time integration method from OrdinaryDiffEq.jl, you need to pass the keyword argument - `thread = Trixi.True()` (or `thread = OrdinaryDiffEq.True()`)` to the algorithm, e.g., + `thread = Trixi.True()` (or `thread = OrdinaryDiffEq.True()`) to the algorithm, e.g., `RDPK3SpFSAL49(thread = Trixi.True())` or `CarpenterKennedy2N54(thread = Trixi.True(), williamson_condition = false)`. For more information on using thread-based parallelism in Trixi.jl, please refer to @@ -40,6 +40,8 @@ are the following. Further documentation can be found in the pass `internalnorm = ode_norm` and you should pass `unstable_check = ode_unstable_check` to OrdinaryDiffEq's [`solve`](https://docs.sciml.ai/DiffEqDocs/latest/basics/common_solver_opts/), which are both included in [`ode_default_options`](@ref). +- Hyperbolic-parabolic problems can be solved using IMEX (implicit-explicit) integrators. + Available options from OrdinaryDiffEq.jl are [IMEX SDIRK](https://docs.sciml.ai/OrdinaryDiffEq/stable/implicit/SDIRK/#IMEX-SDIRK) (Single-Diagonal Implicit Runge-Kutta) methods and [IMEX BDF](https://docs.sciml.ai/OrdinaryDiffEq/stable/imex/IMEXBDF/#IMEX-Multistep) (Backwards Differentiation Formula) methods. !!! note "Number of `rhs!` calls" If you use explicit Runge-Kutta methods from [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl), @@ -216,3 +218,92 @@ Then, the stable CFL number can be computed as described above. - [`Trixi.PairedExplicitRK2`](@ref): Second-order PERK method with at least two stages. 
- [`Trixi.PairedExplicitRK3`](@ref): Third-order PERK method with at least three stages. - [`Trixi.PairedExplicitRK4`](@ref): Fourth-order PERK method with at least five stages. + +## Relaxation Runge-Kutta Methods for Entropy-Conservative Time Integration + +While standard Runge-Kutta methods (or in fact the whole broad class of general linear methods such as multistep, additive, and partitioned Runge-Kutta methods) preserve linear solution invariants such as mass, momentum and energy, (assuming evolution in conserved variables $\boldsymbol u = (\rho, \rho v_i, \rho e)$) they do in general not preserve nonlinear solution invariants such as entropy. + +### The Notion of Entropy + +For an ideal gas with isentropic exponent $\gamma$, the thermodynamic entropy is given by +```math +s_\text{therm}(\boldsymbol u) = \ln \left( \frac{p}{\rho^\gamma} \right) +``` +where $p$ is the pressure, $\rho$ the density, and $\gamma$ the ratio of specific heats. +The mathematical entropy is then given by +```math +s(\boldsymbol u) \coloneqq - \underbrace{\rho}_{\equiv u_1} \cdot s_\text{therm}(\boldsymbol u) = - \rho \cdot \log \left( \frac{p(\boldsymbol u)}{\rho^\gamma} \right) \: . 
+``` +The total entropy $\eta$ is then obtained by integrating the mathematical entropy $s$ over the domain $\Omega$: +```math +\eta(t) \coloneqq \eta \big(\boldsymbol u(t, \boldsymbol x) \big) = \int_{\Omega} s \big (\boldsymbol u(t, \boldsymbol x) \big ) \, \text{d} \boldsymbol x \tag{1} +``` + +For a semidiscretized partial differential equation (PDE) of the form +```math +\begin{align*} +\boldsymbol U(t_0) &= \boldsymbol U_0, \\ +\boldsymbol U'(t) &= \boldsymbol F\big(t, \boldsymbol U(t) \big) \tag{2} +\end{align*} +``` +one can construct a discrete equivalent $H$ to (1) which is obtained by computing the mathematical entropy $s$ at every node of the mesh and then integrating it over the domain $\Omega$ by applying a quadrature rule: +```math +H(t) \coloneqq H\big(\boldsymbol U(t)\big) = \int_{\Omega} s \big(\boldsymbol U(t) \big) \, \text{d} \Omega +``` + +For a suitable spatial discretization (2) entropy-conservative systems such as the Euler equations preserve the total entropy $H(t)$ over time, i.e., +```math +\frac{\text{d}}{\text{d} t} H \big(\boldsymbol U(t) \big ) += +\left \langle \frac{\partial H(\boldsymbol U)}{\partial \boldsymbol U}, \frac{\text{d}}{\text{d} t} \boldsymbol U(t) \right \rangle +\overset{(2)}{=} +\left \langle \frac{\partial H(\boldsymbol U)}{\partial \boldsymbol U}, \boldsymbol F\big(t, \boldsymbol U(t) \big) \right \rangle = 0 \tag{3} +``` +while entropy-stable discretizations of entropy-diffusive systems such as the Navier-Stokes equations ensure that the total entropy decays over time, i.e., +```math +\left \langle \frac{\partial H(\boldsymbol U)}{\partial \boldsymbol U}, \boldsymbol F(t, \boldsymbol U) \right \rangle \leq 0 \tag{4} +``` + +### Ensuring Entropy-Conservation/Stability with Relaxation Runge-Kutta Methods + +Evolving the ordinary differential equation (ODE) for the entropy (2) with a Runge-Kutta scheme gives +```math +H_{n+1} = H_n + \Delta t \sum_{i=1}^S b_i \, \left\langle \frac{\partial +H(\boldsymbol U_{n, i})
+}{\partial \boldsymbol U}, \boldsymbol F(\boldsymbol U_{n, i}) \right\rangle \tag{5} +``` +which preserves (3) and (4). +In practice, however, we evolve the conserved variables $\boldsymbol U$ which results in +```math +\boldsymbol U_{n+1} = \boldsymbol U_n + \Delta t \sum_{i=1}^S b_i \boldsymbol F(\boldsymbol U_{n, i}) +``` +and in particular for the entropy $H$ +```math +H(\boldsymbol U_{n+1}) = H\left( \boldsymbol U_n + \Delta t \sum_{i=1}^S b_i \boldsymbol F(\boldsymbol U_{n, i}) \right) \neq H_{n+1} \text{ computed from (5)} +``` + +To resolve the difference $H(\boldsymbol U_{n+1}) - H_{n+1}$ Ketcheson, Ranocha and collaborators have introduced *relaxation* Runge-Kutta methods in a series of publications, see for instance +- [Ketcheson (2019)](https://doi.org/10.1137/19M1263662): Relaxation Runge-Kutta Methods: Conservation and Stability for Inner-Product Norms +- [Ranocha et al. (2020)](https://doi.org/10.1137/19M1263480): Relaxation Runge-Kutta methods: Fully discrete explicit entropy-stable schemes for the compressible Euler and Navier-Stokes equations +- [Ranocha, Lóczi, and Ketcheson (2020)](https://doi.org/10.1007/s00211-020-01158-4): General relaxation methods for initial-value problems with application to multistep schemes + +Almost miraculously, it suffices to introduce a single parameter $\gamma$ in the final update step of the Runge-Kutta method to ensure that the properties of the spatial discretization are preserved, i.e., +```math +H \big(\boldsymbol U_{n+1}( \gamma ) \big) +\overset{!}{=} +H(\boldsymbol U_n) + +\gamma \Delta t \sum_{i=1}^S b_i +\left \langle +\frac{\partial H(\boldsymbol U_{n, i})}{\partial \boldsymbol U_{n, i}}, \boldsymbol F(\boldsymbol U_{n, i}) +\right \rangle +\tag{6} +``` +This comes only at the price that one needs to solve the scalar nonlinear equation (6) for $\gamma$ at every time step. +To do so, [`Trixi.RelaxationSolverNewton`](@ref) is implemented in Trixi.jl. 
+These can then be supplied to the relaxation time algorithms such as [`Trixi.RelaxationRalston3`](@ref) and [`Trixi.RelaxationRK44`](@ref) by specifying the `relaxation_solver` keyword argument: +```julia +ode_algorithm = Trixi.RelaxationRK44(solver = Trixi.RelaxationSolverNewton()) +ode_algorithm = Trixi.RelaxationRalston3(solver = Trixi.RelaxationSolverNewton()) +ode_algorithm = Trixi.RelaxationCKL43(solver = Trixi.RelaxationSolverNewton()) +ode_algorithm = Trixi.RelaxationCKL54(solver = Trixi.RelaxationSolverNewton()) +``` diff --git a/examples/dgmulti_1d/elixir_euler_flux_diff.jl b/examples/dgmulti_1d/elixir_euler_flux_diff.jl index 3edf1db483c..d6081be6770 100644 --- a/examples/dgmulti_1d/elixir_euler_flux_diff.jl +++ b/examples/dgmulti_1d/elixir_euler_flux_diff.jl @@ -1,7 +1,14 @@ using OrdinaryDiffEqLowStorageRK using Trixi -surface_flux = FluxLaxFriedrichs() +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion.
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha dg = DGMulti(polydeg = 3, element_type = Line(), approximation_type = Polynomial(), surface_integral = SurfaceIntegralWeakForm(surface_flux), diff --git a/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl b/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl index 0c00930aa31..0225c156679 100644 --- a/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl +++ b/examples/dgmulti_1d/elixir_euler_shu_osher_gauss_shock_capturing.jl @@ -32,7 +32,14 @@ end initial_condition = initial_condition_shu_osher -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 diff --git a/examples/dgmulti_2d/elixir_euler_brown_minion_vortex.jl b/examples/dgmulti_2d/elixir_euler_brown_minion_vortex.jl index 5bf08ea7cb6..979134d224e 100644 --- a/examples/dgmulti_2d/elixir_euler_brown_minion_vortex.jl +++ b/examples/dgmulti_2d/elixir_euler_brown_minion_vortex.jl @@ -1,8 +1,15 @@ using OrdinaryDiffEqLowStorageRK using Trixi +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 4, element_type = Quad(), approximation_type = Polynomial(), - surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs()), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) equations = CompressibleEulerEquations2D(1.4) diff --git a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl index 064fe0d9d89..b377f5a48b2 100644 --- a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl +++ b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl @@ -22,8 +22,15 @@ boundary_conditions = (; :Slant => boundary_condition_convergence_test, ############################################################################### # Get the DG approximation space +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 8, element_type = Quad(), approximation_type = SBP(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) ############################################################################### diff --git a/examples/dgmulti_2d/elixir_euler_kelvin_helmholtz_instability.jl b/examples/dgmulti_2d/elixir_euler_kelvin_helmholtz_instability.jl index 1db2d6525a6..c0b75594df6 100644 --- a/examples/dgmulti_2d/elixir_euler_kelvin_helmholtz_instability.jl +++ b/examples/dgmulti_2d/elixir_euler_kelvin_helmholtz_instability.jl @@ -1,8 +1,15 @@ using OrdinaryDiffEqLowStorageRK using Trixi +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = SBP(), - surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs()), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) equations = CompressibleEulerEquations2D(1.4) diff --git a/examples/dgmulti_2d/elixir_euler_laplace_diffusion.jl b/examples/dgmulti_2d/elixir_euler_laplace_diffusion.jl index a0261555d6c..728293ad381 100644 --- a/examples/dgmulti_2d/elixir_euler_laplace_diffusion.jl +++ b/examples/dgmulti_2d/elixir_euler_laplace_diffusion.jl @@ -1,7 +1,14 @@ using OrdinaryDiffEqLowStorageRK using Trixi -surface_flux = FluxLaxFriedrichs() +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha dg = DGMulti(polydeg = 3, element_type = Tri(), approximation_type = Polynomial(), surface_integral = SurfaceIntegralWeakForm(surface_flux), diff --git a/examples/dgmulti_2d/elixir_euler_shockcapturing.jl b/examples/dgmulti_2d/elixir_euler_shockcapturing.jl index 03dfa2fd264..0eae24b2618 100644 --- a/examples/dgmulti_2d/elixir_euler_shockcapturing.jl +++ b/examples/dgmulti_2d/elixir_euler_shockcapturing.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 diff --git a/examples/dgmulti_2d/elixir_euler_shockcapturing_curved.jl b/examples/dgmulti_2d/elixir_euler_shockcapturing_curved.jl index 2e0e7fd8687..1a1ceedf5b0 100644 --- a/examples/dgmulti_2d/elixir_euler_shockcapturing_curved.jl +++ b/examples/dgmulti_2d/elixir_euler_shockcapturing_curved.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 diff --git a/examples/dgmulti_2d/elixir_mhd_reflective_wall.jl b/examples/dgmulti_2d/elixir_mhd_reflective_wall.jl index 49503bb71f9..183a347c1e9 100644 --- a/examples/dgmulti_2d/elixir_mhd_reflective_wall.jl +++ b/examples/dgmulti_2d/elixir_mhd_reflective_wall.jl @@ -28,7 +28,14 @@ function initial_condition_perturbation(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_perturbation -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) solver = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = GaussSBP(), diff --git a/examples/dgmulti_2d/elixir_mhd_weak_blast_wave.jl b/examples/dgmulti_2d/elixir_mhd_weak_blast_wave.jl index df365e6e307..f4e635e45e0 100644 --- a/examples/dgmulti_2d/elixir_mhd_weak_blast_wave.jl +++ b/examples/dgmulti_2d/elixir_mhd_weak_blast_wave.jl @@ -8,7 +8,14 @@ equations = IdealGlmMhdEquations2D(1.4) initial_condition = initial_condition_weak_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = Polynomial(), surface_integral = SurfaceIntegralWeakForm(surface_flux), diff --git a/examples/dgmulti_2d/elixir_mhd_weak_blast_wave_SBP.jl b/examples/dgmulti_2d/elixir_mhd_weak_blast_wave_SBP.jl index c00c76c9bb4..639adaeaac9 100644 --- a/examples/dgmulti_2d/elixir_mhd_weak_blast_wave_SBP.jl +++ b/examples/dgmulti_2d/elixir_mhd_weak_blast_wave_SBP.jl @@ -12,7 +12,14 @@ equations = IdealGlmMhdEquations2D(1.4, c_h) initial_condition = initial_condition_weak_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = SBP(), surface_integral = SurfaceIntegralWeakForm(surface_flux), diff --git a/examples/dgmulti_2d/elixir_navierstokes_convergence.jl b/examples/dgmulti_2d/elixir_navierstokes_convergence.jl index c3e61387213..1da9ee88dd3 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_convergence.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_convergence.jl @@ -15,8 +15,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Tri(), approximation_type = Polynomial(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralWeakForm()) top_bottom(x, tol = 50 * eps()) = abs(abs(x[2]) - 1) < tol diff --git a/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl b/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl index 986b23f5c19..b6a3329fb7b 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl @@ -15,8 +15,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Tri(), approximation_type = Polynomial(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralWeakForm()) top_bottom(x, tol = 50 * eps()) = abs(abs(x[2]) - 1) < tol diff --git a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl index b5515928718..f9ea111fc21 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl @@ -12,8 +12,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Quad(), approximation_type = GaussSBP(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) top(x, tol = 50 * eps()) = abs(x[2] - 1) < tol diff --git a/examples/dgmulti_3d/elixir_euler_fdsbp_periodic.jl b/examples/dgmulti_3d/elixir_euler_fdsbp_periodic.jl index f5d225e71cc..44b3753cd76 100644 --- a/examples/dgmulti_3d/elixir_euler_fdsbp_periodic.jl +++ b/examples/dgmulti_3d/elixir_euler_fdsbp_periodic.jl @@ -10,12 +10,19 @@ initial_condition = initial_condition_convergence_test source_terms = source_terms_convergence_test volume_flux = flux_ranocha +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
solver = DGMulti(element_type = Hex(), approximation_type = periodic_derivative_operator(derivative_order = 1, accuracy_order = 4, xmin = 0.0, xmax = 1.0, N = 20), - surface_flux = flux_lax_friedrichs, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) mesh = DGMultiMesh(solver, coordinates_min = (-1.0, -1.0, -1.0), diff --git a/examples/dgmulti_3d/elixir_euler_taylor_green_vortex.jl b/examples/dgmulti_3d/elixir_euler_taylor_green_vortex.jl index cd28df7fc5c..a3502e896c7 100644 --- a/examples/dgmulti_3d/elixir_euler_taylor_green_vortex.jl +++ b/examples/dgmulti_3d/elixir_euler_taylor_green_vortex.jl @@ -31,7 +31,14 @@ end initial_condition = initial_condition_taylor_green_vortex volume_flux = flux_ranocha -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) solver = DGMulti(polydeg = 3, element_type = Hex(), approximation_type = Polynomial(), surface_integral = SurfaceIntegralWeakForm(surface_flux), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) diff --git a/examples/dgmulti_3d/elixir_navierstokes_convergence.jl b/examples/dgmulti_3d/elixir_navierstokes_convergence.jl index 39a13b85018..8c5b8b09268 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_convergence.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_convergence.jl @@ -13,8 +13,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Hex(), approximation_type = Polynomial(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralWeakForm()) top_bottom(x, tol = 50 * eps()) = abs(abs(x[2]) - 1) < tol diff --git a/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl b/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl index 928202e6d63..3e5df891c72 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl @@ -13,8 +13,16 @@ equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Hex(), approximation_type = Polynomial(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralWeakForm()) top_bottom(x, tol = 50 * eps()) = abs(abs(x[2]) - 1) < tol diff --git a/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl b/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl index 5356f9f48da..f38547638a5 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl @@ -36,8 +36,16 @@ end initial_condition = initial_condition_taylor_green_vortex # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
dg = DGMulti(polydeg = 3, element_type = Hex(), approximation_type = GaussSBP(), - surface_integral = SurfaceIntegralWeakForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralWeakForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) coordinates_min = (-1.0, -1.0, -1.0) .* pi diff --git a/examples/p4est_2d_dgsem/elixir_advection_basic_gpu.jl b/examples/p4est_2d_dgsem/elixir_advection_basic_gpu.jl new file mode 100644 index 00000000000..6f9e8e56986 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_advection_basic_gpu.jl @@ -0,0 +1,61 @@ +# The same setup as tree_2d_dgsem/elixir_advection_basic.jl +# to verify GPU support and Adapt.jl support. + +using OrdinaryDiffEqLowStorageRK +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) + +trees_per_dimension = (8, 8) + +# Create P4estMesh with 8 x 8 trees and 16 x 16 elements +mesh = P4estMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 1) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span from 0.0 to 1.0 +ode = semidiscretize(semi, (0.0, 1.0); real_type = nothing, storage_type = nothing) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, stepsize_callback) +# TODO: GPU. The `analysis_callback` needs to be updated for GPU support +# analysis_callback, save_solution, stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false); + dt = 1e-2, # solve needs some value here but it will be overwritten by the stepsize_callback + ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA0012airfoil_mach085.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA0012airfoil_mach085.jl index b04e22f8958..ae1548ec657 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA0012airfoil_mach085.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA0012airfoil_mach085.jl @@ -27,7 +27,14 @@ end initial_condition = initial_condition_mach085_flow volume_flux = flux_ranocha_turbo -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, 
`max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 3 basis = LobattoLegendreBasis(polydeg) @@ -57,7 +64,7 @@ mesh = P4estMesh{2}(mesh_file) equations::CompressibleEulerEquations2D) u_boundary = initial_condition_mach085_flow(x, t, equations) - return Trixi.flux_hll(u_inner, u_boundary, normal_direction, equations) + return flux_hll(u_inner, u_boundary, normal_direction, equations) end boundary_conditions = Dict(:Left => boundary_condition_subsonic_constant, diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl index 15e419a5be9..a16922c3b9b 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -27,9 +27,8 @@ initial_condition = initial_condition_mach2_flow x, t, surface_flux_function, equations::CompressibleEulerEquations2D) u_boundary = initial_condition_mach2_flow(x, t, equations) - flux = Trixi.flux(u_boundary, normal_direction, equations) - return flux + return flux(u_boundary, normal_direction, equations) end # Supersonic outflow boundary condition. 
@@ -40,14 +39,19 @@ end t, surface_flux_function, equations::CompressibleEulerEquations2D) - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end polydeg = 3 -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl index 9871f359224..87e425cb62c 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl @@ -34,7 +34,14 @@ function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquation end initial_condition = initial_condition_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/p4est_2d_dgsem/elixir_euler_cylinder_bowshock_mach3.jl b/examples/p4est_2d_dgsem/elixir_euler_cylinder_bowshock_mach3.jl new file mode 100644 index 00000000000..b404beb6c16 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_euler_cylinder_bowshock_mach3.jl @@ -0,0 +1,187 @@ +using Trixi +using OrdinaryDiffEqSSPRK + +############################################################################### +# Geometry & boundary conditions + +# Mapping to create a "close-up" mesh around the second quadrant of a cylinder, +# implemented by Georgii Oblapenko. If you use this in your own work, please cite: +# +# - G. Oblapenko and A. Tarnovskiy (2024) +# Reproducibility Repository for the paper: +# Entropy-stable fluxes for high-order Discontinuous Galerkin simulations of high-enthalpy flows. +# [DOI: 10.5281/zenodo.13981615](https://doi.org/10.5281/zenodo.13981615) +# [GitHub](https://github.com/knstmrd/paper_ec_trixi_chem) +# +# as well as the corresponding paper: +# - G. Oblapenko and M. Torrilhon (2025) +# Entropy-conservative high-order methods for high-enthalpy gas flows. +# Computers & Fluids, 2025. +# [DOI: 10.1016/j.compfluid.2025.106640](https://doi.org/10.1016/j.compfluid.2025.106640) +# +# The mapping produces the following geometry & shock (indicated by the asterisks `* `): +# ____x_neg____ +# | | +# | | +# | | +# | * | +# | * y +# | Inflow * _ +# | state * p +# x * o +# _ * s +# n * | +# e * | +# g Shock . 
+# | * . +# | * . <- x_pos +# | * . +# | * . (Cylinder) +# |_______y_neg_______. +function mapping_cylinder_shock_fitted(xi_, eta_, + cylinder_radius, spline_points) + shock_shape = [ + (spline_points[1], 0.0), # Shock position on the stagnation line (`y_neg`, y = 0) + (spline_points[2], spline_points[2]), # Shock position at -45° angle + (0.0, spline_points[3]) # Shock position at outflow (`y_pos`, x = x_max) + ] # 3 points that define the geometry of the mesh which follows the shape of the shock (known a-priori) + R = [sqrt(shock_shape[i][1]^2 + shock_shape[i][2]^2) for i in 1:3] # 3 radii + + # Construct spline with form R[1] + c2 * eta_01^2 + c3 * eta_01^3, + # chosen such that derivative w.r.t eta_01 is 0 at eta_01 = 0 such that + # we have symmetry along the stagnation line (`y_neg`, y = 0). + # + # A single cubic spline doesn't fit the shock perfectly, + # but is the simplest curve that does a reasonable job and it also can be easily computed analytically. + # The choice of points on the stagnation line and outflow region is somewhat self-evident + # (capture the minimum and maximum extent of the shock stand-off), + # and the point at the 45 degree angle seemed the most logical to add + # since it only requires one additional value (and not two), + # simplifies the math a bit, and the angle lies exactly in between the other angles. 
+ spline_matrix = [1.0 1.0; 0.25 0.125] + spline_RHS = [R[3] - R[1], R[2] - R[1]] + spline_coeffs = spline_matrix \ spline_RHS # c2, c3 + + eta_01 = (eta_ + 1) / 2 # Transform `eta_` in [-1, 1] to `eta_01` in [0, 1] + # "Flip" `xi_` in [-1, 1] to `xi_01` in [0, 1] since + # shock positions where originally for first quadrant, here we use second quadrant + xi_01 = (-xi_ + 1) / 2 + + R_outer = R[1] + spline_coeffs[1] * eta_01^2 + spline_coeffs[2] * eta_01^3 + + angle = -π / 4 + eta_ * π / 4 # Angle runs from -90° to 0° + r = (cylinder_radius + xi_01 * (R_outer - cylinder_radius)) + + return SVector(round(r * sin(angle); digits = 8), round(r * cos(angle); digits = 8)) +end + +@inline function initial_condition_mach3_flow(x, t, equations::CompressibleEulerEquations2D) + # set the freestream flow parameters + rho_freestream = equations.gamma + v1 = 3.0 # => Mach 3 for unity speed of sound + v2 = 0 + p_freestream = 1 + prim = SVector(rho_freestream, v1, v2, p_freestream) + return prim2cons(prim, equations) +end + +@inline function boundary_condition_supersonic_inflow(u_inner, + normal_direction::AbstractVector, + x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + u_boundary = initial_condition_mach3_flow(x, t, equations) + return flux(u_boundary, normal_direction, equations) +end + +# For physical significance of boundary conditions, see sketch at `mapping_cylinder_shock_fitted` +boundary_conditions = Dict(:x_neg => boundary_condition_supersonic_inflow, # Supersonic inflow + :y_neg => boundary_condition_slip_wall, # Induce symmetry by slip wall + :y_pos => boundary_condition_do_nothing, # Free outflow + :x_pos => boundary_condition_slip_wall) # Cylinder + +############################################################################### +# Equations, mesh and solver + +gamma = 1.4 +equations = CompressibleEulerEquations2D(gamma) + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +surface_flux = flux_hllc +volume_flux = flux_ranocha + 
+shock_indicator = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(shock_indicator; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +trees_per_dimension = (25, 25) + +cylinder_radius = 0.5 +# Follow from a-priori known shock shape, originally for first qaudrant, +# here transformed to second quadrant, see `mapping_cylinder_shock_fitted`. +spline_points = [1.32, 1.05, 2.25] +cylinder_mapping = (xi, eta) -> mapping_cylinder_shock_fitted(xi, eta, + cylinder_radius, + spline_points) + +mesh = P4estMesh(trees_per_dimension, + polydeg = polydeg, + mapping = cylinder_mapping, + periodicity = false) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_mach3_flow, + solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# Semidiscretization & callbacks + +tspan = (0.0, 5.0) # More or less stationary shock position reached +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_callback = AnalysisCallback(semi, interval = 5000) +alive_callback = AliveCallback(alive_interval = 200) + +save_solution = SaveSolutionCallback(dt = 0.25, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +amr_controller = ControllerThreeLevel(semi, shock_indicator; + base_level = 0, + med_level = 1, med_threshold = 0.175, + max_level = 3, max_threshold = 0.35) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 25, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +callbacks = CallbackSet(summary_callback, + 
analysis_callback, alive_callback, + save_solution, amr_callback) + +stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, pressure)) + +############################################################################### +# Run the simulation + +sol = solve(ode, SSPRK33(stage_limiter! = stage_limiter!, thread = Trixi.True()); + dt = 1.6e-5, # Fixed timestep works decent here + ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl index eefa3afdb60..e019c2afddc 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl @@ -81,7 +81,14 @@ boundary_conditions = Dict(:Bottom => boundary_condition_mixed_dirichlet_wall, :Left => boundary_condition_inflow) volume_flux = flux_ranocha -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 4 basis = LobattoLegendreBasis(polydeg) @@ -124,6 +131,11 @@ save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) +# positivity limiter necessary for this example with strong shocks +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) amr_controller = ControllerThreeLevel(semi, amr_indicator, @@ -134,18 +146,15 @@ amr_controller = ControllerThreeLevel(semi, amr_indicator, amr_callback = AMRCallback(semi, amr_controller, interval = 1, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, amr_callback) -# positivity limiter necessary for this example with strong shocks -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation -sol = solve(ode, SSPRK43(stage_limiter!); +sol = solve(ode, SSPRK43(stage_limiter! 
= positivity_limiter); ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl index b11c178637f..50c564357c4 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl @@ -86,7 +86,14 @@ boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, :Left => boundary_condition_inflow) volume_flux = flux_ranocha -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 4 basis = LobattoLegendreBasis(polydeg) @@ -129,6 +136,11 @@ save_solution = SaveSolutionCallback(interval = 2000, save_final_solution = true, solution_variables = cons2prim) +# positivity limiter necessary for this example with strong shocks +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) amr_controller = ControllerThreeLevel(semi, amr_indicator, @@ -139,19 +151,16 @@ amr_controller = ControllerThreeLevel(semi, amr_indicator, amr_callback = AMRCallback(semi, amr_controller, interval = 5, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, amr_callback) -# positivity limiter necessary for this example with strong shocks -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation -sol = solve(ode, SSPRK43(stage_limiter!); +sol = solve(ode, SSPRK43(stage_limiter! 
= positivity_limiter); maxiters = 999999, ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl index bd80589fb64..5f8c1989c66 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_constant -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D function mapping(xi_, eta_) diff --git a/examples/p4est_2d_dgsem/elixir_euler_free_stream_hybrid_mesh.jl b/examples/p4est_2d_dgsem/elixir_euler_free_stream_hybrid_mesh.jl index 20de2ecbf1c..10f52f0382e 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_free_stream_hybrid_mesh.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_free_stream_hybrid_mesh.jl @@ -1,4 +1,3 @@ - using OrdinaryDiffEqLowStorageRK using Trixi @@ -10,7 +9,14 @@ equations = CompressibleEulerEquations2D(1.4) # Test free stream preservation with constant initial condition initial_condition = initial_condition_constant -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) ############################################################################### diff --git a/examples/p4est_2d_dgsem/elixir_euler_sedov.jl b/examples/p4est_2d_dgsem/elixir_euler_sedov.jl index db0e81955b6..dd0ddc13533 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_sedov.jl @@ -37,7 +37,15 @@ end initial_condition = initial_condition_sedov_blast_wave # Get the DG approximation space -surface_flux = flux_lax_friedrichs + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/p4est_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl index fb916c93d41..d976c9c25d9 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl @@ -36,7 +36,15 @@ end initial_condition = initial_condition_sedov_blast_wave # Get the DG approximation space -surface_flux = flux_lax_friedrichs + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 33a264c1c6e..fb9a7b8c80c 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -14,7 +14,14 @@ source_terms = source_terms_convergence_test boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:all => boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Deformed rectangle that looks like a waving flag, # lower and upper faces are sinus curves, left and right are vertical lines. 
diff --git a/examples/p4est_2d_dgsem/elixir_euler_subsonic_cylinder.jl b/examples/p4est_2d_dgsem/elixir_euler_subsonic_cylinder.jl index 43dc6caca07..253cb0bf3a7 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_subsonic_cylinder.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_subsonic_cylinder.jl @@ -21,7 +21,14 @@ end initial_condition = initial_condition_mach038_flow volume_flux = flux_ranocha_turbo # FluxRotated(flux_chandrashekar) can also be used -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 3 solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, diff --git a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl index 045d4e8a755..f9d54b89ab5 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl @@ -42,9 +42,7 @@ initial_condition = initial_condition_mach3_flow x, t, surface_flux_function, equations::CompressibleEulerEquations2D) u_boundary = initial_condition_mach3_flow(x, t, equations) - flux = Trixi.flux(u_boundary, normal_direction, equations) - - return flux + return flux(u_boundary, normal_direction, equations) end # Supersonic outflow boundary condition. @@ -53,9 +51,7 @@ end @inline function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, surface_flux_function, equations::CompressibleEulerEquations2D) - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, @@ -65,7 +61,14 @@ boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, :Left => boundary_condition_supersonic_inflow) volume_flux = flux_ranocha_turbo -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 3 basis = LobattoLegendreBasis(polydeg) @@ -109,6 +112,12 @@ save_solution = SaveSolutionCallback(interval = 1000, save_final_solution = true, solution_variables = cons2prim) +# positivity limiter necessary for this example with strong shocks. Very sensitive +# to the order of the limiter variables, pressure must come first. +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-7, 1.0e-6), + variables = (pressure, + Trixi.density)) + amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) amr_controller = ControllerThreeLevel(semi, amr_indicator, @@ -119,19 +128,15 @@ amr_controller = ControllerThreeLevel(semi, amr_indicator, amr_callback = AMRCallback(semi, amr_controller, interval = 1, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, amr_callback) -# positivity limiter necessary for this example with strong shocks. Very sensitive -# to the order of the limiter variables, pressure must come first. -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-7, 1.0e-6), - variables = (pressure, Trixi.density)) - ############################################################################### # run the simulation -sol = solve(ode, SSPRK43(stage_limiter!); +sol = solve(ode, SSPRK43(stage_limiter! 
= positivity_limiter); ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder_sc_subcell.jl b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder_sc_subcell.jl index de08e3de19f..8d46c19e141 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder_sc_subcell.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder_sc_subcell.jl @@ -41,9 +41,7 @@ initial_condition = initial_condition_mach3_flow x, t, surface_flux_function, equations::CompressibleEulerEquations2D) u_boundary = initial_condition_mach3_flow(x, t, equations) - flux = Trixi.flux(u_boundary, normal_direction, equations) - - return flux + return flux(u_boundary, normal_direction, equations) end # For subcell limiting, the calculation of local bounds for non-periodic domains requires the @@ -65,9 +63,7 @@ end @inline function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, surface_flux_function, equations::CompressibleEulerEquations2D) - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end @inline function Trixi.get_boundary_outer_state(u_inner, t, @@ -100,7 +96,14 @@ boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, :Left => boundary_condition_supersonic_inflow) volume_flux = flux_ranocha_turbo -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 3 basis = LobattoLegendreBasis(polydeg) limiter_idp = SubcellLimiterIDP(equations, basis; diff --git a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl index 6ea0eea7473..9579153eb93 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl @@ -33,7 +33,14 @@ boundary_conditions = Dict(:Body => boundary_condition_uniform_flow, :Bowtie => boundary_condition_slip_wall) volume_flux = flux_ranocha -solver = DGSEM(polydeg = 5, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 5, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) diff --git a/examples/p4est_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl index 7aeca418e80..770dbe0e01f 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl @@ -31,11 +31,18 @@ initial_condition = initial_condition_weak_blast_wave # Get the DG approximation space -# Activate the shock capturing + flux differencing -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) +# Activate the shock capturing + flux differencing indicator_sc = IndicatorHennemannGassner(equations, basis, alpha_max = 0.5, alpha_min = 0.001, diff --git a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl index 0d389dbeea3..b128f245195 100644 --- a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl @@ -27,7 +27,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations2D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl index aab3162a8ed..c3c54aa24b6 100644 --- a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl @@ -43,7 +43,14 @@ function initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_rotor -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_2d_dgsem/elixir_mhd_rotor_cfl_ramp.jl b/examples/p4est_2d_dgsem/elixir_mhd_rotor_cfl_ramp.jl index 87daa267ea5..1d7da07578f 100644 --- a/examples/p4est_2d_dgsem/elixir_mhd_rotor_cfl_ramp.jl +++ b/examples/p4est_2d_dgsem/elixir_mhd_rotor_cfl_ramp.jl @@ -43,7 +43,14 @@ function initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_rotor -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl index 6c2b2a2ecfd..6aaf2727597 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl @@ -45,7 +45,14 @@ end initial_condition = initial_condition_mach08_flow -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) polydeg = 3 solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux) @@ -65,7 +72,7 @@ mesh = P4estMesh{2}(mesh_file, initial_refinement_level = 1) equations::CompressibleEulerEquations2D) u_boundary = initial_condition_mach08_flow(x, t, equations) - return Trixi.flux_hll(u_inner, u_boundary, normal_direction, equations) + return flux_hll(u_inner, u_boundary, normal_direction, equations) end boundary_conditions = Dict(:Left => boundary_condition_subsonic_constant, diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl index c32b52c9f6b..2e69ca790c2 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl index a3c10ba708a..9405d016ddb 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_couette_flow.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_couette_flow.jl index 92bc780c7a0..84b56aad1c1 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_couette_flow.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_couette_flow.jl @@ -52,7 +52,7 @@ function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x surface_flux_function, equations::CompressibleEulerEquations2D) # Calculate the boundary flux entirely from the internal solution state - return Trixi.flux(u_inner, normal_direction, equations) + return flux(u_inner, normal_direction, equations) end ### Hyperbolic boundary conditions ### diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index 57664dbae8b..85042f0f456 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -12,7 +12,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl index e294adb508d..3b787039708 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl @@ -12,7 +12,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_poiseuille_flow.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_poiseuille_flow.jl index 5a328b354a4..fcbcd7d65e6 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_poiseuille_flow.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_poiseuille_flow.jl @@ -54,7 +54,7 @@ function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x surface_flux_function, equations::CompressibleEulerEquations2D) # Calculate the boundary flux entirely from the internal solution state - return Trixi.flux(u_inner, normal_direction, equations) + return flux(u_inner, normal_direction, equations) end ### Hyperbolic boundary conditions ### diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl index a125af232e6..6d6e6ab1422 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -113,9 +113,7 @@ function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, surface_flux_function, equations::CompressibleEulerEquations2D) u_cons = initial_condition_viscous_shock(x, t, equations) - flux = Trixi.flux(u_cons, normal_direction, equations) - - return flux + return flux(u_cons, normal_direction, equations) end # Completely free outflow @@ -123,9 +121,7 @@ function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x surface_flux_function, equations::CompressibleEulerEquations2D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end boundary_conditions 
= Dict(:x_neg => boundary_condition_inflow, diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock_newton_krylov.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock_newton_krylov.jl new file mode 100644 index 00000000000..adbc63c08b6 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock_newton_krylov.jl @@ -0,0 +1,181 @@ +using Trixi + +using OrdinaryDiffEqSDIRK +using LinearSolve # For Jacobian-free Newton-Krylov (GMRES) solver +using ADTypes # For automatic differentiation via finite differences + +# This is the classic 1D viscous shock wave problem with analytical solution +# for a special value of the Prandtl number. +# The original references are: +# +# - R. Becker (1922) +#   Stoßwelle und Detonation. +#   [DOI: 10.1007/BF01329605](https://doi.org/10.1007/BF01329605) +# +#   English translations: +#   Impact waves and detonation. Part I. +#   https://ntrs.nasa.gov/api/citations/19930090862/downloads/19930090862.pdf +#   Impact waves and detonation. Part II. +#   https://ntrs.nasa.gov/api/citations/19930090863/downloads/19930090863.pdf +# +# - M. Morduchow, P. A. Libby (1949) +#   On a Complete Solution of the One-Dimensional Flow Equations +#   of a Viscous, Heat-Conducting, Compressible Gas +#   [DOI: 10.2514/8.11882](https://doi.org/10.2514/8.11882) +# +# +# The particular problem considered here is described in +# - L. G. Margolin, J. M. Reisner, P. M.
Jordan (2017) +# Entropy in self-similar shock profiles +# [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) + +### Fixed parameters ### + +# Special value for which nonlinear solver can be omitted +# Corresponds essentially to fixing the Mach number +alpha = 0.5 +# We want kappa = cp * mu = mu_bar to ensure constant enthalpy +prandtl_number() = 3 / 4 + +### Free choices: ### +gamma() = 5 / 3 + +# In Margolin et al., the Navier-Stokes equations are given for an +# isotropic stress tensor τ, i.e., ∇ ⋅ τ = μ Δu +mu_isotropic() = 0.15 +mu_bar() = mu_isotropic() / (gamma() - 1) # Re-scaled viscosity + +rho_0() = 1 +v() = 1 # Shock speed + +domain_length = 4.0 + +### Derived quantities ### + +Ma() = 2 / sqrt(3 - gamma()) # Mach number for alpha = 0.5 +c_0() = v() / Ma() # Speed of sound ahead of the shock + +# From constant enthalpy condition +p_0() = c_0()^2 * rho_0() / gamma() + +l() = mu_bar() / (rho_0() * v()) * 2 * gamma() / (gamma() + 1) # Appropriate length scale + +""" + initial_condition_viscous_shock(x, t, equations) + +Classic 1D viscous shock wave problem with analytical solution +for a special value of the Prandtl number. +The version implemented here is described in +- L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) + Entropy in self-similar shock profiles + [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) +""" +function initial_condition_viscous_shock(x, t, equations) + y = x[1] - v() * t # Translated coordinate + + # Coordinate transformation. See eq. (33) in Margolin et al. 
(2017) + chi = 2 * exp(y / (2 * l())) + + w = 1 + 1 / (2 * chi^2) * (1 - sqrt(1 + 2 * chi^2)) + + rho = rho_0() / w + u = v() * (1 - w) + p = p_0() * 1 / w * (1 + (gamma() - 1) / 2 * Ma()^2 * (1 - w^2)) + + return prim2cons(SVector(rho, u, 0, p), equations) +end +initial_condition = initial_condition_viscous_shock + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +equations = CompressibleEulerEquations2D(gamma()) + +# Trixi implements the stress tensor in deviatoric form, thus we need to +# convert the "isotropic viscosity" to the "deviatoric viscosity" +mu_deviatoric() = mu_bar() * 3 / 4 +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu_deviatoric(), + Prandtl = prandtl_number(), + gradient_variables = GradientVariablesEntropy()) + +solver = DGSEM(polydeg = 3, surface_flux = flux_hlle) + +coordinates_min = (-domain_length / 2, -domain_length / 2) +coordinates_max = (domain_length / 2, domain_length / 2) + +trees_per_dimension = (12, 3) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 0, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (false, true)) + +### Inviscid boundary conditions ### + +# Prescribe pure influx based on initial conditions +function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + u_cons = initial_condition_viscous_shock(x, t, equations) + return flux(u_cons, normal_direction, equations) +end + +boundary_conditions = Dict(:x_neg => boundary_condition_inflow, + :x_pos => boundary_condition_do_nothing) + +### Viscous boundary conditions ### +# For the viscous BCs, we use the known analytical solution +velocity_bc = NoSlip() do x, t, equations_parabolic + Trixi.velocity(initial_condition_viscous_shock(x, + t, + equations_parabolic), + 
equations_parabolic) +end + +heat_bc = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +boundary_condition_parabolic = BoundaryConditionNavierStokesWall(velocity_bc, heat_bc) + +boundary_conditions_parabolic = Dict(:x_neg => boundary_condition_parabolic, + :x_pos => boundary_condition_parabolic) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() +alive_callback = AliveCallback(alive_interval = 1) +analysis_callback = AnalysisCallback(semi, interval = 100) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + +############################################################################### +# run the simulation + +# Tolerances for GMRES residual, see https://jso.dev/Krylov.jl/stable/solvers/unsymmetric/#Krylov.gmres +atol_lin_solve = 1e-5 +rtol_lin_solve = 1e-5 + +# Jacobian-free Newton-Krylov (GMRES) solver +linsolve = KrylovJL_GMRES(atol = atol_lin_solve, rtol = rtol_lin_solve) + +# Use (diagonally) implicit Runge-Kutta, see +# https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Using-Jacobian-Free-Newton-Krylov +ode_alg = Kvaerno4(autodiff = AutoFiniteDiff(), linsolve = linsolve) + +atol_ode_solve = 1e-4 +rtol_ode_solve = 1e-4 +sol = solve(ode, ode_alg; + abstol = atol_ode_solve, reltol = rtol_ode_solve, + ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_3d_dgsem/elixir_advection_amr.jl b/examples/p4est_3d_dgsem/elixir_advection_amr.jl index 94b987d53b5..ed0c788e9d4 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_amr.jl +++ 
b/examples/p4est_3d_dgsem/elixir_advection_amr.jl @@ -29,7 +29,7 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) ############################################################################### # ODE solvers, callbacks etc. -tspan = (0.0, 0.3) +tspan = (0.0, 6.0) ode = semidiscretize(semi, tspan) summary_callback = SummaryCallback() @@ -46,8 +46,8 @@ save_solution = SaveSolutionCallback(interval = 100, solution_variables = cons2prim) amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), - base_level = 4, - med_level = 5, med_threshold = 0.1, + base_level = 2, + med_level = 5, med_threshold = 0.2, max_level = 6, max_threshold = 0.6) amr_callback = AMRCallback(semi, amr_controller, interval = 5, diff --git a/examples/p4est_3d_dgsem/elixir_euler_OMNERA_M6_wing.jl b/examples/p4est_3d_dgsem/elixir_euler_ONERA_M6_wing.jl similarity index 88% rename from examples/p4est_3d_dgsem/elixir_euler_OMNERA_M6_wing.jl rename to examples/p4est_3d_dgsem/elixir_euler_ONERA_M6_wing.jl index f3eae998103..0753a8071e4 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_OMNERA_M6_wing.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_ONERA_M6_wing.jl @@ -65,7 +65,14 @@ end polydeg = 2 basis = LobattoLegendreBasis(polydeg) -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha # Flux Differencing is required, shock capturing not! @@ -114,7 +121,7 @@ force_boundary_names = (:BottomWing, :TopWing) aoa() = deg2rad(3.06) rho_inf() = 1.4 -u_inf(equations) = 0.84 +u_inf() = 0.84 ### Wing projected area calculated from geometry information provided at ### ### https://www.grc.nasa.gov/www/wind/valid/m6wing/m6wing.html ### @@ -131,10 +138,10 @@ A = height * (0.5 * (g_I + g_III) + g_II) lift_coefficient = AnalysisSurfaceIntegral(force_boundary_names, LiftCoefficientPressure3D(aoa(), rho_inf(), - u_inf(equations), A)) + u_inf(), A)) drag_coefficient = AnalysisSurfaceIntegral(force_boundary_names, DragCoefficientPressure3D(aoa(), rho_inf(), - u_inf(equations), A)) + u_inf(), A)) analysis_interval = 100_000 analysis_callback = AnalysisCallback(semi, interval = analysis_interval, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl index 4d2893b0db9..70fe9fae8f5 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl @@ -13,7 +13,15 @@ boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition) # Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. # The polydeg of the solver must be at least twice as big as the polydeg of the mesh. # See https://doi.org/10.1007/s10915-018-00897-9, Section 6. -solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. 
+# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl index 71f729837dc..f748448abca 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl @@ -9,7 +9,14 @@ equations = CompressibleEulerEquations3D(1.4) initial_condition = initial_condition_constant polydeg = 3 -solver = DGSEM(polydeg = polydeg, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = polydeg, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) ############################################################################### # Get the uncurved mesh from a file (downloads the file if not available locally) diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries_float32.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries_float32.jl index 3779eb379b1..a311223d6fb 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries_float32.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries_float32.jl @@ -12,7 +12,15 @@ equations = CompressibleEulerEquations3D(1.4f0) initial_condition = initial_condition_constant polydeg = 3 -solver = DGSEM(polydeg = polydeg, surface_flux = flux_lax_friedrichs, RealT = Float32) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = polydeg, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), + RealT = Float32) ############################################################################### # Get the uncurved mesh from a file (downloads the file if not available locally) diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl index d2208a7844c..78e21ca6576 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -10,7 +10,14 @@ initial_condition = initial_condition_constant boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D. 
diff --git a/examples/p4est_3d_dgsem/elixir_euler_sedov.jl b/examples/p4est_3d_dgsem/elixir_euler_sedov.jl index 24e36ffd551..5d955fdc4a0 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_sedov.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_sedov.jl @@ -40,7 +40,14 @@ end initial_condition = initial_condition_medium_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 5 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index 3e65337d264..a16e8c9a775 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -14,7 +14,15 @@ boundary_conditions = Dict(:all => boundary_condition) # Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. # The polydeg of the solver must be at least twice as big as the polydeg of the mesh. # See https://doi.org/10.1007/s10915-018-00897-9, Section 6. 
-solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 0d024e4ec04..6d57205a169 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -16,7 +16,14 @@ boundary_conditions = Dict(:x_neg => boundary_condition, :z_neg => boundary_condition, :z_pos => boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (0.0, 0.0, 0.0) diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl index e0860887aeb..41053510ed5 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl @@ -14,7 +14,14 @@ boundary_conditions = Dict(:Bottom => boundary_condition, :Circle => boundary_condition, :Cut => boundary_condition) -solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Unstructured 3D half circle mesh from HOHQMesh mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", diff --git a/examples/p4est_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl b/examples/p4est_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl index 7f9d07486fd..20a08b383c3 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl @@ -32,7 +32,14 @@ end initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) @@ -95,7 +102,7 @@ amr_controller = ControllerThreeLevelCombined(semi, amr_indicator, indicator_sc, amr_callback = AMRCallback(semi, amr_controller, interval = 1, - adapt_initial_condition = false, + adapt_initial_condition = true, adapt_initial_condition_only_refine = false) stepsize_callback = StepsizeCallback(cfl = 0.5) diff --git a/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_er.jl b/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_er.jl new file mode 100644 index 00000000000..96b53bf98dd --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_mhd_alfven_wave_er.jl @@ -0,0 +1,62 @@ +using Trixi + +############################################################################### +# semidiscretization of the compressible ideal GLM-MHD equations + +equations = IdealGlmMhdEquations3D(5 / 3) + +# Volume flux stabilizes the simulation - in contrast to standard DGSEM with +# `surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell)` only which crashes. +# To turn this into a convergence test, use a flux with some dissipation, e.g. +# `flux_lax_friedrichs` or `flux_hll`. +flux = (flux_hindenlang_gassner, flux_nonconservative_powell) +solver = DGSEM(polydeg = 3, surface_flux = flux, + volume_integral = VolumeIntegralFluxDifferencing(flux)) + +coordinates_min = (-1.0, -1.0, -1.0) +coordinates_max = (1.0, 1.0, 1.0) + +trees_per_dimension = (2, 2, 2) +mesh = P4estMesh(trees_per_dimension, + polydeg = 1, initial_refinement_level = 1, + coordinates_min = coordinates_min, coordinates_max = coordinates_max) + +initial_condition = initial_condition_convergence_test +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_callback = AnalysisCallback(semi, interval = 10, + analysis_errors = Symbol[], # Switch off error computation + # Note: `entropy` defaults to mathematical entropy + analysis_integrals = (entropy,), + analysis_filename = "entropy_ER.dat", + save_analysis = true) + +cfl = 1.0 +stepsize_callback = StepsizeCallback(cfl = cfl) + +glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback, + glm_speed_callback) + +############################################################################### +# run the simulation + +# Ensure exact entropy conservation by employing a relaxation Runge-Kutta method +relaxation_solver = Trixi.RelaxationSolverNewton(max_iterations = 5, + root_tol = eps(Float64), + gamma_tol = eps(Float64)) +ode_alg = Trixi.RelaxationCKL54(relaxation_solver = relaxation_solver) + +sol = Trixi.solve(ode, ode_alg; + dt = 42.0, save_everystep = false, callback = callbacks); diff --git a/examples/p4est_3d_dgsem/elixir_mhd_amr_entropy_bounded.jl b/examples/p4est_3d_dgsem/elixir_mhd_amr_entropy_bounded.jl index 9af1340b16d..982d8cc581a 100644 --- a/examples/p4est_3d_dgsem/elixir_mhd_amr_entropy_bounded.jl +++ b/examples/p4est_3d_dgsem/elixir_mhd_amr_entropy_bounded.jl @@ -35,7 +35,14 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations3D) end initial_condition = initial_condition_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 3 volume_integral = VolumeIntegralFluxDifferencing(volume_flux) diff --git a/examples/p4est_3d_dgsem/elixir_mhd_shockcapturing_amr.jl b/examples/p4est_3d_dgsem/elixir_mhd_shockcapturing_amr.jl index 4ccd49130d3..f782ec6f142 100644 --- a/examples/p4est_3d_dgsem/elixir_mhd_shockcapturing_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_mhd_shockcapturing_amr.jl @@ -35,7 +35,14 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations3D) end initial_condition = initial_condition_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) +volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_3d_dgsem/elixir_mhdmultiion_convergence.jl b/examples/p4est_3d_dgsem/elixir_mhdmultiion_convergence.jl new file mode 100644 index 00000000000..0e008810c7b --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_mhdmultiion_convergence.jl @@ -0,0 +1,209 @@ + +using OrdinaryDiffEqLowStorageRK +using Trixi + +# 3D version of multi-ion MHD convergence test from +# - Rueda-Ramírez, A. M., Sikstel, A., & Gassner, G. J. (2025). An entropy-stable discontinuous Galerkin +# discretization of the ideal multi-ion magnetohydrodynamics system. Journal of Computational Physics, 523, 113655. + +############################################################################### +""" + electron_pressure_alpha(u, equations::IdealGlmMhdMultiIonEquations3D) +Returns a fraction (alpha) of the total ion pressure for the electron pressure. +""" +function electron_pressure_alpha(u, equations::IdealGlmMhdMultiIonEquations3D) + alpha = 0.2 + prim = cons2prim(u, equations) + p_e = zero(u[1]) + for k in eachcomponent(equations) + _, _, _, _, p_k = Trixi.get_component(k, prim, equations) + p_e += p_k + end + return alpha * p_e +end +# semidiscretization of the ideal multi-ion MHD equations +equations = IdealGlmMhdMultiIonEquations3D(gammas = (2.0, 4.0), + charge_to_mass = (2.0, 1.0), + electron_pressure = electron_pressure_alpha) + +""" +Initial (and exact) solution for the manufactured solution test.
Runs with +* gammas = (2.0, 4.0), +* charge_to_mass = (2.0, 1.0) +* Domain size: [-1,1]³ +""" +function initial_condition_manufactured_solution(x, t, + equations::IdealGlmMhdMultiIonEquations3D) + am = 0.1 + om = π + h = am * sin(om * (x[1] + x[2] + x[3] - t)) + 2 + hh1 = am * 0.4 * sin(om * (x[1] + x[2] + x[3] - t)) + 1 + hh2 = h - hh1 + + rho_1 = hh1 + rhou_1 = hh1 + rhov_1 = hh1 + rhow_1 = 0.1 * hh1 + rhoe_1 = 2 * hh1^2 + hh1 + rho_2 = hh2 + rhou_2 = hh2 + rhov_2 = hh2 + rhow_2 = 0.1 * hh2 + rhoe_2 = 2 * hh2^2 + hh2 + B1 = 0.5 * h + B2 = -0.25 * h + B3 = -0.25 * h + + return SVector{nvariables(equations), real(equations)}([ + B1, + B2, + B3, + rho_1, + rhou_1, + rhov_1, + rhow_1, + rhoe_1, + rho_2, + rhou_2, + rhov_2, + rhow_2, + rhoe_2, + zero(eltype(x)) + ]) +end + +""" +Source term that corresponds to the manufactured solution test. Runs with +* gammas = (2.0, 4.0), +* charge_to_mass = (2.0, 1.0) +* Domain size: [-1,1]³ +""" +function source_terms_manufactured_solution_pe(u, x, t, + equations::IdealGlmMhdMultiIonEquations3D) + am = 0.1 + om = pi + h1 = am * sin(om * (x[1] + x[2] + x[3] - t)) + hx = am * om * cos(om * (x[1] + x[2] + x[3] - t)) + + s1 = (11 * hx) / 25 + s2 = (30615 * hx * h1^2 + 156461 * hx * h1 + 191990 * hx) / (35000 * h1 + 75000) + s3 = (30615 * hx * h1^2 + 156461 * hx * h1 + 191990 * hx) / (35000 * h1 + 75000) + s4 = (30615 * hx * h1^2 + 142601 * hx * h1 + 162290 * hx) / (35000 * h1 + 75000) + s5 = (4735167957644739545 * hx * h1^2 + 22683915240114795103 * hx * h1 + + 26562869799135852870 * hx) / (1863579030125050000 * h1 + 3993383635982250000) + s6 = (33 * hx) / 50 + s7 = (63915 * hx * h1^2 + 245476 * hx * h1 + 233885 * hx) / (17500 * h1 + 37500) + s8 = (63915 * hx * h1^2 + 245476 * hx * h1 + 233885 * hx) / (17500 * h1 + 37500) + s9 = (63915 * hx * h1^2 + 235081 * hx * h1 + 211610 * hx) / (17500 * h1 + 37500) + s10 = (1619415 * hx * h1^2 + 6083946 * hx * h1 + 5629335 * hx) / (175000 * h1 + 375000) + s11 = (11 * hx) / 20 + s12 = -((11 * hx)
/ 40) + s13 = -((11 * hx) / 40) + + s = SVector{nvariables(equations), real(equations)}([ + s11, + s12, + s13, + s1, + s2, + s3, + s4, + s5, + s6, + s7, + s8, + s9, + s10, + zero(eltype(u)) + ]) + S_std = source_terms_lorentz(u, x, t, equations::IdealGlmMhdMultiIonEquations3D) + + return SVector{nvariables(equations), real(equations)}(S_std .+ s) +end + +initial_condition = initial_condition_manufactured_solution +source_terms = source_terms_manufactured_solution_pe + +volume_flux = (flux_ruedaramirez_etal, flux_nonconservative_ruedaramirez_etal) +surface_flux = (flux_lax_friedrichs, flux_nonconservative_central) + +polydeg = 3 +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (-1.0, -1.0, -1.0) +coordinates_max = (1.0, 1.0, 1.0) +# Mapping as described in https://arxiv.org/abs/2012.12040 +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 0.1 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 0.1 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 0.1 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + # Go back to [-1,1]^3 + x = x * 2 / 3 - 1 + y = y * 2 / 3 - 1 + z = z * 2 / 3 - 1 + + return SVector(x, y, z) +end + +cells_per_dimension = (8, 8, 8) +mesh = P4estMesh(cells_per_dimension, + polydeg = polydeg, #initial_refinement_level = 0, + mapping = mapping, + #coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = true) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms) + 
+############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim, + output_directory = joinpath(@__DIR__, "out")) + +cfl = 0.5 +stepsize_callback = StepsizeCallback(cfl = cfl) + +glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback, + glm_speed_callback) + +############################################################################### +# run the simulation +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl index c76f39ec846..60773aeb865 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl @@ -40,7 +40,14 @@ function initial_condition_3d_blast_wave(x, t, equations::CompressibleEulerEquat end initial_condition = initial_condition_3d_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. 
+# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl index a781793fdda..4e29fecc17a 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_crm.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_crm.jl index 359ed7b7f03..6e4caddf3c4 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_crm.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_crm.jl @@ -83,9 +83,9 @@ boundary_conditions_hyp = Dict(:SYMMETRY => boundary_condition_slip_wall, # slip :WING_UP => boundary_condition_slip_wall, :WING_LO => boundary_condition_slip_wall) -velocity_bc_airfoil = NoSlip((x, t, equations) -> SVector(0.0, 0.0, 0.0)) +velocity_bc_plane = NoSlip((x, t, equations) -> SVector(0.0, 0.0, 0.0)) heat_bc = Adiabatic((x, t, equations) -> 0.0) -bc_body = BoundaryConditionNavierStokesWall(velocity_bc_airfoil, heat_bc) +bc_body = BoundaryConditionNavierStokesWall(velocity_bc_plane, heat_bc) # The "Slip" boundary condition rotates all velocities into tangential direction # and thus acts as a symmetry plane. diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl index de1378eed6e..a5daea7e143 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl @@ -41,7 +41,14 @@ initial_condition = initial_condition_taylor_green_vortex end volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-1.0, -1.0, -1.0) .* pi diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl index 5275c850ada..58a577a11dd 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -113,9 +113,7 @@ function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, surface_flux_function, equations::CompressibleEulerEquations3D) u_cons = initial_condition_viscous_shock(x, t, equations) - flux = Trixi.flux(u_cons, normal_direction, equations) - - return flux + return flux(u_cons, normal_direction, equations) end # Completely free outflow @@ -123,9 +121,7 @@ function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x surface_flux_function, equations::CompressibleEulerEquations3D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end boundary_conditions = Dict(:x_neg => boundary_condition_inflow, diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock_dirichlet_bc.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock_dirichlet_bc.jl index 5d1d8376997..fdecf80ba93 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock_dirichlet_bc.jl +++ 
b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock_dirichlet_bc.jl @@ -109,9 +109,7 @@ function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, surface_flux_function, equations::CompressibleEulerEquations3D) u_cons = initial_condition_viscous_shock(x, t, equations) - flux = Trixi.flux(u_cons, normal_direction, equations) - - return flux + return flux(u_cons, normal_direction, equations) end # Completely free outflow @@ -119,9 +117,7 @@ function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x surface_flux_function, equations::CompressibleEulerEquations3D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, normal_direction, equations) end boundary_conditions = Dict(:x_neg => boundary_condition_inflow, diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl index e1d8b374b69..d8a9b29f514 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl @@ -5,8 +5,8 @@ initial_condition = initial_condition_eoc_test_coupled_euler_gravity ############################################################################### # semidiscretization of the compressible Euler equations -gamma = 2.0 -equations_euler = CompressibleEulerEquations2D(gamma) +gamma() = 2.0 +equations_euler = CompressibleEulerEquations2D(gamma()) polydeg = 3 solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) @@ -25,7 +25,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations2D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, 
`max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl index 73a772cf3ca..a45d208ed2f 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl @@ -80,7 +80,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations2D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl index c476428b3ca..6f424341751 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl @@ -161,7 +161,14 @@ function boundary_condition_sedov_self_gravity(u_inner, orientation, direction, return flux end -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/special_elixirs/elixir_euler_ad.jl b/examples/special_elixirs/elixir_euler_ad.jl index 574eba6364c..b0740335303 100644 --- a/examples/special_elixirs/elixir_euler_ad.jl +++ b/examples/special_elixirs/elixir_euler_ad.jl @@ -7,7 +7,14 @@ equations = CompressibleEulerEquations2D(1.4) mesh = TreeMesh((-1.0, -1.0), (1.0, 1.0), initial_refinement_level = 2, n_cells_max = 10^5) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) """ diff --git a/examples/structured_1d_dgsem/elixir_euler_sedov.jl b/examples/structured_1d_dgsem/elixir_euler_sedov.jl index 73deae03d11..6afc0b255cc 100644 --- a/examples/structured_1d_dgsem/elixir_euler_sedov.jl +++ b/examples/structured_1d_dgsem/elixir_euler_sedov.jl @@ -35,7 +35,14 @@ end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/structured_1d_dgsem/elixir_euler_source_terms.jl b/examples/structured_1d_dgsem/elixir_euler_source_terms.jl index a169df46ad4..07a089245bf 100644 --- a/examples/structured_1d_dgsem/elixir_euler_source_terms.jl +++ b/examples/structured_1d_dgsem/elixir_euler_source_terms.jl @@ -12,8 +12,16 @@ equations = CompressibleEulerEquations1D(1.4) initial_condition = initial_condition_convergence_test # Note that the expected EOC of 5 is not reached with this flux. -# Using flux_hll instead yields the expected EOC. 
-solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) +# Using `flux_hll` instead yields the expected EOC. + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0,) coordinates_max = (2.0,) diff --git a/examples/structured_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/structured_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 3bf950331e7..9284b79539e 100644 --- a/examples/structured_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/structured_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -17,7 +17,14 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = (x_neg = boundary_condition, x_pos = boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) f1() = SVector(0.0) f2() = SVector(2.0) diff --git a/examples/structured_1d_dgsem/elixir_euler_weak_blast_er.jl b/examples/structured_1d_dgsem/elixir_euler_weak_blast_er.jl new file mode 100644 index 00000000000..224ea34b2ab --- /dev/null +++ b/examples/structured_1d_dgsem/elixir_euler_weak_blast_er.jl @@ -0,0 +1,53 @@ +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations1D(1.4) + +# Volume flux stabilizes the simulation - in contrast to standard DGSEM with +# `surface_flux = flux_ranocha` only which crashes. +solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) + +coordinates_min = -2.0 +coordinates_max = 2.0 +cells_per_dimension = 32 +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) + +initial_condition = initial_condition_weak_blast_wave +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + analysis_errors = Symbol[], # Switch off error computation + # Note: `entropy` defaults to mathematical entropy + analysis_integrals = (entropy,), + analysis_filename = "entropy_ER.dat", + save_analysis = true) + +stepsize_callback = StepsizeCallback(cfl = 0.25) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# Ensure exact entropy conservation by employing a relaxation Runge-Kutta method +relaxation_solver = Trixi.RelaxationSolverNewton(max_iterations = 5, + root_tol = eps(Float64), + gamma_tol = eps(Float64)) +ode_alg = Trixi.RelaxationRK44(relaxation_solver = relaxation_solver) + +sol = Trixi.solve(ode, ode_alg, + dt = 42.0, save_everystep = false, callback = callbacks); diff --git a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl index 48c0f83640b..ccdba5e265f 100644 --- a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl +++ b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl @@ -39,9 +39,7 @@ function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t surface_flux_function, equations::TrafficFlowLWREquations1D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, orientation, equations) - - return flux + flux(u_inner, orientation, equations) end boundary_conditions = (x_neg = boundary_condition_inflow, diff --git a/examples/structured_2d_dgsem/elixir_euler_convergence_implicit_sparse_jacobian.jl b/examples/structured_2d_dgsem/elixir_euler_convergence_implicit_sparse_jacobian.jl new file mode 100644 index 00000000000..909d9b3e6ec --- 
/dev/null +++ b/examples/structured_2d_dgsem/elixir_euler_convergence_implicit_sparse_jacobian.jl @@ -0,0 +1,107 @@ +using Trixi +using SparseConnectivityTracer # For obtaining the Jacobian sparsity pattern +using SparseMatrixColorings # For obtaining the coloring vector +using OrdinaryDiffEqSDIRK, ADTypes + +############################################################################### +### solver and equations ### + +# For sparsity detection we can only use `flux_lax_friedrichs` at the moment since this is +# `if`-clause free (although it contains `min` and `max` operations). +# The sparsity pattern, however, should be the same for other (two-point) fluxes as well. +surface_flux = flux_lax_friedrichs +solver = DGSEM(polydeg = 3, surface_flux = surface_flux) + +equations = CompressibleEulerEquations2D(1.4) + +############################################################################### +### mesh ### + +# Mapping as described in https://arxiv.org/abs/2012.12040, +# reduced to 2D on [0, 2]^2 instead of [0, 3]^3 +function mapping(xi_, eta_) + # Transform input variables between -1 and 1 onto [0,2] + xi = xi_ + 1 + eta = eta_ + 1 + + y = eta + 1 / 4 * (cos(pi * (xi - 1)) * + cos(0.5 * pi * (eta - 1))) + + x = xi + 1 / 4 * (cos(0.5 * pi * (xi - 1)) * + cos(2 * pi * (y - 1))) + + return SVector(x, y) +end +cells_per_dimension = (16, 16) +mesh = StructuredMesh(cells_per_dimension, mapping) + +############################################################################### +### semidiscretization for sparsity detection ### + +jac_detector = TracerSparsityDetector() +# We need to construct the semidiscretization with the correct +# sparsity-detection ready datatype, which is retrieved here +jac_eltype = jacobian_eltype(real(solver), jac_detector) + +# Semidiscretization for sparsity pattern detection +semi_jac_type = SemidiscretizationHyperbolic(mesh, equations, + initial_condition_convergence_test, + solver, + source_terms = source_terms_convergence_test, + uEltype = 
jac_eltype) # Need to supply Jacobian element type + +tspan = (0.0, 5.0) # Re-used for wrapping `rhs` below + +# Call `semidiscretize` to create the ODE problem to have access to the +# initial condition based on which the sparsity pattern is computed +ode_jac_type = semidiscretize(semi_jac_type, tspan) +u0_ode = ode_jac_type.u0 +du_ode = similar(u0_ode) + +############################################################################### +### Compute the Jacobian sparsity pattern ### + +# Wrap the `Trixi.rhs!` function to match the signature `f!(du, u)`, see +# https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/api/#ADTypes.jacobian_sparsity +rhs_wrapped! = (du_ode, u0_ode) -> Trixi.rhs!(du_ode, u0_ode, semi_jac_type, tspan[1]) + +jac_prototype = jacobian_sparsity(rhs_wrapped!, du_ode, u0_ode, jac_detector) + +# For most efficient solving we also want the coloring vector + +coloring_prob = ColoringProblem(; structure = :nonsymmetric, partition = :column) +coloring_alg = GreedyColoringAlgorithm(; decompression = :direct) +coloring_result = coloring(jac_prototype, coloring_prob, coloring_alg) +coloring_vec = column_colors(coloring_result) + +############################################################################### +### sparsity-aware semidiscretization and ode ### + +# Semidiscretization for actual simulation +semi_float_type = SemidiscretizationHyperbolic(mesh, equations, + initial_condition_convergence_test, + solver, + source_terms = source_terms_convergence_test) + +# Supply Jacobian prototype and coloring vector to the semidiscretization +ode_jac_sparse = semidiscretize(semi_float_type, tspan, + jac_prototype = jac_prototype, + colorvec = coloring_vec) +# using "dense" `ode = semidiscretize(semi_float_type, tspan)` +# is essentially infeasible, even single step takes ages! 
+ +############################################################################### +### callbacks & solve ### + +summary_callback = SummaryCallback() +analysis_callback = AnalysisCallback(semi_float_type, interval = 50) +alive_callback = AliveCallback(alive_interval = 3) + +# Note: No `stepsize_callback` due to implicit solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback) + +sol = solve(ode_jac_sparse, + # Default `AutoForwardDiff()` is not yet working, see + # https://github.com/trixi-framework/Trixi.jl/issues/2369 + Kvaerno4(; autodiff = AutoFiniteDiff()); + dt = 0.05, save_everystep = false, callback = callbacks); diff --git a/examples/structured_2d_dgsem/elixir_euler_free_stream.jl b/examples/structured_2d_dgsem/elixir_euler_free_stream.jl index 2187104a4af..1ac5b09090a 100644 --- a/examples/structured_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/structured_2d_dgsem/elixir_euler_free_stream.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_constant -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Mapping as described in https://arxiv.org/abs/2012.12040, but reduced to 2D function mapping(xi_, eta_) diff --git a/examples/structured_2d_dgsem/elixir_euler_sedov.jl b/examples/structured_2d_dgsem/elixir_euler_sedov.jl index 64e96565d2e..7f91c6b4aa4 100644 --- a/examples/structured_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/structured_2d_dgsem/elixir_euler_sedov.jl @@ -37,7 +37,15 @@ end initial_condition = initial_condition_sedov_blast_wave # Get the DG approximation space -surface_flux = flux_lax_friedrichs + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl index 0fc9d184c34..3064e8d7206 100644 --- a/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl +++ b/examples/structured_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl @@ -42,7 +42,14 @@ boundary_conditions = (x_neg = boundary_condition, y_neg = boundary_condition, y_pos = boundary_condition) -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/structured_2d_dgsem/elixir_euler_source_terms.jl b/examples/structured_2d_dgsem/elixir_euler_source_terms.jl index a7088bd7076..32e27c376e0 100644 --- a/examples/structured_2d_dgsem/elixir_euler_source_terms.jl +++ b/examples/structured_2d_dgsem/elixir_euler_source_terms.jl @@ -11,7 +11,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/structured_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/structured_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 972e07fabdd..b573d3a0a21 100644 --- a/examples/structured_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/structured_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -16,7 +16,14 @@ boundary_conditions = (x_neg = boundary_condition, y_neg = boundary_condition, y_pos = boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/structured_2d_dgsem/elixir_euler_source_terms_parallelogram.jl b/examples/structured_2d_dgsem/elixir_euler_source_terms_parallelogram.jl index 86011016c47..ed154dcec91 100644 --- a/examples/structured_2d_dgsem/elixir_euler_source_terms_parallelogram.jl +++ b/examples/structured_2d_dgsem/elixir_euler_source_terms_parallelogram.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Define faces for a parallelogram that looks like this # diff --git a/examples/structured_2d_dgsem/elixir_euler_source_terms_rotated.jl b/examples/structured_2d_dgsem/elixir_euler_source_terms_rotated.jl index a63179a1669..a777e2018a4 100644 --- a/examples/structured_2d_dgsem/elixir_euler_source_terms_rotated.jl +++ b/examples/structured_2d_dgsem/elixir_euler_source_terms_rotated.jl @@ -84,7 +84,14 @@ sin_ = initial_condition_source_terms.sin_alpha cos_ = initial_condition_source_terms.cos_alpha T = [cos_ -sin_; sin_ cos_] -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) mapping(xi, eta) = T * SVector(xi, eta) diff --git a/examples/structured_2d_dgsem/elixir_euler_source_terms_waving_flag.jl b/examples/structured_2d_dgsem/elixir_euler_source_terms_waving_flag.jl index 412c44c5b4d..2ed65505eb7 100644 --- a/examples/structured_2d_dgsem/elixir_euler_source_terms_waving_flag.jl +++ b/examples/structured_2d_dgsem/elixir_euler_source_terms_waving_flag.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Deformed rectangle that looks like a waving flag, # lower and upper faces are sinus curves, left and right are vertical lines. 
diff --git a/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl b/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl index bc9a08290fe..b14e3b20500 100644 --- a/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl +++ b/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl @@ -4,9 +4,9 @@ using Trixi ############################################################################### # semidiscretization of the polytropic Euler equations -gamma = 2.0 # Adiabatic monatomic gas in 2d. -kappa = 0.5 # Scaling factor for the pressure. -equations = PolytropicEulerEquations2D(gamma, kappa) +gamma() = 2.0 # Adiabatic monatomic gas in 2d. +kappa() = 0.5 # Scaling factor for the pressure. +equations = PolytropicEulerEquations2D(gamma(), kappa()) # Linear pressure wave in the negative x-direction. function initial_condition_wave(x, t, equations::PolytropicEulerEquations2D) diff --git a/examples/structured_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/structured_2d_dgsem/elixir_mhd_alfven_wave.jl index 18986ae6843..6286f007bc5 100644 --- a/examples/structured_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/structured_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -10,9 +10,17 @@ equations = IdealGlmMhdEquations2D(gamma) initial_condition = initial_condition_convergence_test # Get the DG approximation space + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_central, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the curved quad mesh from a mapping function diff --git a/examples/structured_2d_dgsem/elixir_mhd_coupled.jl b/examples/structured_2d_dgsem/elixir_mhd_coupled.jl index 060d30be99b..3465de1723b 100644 --- a/examples/structured_2d_dgsem/elixir_mhd_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_mhd_coupled.jl @@ -31,9 +31,16 @@ equations = IdealGlmMhdEquations2D(gamma) cells_per_dimension = (32, 64) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) ########### diff --git a/examples/structured_2d_dgsem/elixir_mhd_onion.jl b/examples/structured_2d_dgsem/elixir_mhd_onion.jl index 0fcf657f1e5..065f60dc863 100644 --- a/examples/structured_2d_dgsem/elixir_mhd_onion.jl +++ b/examples/structured_2d_dgsem/elixir_mhd_onion.jl @@ -33,9 +33,16 @@ mesh = StructuredMesh(cells_per_dimension, coordinates_max, periodicity = (false, false)) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition), diff --git a/examples/structured_2d_dgsem/elixir_mhd_orszag_tang_sc_subcell.jl b/examples/structured_2d_dgsem/elixir_mhd_orszag_tang_sc_subcell.jl index 2837eb2d4a0..0fbfc1e21ab 100644 --- a/examples/structured_2d_dgsem/elixir_mhd_orszag_tang_sc_subcell.jl +++ b/examples/structured_2d_dgsem/elixir_mhd_orszag_tang_sc_subcell.jl @@ -29,7 +29,15 @@ function initial_condition_orszag_tang(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_orszag_tang -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell_local_symmetric) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_powell_local_symmetric) volume_flux = (flux_central, flux_nonconservative_powell_local_symmetric) polydeg = 3 diff --git a/examples/structured_3d_dgsem/elixir_euler_free_stream.jl b/examples/structured_3d_dgsem/elixir_euler_free_stream.jl index d9766b1ea52..cfd415a2523 100644 --- a/examples/structured_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/structured_3d_dgsem/elixir_euler_free_stream.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations3D(1.4) initial_condition = initial_condition_constant -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 diff --git a/examples/structured_3d_dgsem/elixir_euler_sedov.jl b/examples/structured_3d_dgsem/elixir_euler_sedov.jl index bc4ca8efa75..2cbd33dec9e 100644 --- a/examples/structured_3d_dgsem/elixir_euler_sedov.jl +++ b/examples/structured_3d_dgsem/elixir_euler_sedov.jl @@ -40,7 +40,14 @@ end initial_condition = initial_condition_medium_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/structured_3d_dgsem/elixir_euler_source_terms.jl b/examples/structured_3d_dgsem/elixir_euler_source_terms.jl index 2865046adff..f9e8dc5e3f9 100644 --- a/examples/structured_3d_dgsem/elixir_euler_source_terms.jl +++ b/examples/structured_3d_dgsem/elixir_euler_source_terms.jl @@ -11,7 +11,14 @@ equations = CompressibleEulerEquations3D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # coordinates_min = (0.0, 0.0, 0.0) diff --git a/examples/structured_3d_dgsem/elixir_euler_source_terms_nonperiodic_curved.jl b/examples/structured_3d_dgsem/elixir_euler_source_terms_nonperiodic_curved.jl index 3bc00730451..b0c5234e5b1 100644 --- a/examples/structured_3d_dgsem/elixir_euler_source_terms_nonperiodic_curved.jl +++ b/examples/structured_3d_dgsem/elixir_euler_source_terms_nonperiodic_curved.jl @@ -18,7 +18,14 @@ boundary_conditions = (x_neg = boundary_condition, z_neg = boundary_condition, z_pos = boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. 
diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 9d0f7445bd1..b204c25a756 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_constant -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D function mapping(xi_, eta_) diff --git a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl index 4a1f2cd1d93..8653a0a30ed 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl @@ -37,7 +37,15 @@ end initial_condition = initial_condition_sedov_blast_wave # Get the DG approximation space -surface_flux = flux_lax_friedrichs + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. 
+# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 6c22d18e5ba..604857ff8e4 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -14,7 +14,14 @@ source_terms = source_terms_convergence_test boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:all => boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Deformed rectangle that looks like a waving flag, # lower and upper faces are sinus curves, left and right are vertical lines. diff --git a/examples/t8code_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl b/examples/t8code_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl index 6e8fc6529cd..f8f934395a1 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_weak_blast_wave_amr.jl @@ -31,11 +31,18 @@ initial_condition = initial_condition_weak_blast_wave # Get the DG approximation space -# Activate the shock capturing + flux differencing -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) +# Activate the shock capturing + flux differencing indicator_sc = IndicatorHennemannGassner(equations, basis, alpha_max = 0.5, alpha_min = 0.001, diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 2c0a1d13c2a..3fe0386a897 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -28,7 +28,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations2D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index bd6b7aa5b50..65d2abc3862 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -43,7 +43,14 @@ function initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_rotor -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/t8code_3d_dgsem/elixir_advection_cubed_sphere.jl b/examples/t8code_3d_dgsem/elixir_advection_cubed_sphere.jl index 86f54ec8ce4..1cf443f5cf2 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_cubed_sphere.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_cubed_sphere.jl @@ -16,7 +16,12 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:inside => boundary_condition, :outside => boundary_condition) -mesh = Trixi.T8codeMeshCubedSphere(5, 3, 0.5, 0.5; +trees_per_face_dimension = 5 # Number of trees per patch in longitudinal and latitudinal direction +layers = 3 # Number of layers of the shell +inner_radius = 0.5 # Radius of the inner side of the shell +thickness = 0.5 # Thickness of the shell. The outer radius will be `inner_radius + thickness` +mesh = Trixi.T8codeMeshCubedSphere(trees_per_face_dimension, layers, + inner_radius, thickness; polydeg = 3, initial_refinement_level = 0) # A semidiscretization collects data structures and functions for the spatial discretization diff --git a/examples/t8code_3d_dgsem/elixir_euler_baroclinic_instability.jl b/examples/t8code_3d_dgsem/elixir_euler_baroclinic_instability.jl index deb37ea6204..2b3c6e72e78 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_baroclinic_instability.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_baroclinic_instability.jl @@ -229,7 +229,9 @@ solver = DGSEM(polydeg = 5, surface_flux = surface_flux, # For optimal results, use (16, 8) here trees_per_cube_face = (8, 4) -mesh = Trixi.T8codeMeshCubedSphere(trees_per_cube_face..., 6.371229e6, 30000.0, +inner_radius = 6.371229e6 # Radius of the inner side of the shell +thickness = 30000.0 # Thickness of the shell +mesh = Trixi.T8codeMeshCubedSphere(trees_per_cube_face..., inner_radius, 
thickness, polydeg = 5, initial_refinement_level = 0) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl index 4f558e42974..851e35db80b 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -13,7 +13,15 @@ boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition) # Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. # The polydeg of the solver must be at least twice as big as the polydeg of the mesh. # See https://doi.org/10.1007/s10915-018-00897-9, Section 6. -solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. 
diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl index be2f2cfb2d0..8c89bbd9378 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -10,7 +10,14 @@ initial_condition = initial_condition_constant boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D. 
diff --git a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl index f51fc068ad2..ddb106864d9 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl @@ -40,7 +40,14 @@ end initial_condition = initial_condition_medium_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 5 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index cefde6d9b44..09e7994f1f3 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -14,7 +14,15 @@ boundary_conditions = Dict(:all => boundary_condition) # Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. # The polydeg of the solver must be at least twice as big as the polydeg of the mesh. # See https://doi.org/10.1007/s10915-018-00897-9, Section 6. 
-solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) # Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 40532c92740..36101d088a6 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -16,7 +16,14 @@ boundary_conditions = Dict(:x_neg => boundary_condition, :z_neg => boundary_condition, :z_pos => boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (0.0, 0.0, 0.0) diff --git a/examples/t8code_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl b/examples/t8code_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl index f8c8de17313..03b6dac3fb4 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_weak_blast_wave_amr.jl @@ -32,7 +32,14 @@ end initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_1d_dgsem/elixir_advection_diffusion.jl b/examples/tree_1d_dgsem/elixir_advection_diffusion.jl index bfe593ba89e..3ca76ad6aa0 100644 --- a/examples/tree_1d_dgsem/elixir_advection_diffusion.jl +++ b/examples/tree_1d_dgsem/elixir_advection_diffusion.jl @@ -87,6 +87,6 @@ callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, sav # OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks time_int_tol = 1.0e-10 time_abs_tol = 1.0e-10 -sol = solve(ode, KenCarp4(autodiff = AutoFiniteDiff()); +sol = solve(ode, KenCarp4(autodiff = AutoFiniteDiff()); # This is an IMEX SDIRK method abstol = time_abs_tol, reltol = time_int_tol, ode_default_options()..., callback = callbacks) diff --git a/examples/tree_1d_dgsem/elixir_advection_extended.jl b/examples/tree_1d_dgsem/elixir_advection_extended.jl index 576882a19a7..8df04c72334 100644 --- a/examples/tree_1d_dgsem/elixir_advection_extended.jl +++ b/examples/tree_1d_dgsem/elixir_advection_extended.jl @@ -1,5 +1,6 @@ using OrdinaryDiffEqLowStorageRK using Trixi +using Plots # For visualization callback ############################################################################### # semidiscretization of the linear advection equation @@ -64,10 +65,14 @@ save_solution = SaveSolutionCallback(interval = 100, # The StepsizeCallback handles the re-calculation of the maximum Δt after each time step stepsize_callback = StepsizeCallback(cfl = 1.6) +# Enable in-situ visualization with a new plot generated at every time step +visualization = VisualizationCallback(semi; interval = 1) + # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, - save_restart, save_solution, + save_restart, + save_solution, visualization, 
stepsize_callback) ############################################################################### diff --git a/examples/tree_1d_dgsem/elixir_burgers_rarefaction.jl b/examples/tree_1d_dgsem/elixir_burgers_rarefaction.jl index 76f966f4c52..00539dc8137 100644 --- a/examples/tree_1d_dgsem/elixir_burgers_rarefaction.jl +++ b/examples/tree_1d_dgsem/elixir_burgers_rarefaction.jl @@ -45,13 +45,11 @@ end boundary_condition_inflow = BoundaryConditionDirichlet(initial_condition_rarefaction) -function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, +function boundary_condition_outflow(u_inner, orientation, direction, x, t, surface_flux_function, equations::InviscidBurgersEquation1D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, orientation, equations) end boundary_conditions = (x_neg = boundary_condition_inflow, diff --git a/examples/tree_1d_dgsem/elixir_burgers_shock.jl b/examples/tree_1d_dgsem/elixir_burgers_shock.jl index e5e152c7de2..24f569201d7 100644 --- a/examples/tree_1d_dgsem/elixir_burgers_shock.jl +++ b/examples/tree_1d_dgsem/elixir_burgers_shock.jl @@ -45,13 +45,11 @@ end boundary_condition_inflow = BoundaryConditionDirichlet(initial_condition_shock) -function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, +function boundary_condition_outflow(u_inner, orientation, direction, x, t, surface_flux_function, equations::InviscidBurgersEquation1D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, normal_direction, equations) - - return flux + return flux(u_inner, orientation, equations) end boundary_conditions = (x_neg = boundary_condition_inflow, diff --git a/examples/tree_1d_dgsem/elixir_diffusion_ldg.jl b/examples/tree_1d_dgsem/elixir_diffusion_ldg.jl index 68f5c650c01..143a9cc9840 100644 --- a/examples/tree_1d_dgsem/elixir_diffusion_ldg.jl +++ 
b/examples/tree_1d_dgsem/elixir_diffusion_ldg.jl @@ -44,15 +44,14 @@ boundary_conditions_parabolic = boundary_condition_periodic solver_parabolic = ViscousFormulationLocalDG() semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, - solver; - solver_parabolic, + solver; solver_parabolic, boundary_conditions = (boundary_conditions, boundary_conditions_parabolic)) ############################################################################### # ODE solvers, callbacks etc. -# Create ODE problem with time span from 0.0 to 1.0 +# Create ODE problem with time span from 0.0 to 0.1 tspan = (0.0, 0.1) ode = semidiscretize(semi, tspan) diff --git a/examples/tree_1d_dgsem/elixir_diffusion_ldg_newton_krylov.jl b/examples/tree_1d_dgsem/elixir_diffusion_ldg_newton_krylov.jl new file mode 100644 index 00000000000..8d20593bc0e --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_diffusion_ldg_newton_krylov.jl @@ -0,0 +1,71 @@ +using Trixi + +using OrdinaryDiffEqSDIRK +using LinearSolve # For Jacobian-free Newton-Krylov (GMRES) solver +using ADTypes # For automatic differentiation via finite differences + +############################################################################### +# semidiscretization of the linear (advection) diffusion equation + +advection_velocity = 0.0 # Note: This renders the equation mathematically purely parabolic +equations = LinearScalarAdvectionEquation1D(advection_velocity) +diffusivity() = 0.5 +equations_parabolic = LaplaceDiffusion1D(diffusivity(), equations) + +# surface flux does not matter for pure diffusion problem +solver = DGSEM(polydeg = 3, surface_flux = flux_central) + +coordinates_min = -convert(Float64, pi) +coordinates_max = convert(Float64, pi) + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + +function initial_condition_pure_diffusion_1d_convergence_test(x, t, + equation) + nu = diffusivity() + c = 0 + A = 1 + omega = 1 + scalar 
= c + A * sin(omega * sum(x)) * exp(-nu * omega^2 * t) + return SVector(scalar) +end +initial_condition = initial_condition_pure_diffusion_1d_convergence_test + +solver_parabolic = ViscousFormulationLocalDG() +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, + solver; solver_parabolic) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() +analysis_callback = AnalysisCallback(semi, interval = 10) +alive_callback = AliveCallback(alive_interval = 1) + +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback) + +############################################################################### +# run the simulation + +# Tolerances for GMRES residual, see https://jso.dev/Krylov.jl/stable/solvers/unsymmetric/#Krylov.gmres +atol_lin_solve = 1e-6 +rtol_lin_solve = 1e-5 + +# Jacobian-free Newton-Krylov (GMRES) solver +linsolve = KrylovJL_GMRES(atol = atol_lin_solve, rtol = rtol_lin_solve) + +# Use (diagonally) implicit Runge-Kutta, see +# https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Using-Jacobian-Free-Newton-Krylov +ode_alg = KenCarp47(autodiff = AutoFiniteDiff(), linsolve = linsolve) + +atol_ode_solve = 1e-5 +rtol_ode_solve = 1e-4 +sol = solve(ode, ode_alg; + abstol = atol_ode_solve, reltol = rtol_ode_solve, + ode_default_options()..., callback = callbacks); diff --git a/examples/tree_1d_dgsem/elixir_euler_blast_wave.jl b/examples/tree_1d_dgsem/elixir_euler_blast_wave.jl index 52ad9d98177..abd12230166 100644 --- a/examples/tree_1d_dgsem/elixir_euler_blast_wave.jl +++ b/examples/tree_1d_dgsem/elixir_euler_blast_wave.jl @@ -36,7 +36,14 @@ function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquation end initial_condition = initial_condition_blast_wave -surface_flux = flux_lax_friedrichs +# Up to 
version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_1d_dgsem/elixir_euler_density_wave_tracers.jl b/examples/tree_1d_dgsem/elixir_euler_density_wave_tracers.jl index c28c6dc473e..2ff409de704 100644 --- a/examples/tree_1d_dgsem/elixir_euler_density_wave_tracers.jl +++ b/examples/tree_1d_dgsem/elixir_euler_density_wave_tracers.jl @@ -11,8 +11,15 @@ initial_condition = initial_condition_density_wave volume_flux = FluxTracerEquationsCentral(flux_ranocha) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. solver = DGSEM(polydeg = 3, - surface_flux = flux_lax_friedrichs, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = -1.0 diff --git a/examples/tree_1d_dgsem/elixir_euler_positivity.jl b/examples/tree_1d_dgsem/elixir_euler_positivity.jl index 6f1298ffd15..2dce260683a 100644 --- a/examples/tree_1d_dgsem/elixir_euler_positivity.jl +++ b/examples/tree_1d_dgsem/elixir_euler_positivity.jl @@ -36,7 +36,14 @@ function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEq end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, @@ -75,16 +82,23 @@ save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorLöhner(semi, variable = density_pressure) + amr_controller = ControllerThreeLevel(semi, amr_indicator, base_level = 4, med_level = 0, med_threshold = 0.1, # med_level = current level max_level = 6, max_threshold = 0.3) + amr_callback = AMRCallback(semi, amr_controller, interval = 2, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) stepsize_callback = StepsizeCallback(cfl = 0.5) @@ -93,12 +107,11 @@ callbacks = CallbackSet(summary_callback, save_solution, amr_callback, stepsize_callback) -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation -sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition = false); +sol = solve(ode, + CarpenterKennedy2N54(stage_limiter! 
= positivity_limiter, + williamson_condition = false); dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback ode_default_options()..., callback = callbacks); diff --git a/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl index 698a9116d2a..4a21237ae0b 100644 --- a/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl +++ b/examples/tree_1d_dgsem/elixir_euler_quasi_1d_discontinuous.jl @@ -29,7 +29,14 @@ end initial_condition = initial_condition_discontinuity -surface_flux = (flux_lax_friedrichs, flux_nonconservative_chan_etal) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_chan_etal) volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal) basis = LobattoLegendreBasis(3) diff --git a/examples/tree_1d_dgsem/elixir_euler_sedov_blast_wave.jl b/examples/tree_1d_dgsem/elixir_euler_sedov_blast_wave.jl index 7fa8d19c05d..131343b47ae 100644 --- a/examples/tree_1d_dgsem/elixir_euler_sedov_blast_wave.jl +++ b/examples/tree_1d_dgsem/elixir_euler_sedov_blast_wave.jl @@ -36,7 +36,14 @@ function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEq end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_chandrashekar basis = LobattoLegendreBasis(3) shock_indicator_variable = density_pressure diff --git a/examples/tree_1d_dgsem/elixir_euler_shockcapturing.jl b/examples/tree_1d_dgsem/elixir_euler_shockcapturing.jl index 34bb4b54d79..304537f306c 100644 --- a/examples/tree_1d_dgsem/elixir_euler_shockcapturing.jl +++ b/examples/tree_1d_dgsem/elixir_euler_shockcapturing.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations1D(1.4) initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_shima_etal basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl index 735af8c75a6..5738fb918cb 100644 --- a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl @@ -9,8 +9,16 @@ equations = CompressibleEulerEquations1D(1.4) initial_condition = initial_condition_convergence_test # Note that the expected EOC of 5 is not reached with this flux. -# Using flux_hll instead yields the expected EOC. 
-solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) +# Using `flux_hll` instead yields the expected EOC. + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = 0.0 coordinates_max = 2.0 diff --git a/examples/tree_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/tree_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 76fa36b27a2..e51ca955c91 100644 --- a/examples/tree_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/tree_1d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -15,7 +15,14 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = (x_neg = boundary_condition, x_pos = boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0,) coordinates_max = (2.0,) diff --git a/examples/tree_1d_dgsem/elixir_eulergravity_convergence.jl b/examples/tree_1d_dgsem/elixir_eulergravity_convergence.jl index dcbc5c444c3..5b2788c65a7 100644 --- a/examples/tree_1d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/tree_1d_dgsem/elixir_eulergravity_convergence.jl @@ -24,7 +24,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations1D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/tree_1d_dgsem/elixir_eulermulti_convergence_es.jl b/examples/tree_1d_dgsem/elixir_eulermulti_convergence_es.jl index e0c416ba595..3af86f4fcc8 100644 --- a/examples/tree_1d_dgsem/elixir_eulermulti_convergence_es.jl +++ b/examples/tree_1d_dgsem/elixir_eulermulti_convergence_es.jl @@ -9,7 +9,14 @@ equations = CompressibleEulerMulticomponentEquations1D(gammas = (1.4, 1.4, 1.4, initial_condition = initial_condition_convergence_test volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-1.0,) diff --git a/examples/tree_1d_dgsem/elixir_eulermulti_es.jl b/examples/tree_1d_dgsem/elixir_eulermulti_es.jl index 0f17027bb76..500c73e3e1a 100644 --- a/examples/tree_1d_dgsem/elixir_eulermulti_es.jl +++ b/examples/tree_1d_dgsem/elixir_eulermulti_es.jl @@ -9,7 +9,14 @@ equations = CompressibleEulerMulticomponentEquations1D(gammas = (1.4, 1.4), initial_condition = initial_condition_weak_blast_wave volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-2.0,) diff --git a/examples/tree_1d_dgsem/elixir_eulermulti_two_interacting_blast_waves.jl b/examples/tree_1d_dgsem/elixir_eulermulti_two_interacting_blast_waves.jl index 94bd632fab0..1083b0d3425 100644 --- a/examples/tree_1d_dgsem/elixir_eulermulti_two_interacting_blast_waves.jl +++ b/examples/tree_1d_dgsem/elixir_eulermulti_two_interacting_blast_waves.jl @@ -55,7 +55,14 @@ function boundary_condition_two_interacting_blast_waves(u_inner, orientation, di end boundary_conditions = boundary_condition_two_interacting_blast_waves -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_1d_dgsem/elixir_mhd_alfven_wave.jl b/examples/tree_1d_dgsem/elixir_mhd_alfven_wave.jl index 7de8a982fe8..f55656e5645 100644 --- a/examples/tree_1d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/tree_1d_dgsem/elixir_mhd_alfven_wave.jl @@ -9,7 +9,14 @@ equations = IdealGlmMhdEquations1D(gamma) initial_condition = initial_condition_convergence_test volume_flux = flux_hindenlang_gassner -solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = 0.0 diff --git a/examples/tree_1d_dgsem/elixir_mhd_torrilhon_shock_tube.jl b/examples/tree_1d_dgsem/elixir_mhd_torrilhon_shock_tube.jl index 12ebbd36720..be80bf681ea 100644 --- a/examples/tree_1d_dgsem/elixir_mhd_torrilhon_shock_tube.jl +++ b/examples/tree_1d_dgsem/elixir_mhd_torrilhon_shock_tube.jl @@ -31,7 +31,14 @@ initial_condition = initial_condition_torrilhon_shock_tube boundary_conditions = BoundaryConditionDirichlet(initial_condition) -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_central basis = LobattoLegendreBasis(3) diff --git a/examples/tree_1d_dgsem/elixir_mhdmulti_briowu_shock_tube.jl b/examples/tree_1d_dgsem/elixir_mhdmulti_briowu_shock_tube.jl index 8180602006c..b5f776b808b 100644 --- a/examples/tree_1d_dgsem/elixir_mhdmulti_briowu_shock_tube.jl +++ b/examples/tree_1d_dgsem/elixir_mhdmulti_briowu_shock_tube.jl @@ -47,7 +47,14 @@ initial_condition = initial_condition_briowu_shock_tube boundary_conditions = BoundaryConditionDirichlet(initial_condition) -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_hindenlang_gassner basis = LobattoLegendreBasis(3) diff --git a/examples/tree_1d_dgsem/elixir_mhdmulti_convergence.jl b/examples/tree_1d_dgsem/elixir_mhdmulti_convergence.jl index f0f3e64d28d..2ac008e60b4 100644 --- a/examples/tree_1d_dgsem/elixir_mhdmulti_convergence.jl +++ b/examples/tree_1d_dgsem/elixir_mhdmulti_convergence.jl @@ -10,7 +10,14 @@ equations = IdealGlmMhdMulticomponentEquations1D(gammas = (5 / 3, 5 / 3, 5 / 3), initial_condition = initial_condition_convergence_test volume_flux = flux_hindenlang_gassner -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = 0.0 diff --git a/examples/tree_1d_dgsem/elixir_mhdmulti_es.jl b/examples/tree_1d_dgsem/elixir_mhdmulti_es.jl index ad9b47aa1d8..fff1f0f32c0 100644 --- a/examples/tree_1d_dgsem/elixir_mhdmulti_es.jl +++ b/examples/tree_1d_dgsem/elixir_mhdmulti_es.jl @@ -9,7 +9,14 @@ equations = IdealGlmMhdMulticomponentEquations1D(gammas = (2.0, 2.0, 2.0), initial_condition = initial_condition_weak_blast_wave volume_flux = flux_hindenlang_gassner -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = 0.0 diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl index b6e080cf5a7..2b9979db443 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = -1.0 diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl index 2049fa6ade6..cb7b4310b6e 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu = mu(), gradient_variables = GradientVariablesEntropy()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = -1.0 diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl index 48e257a33c5..80597cab362 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -102,23 +102,19 @@ mesh = TreeMesh(coordinates_min, coordinates_max, ### Inviscid boundary conditions ### # Prescribe pure influx based on initial conditions -function boundary_condition_inflow(u_inner, orientation::Integer, normal_direction, x, t, +function boundary_condition_inflow(u_inner, orientation::Integer, direction, x, t, surface_flux_function, equations::CompressibleEulerEquations1D) u_cons = initial_condition_viscous_shock(x, t, equations) - flux = Trixi.flux(u_cons, orientation, equations) - - return flux + return flux(u_cons, orientation, equations) end # Completely free outflow -function boundary_condition_outflow(u_inner, orientation::Integer, normal_direction, x, t, +function boundary_condition_outflow(u_inner, orientation::Integer, direction, x, t, surface_flux_function, equations::CompressibleEulerEquations1D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, orientation, equations) - - return flux + return flux(u_inner, orientation, equations) end boundary_conditions = (; x_neg = boundary_condition_inflow, diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock_imex.jl b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock_imex.jl new file mode 100644 index 00000000000..18f1df5bd28 --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock_imex.jl @@ -0,0 +1,179 @@ +using Trixi +using OrdinaryDiffEqBDF # BDF subpackage exports IMEX methods +using LinearSolve # For Jacobian-free Newton-Krylov (GMRES) solver 
+using ADTypes # To access the types choosing how to evaluate Jacobian-vector products + +# This is the classic 1D viscous shock wave problem with analytical solution +# for a special value of the Prandtl number. +# The original references are: +# +# - R. Becker (1922) +# Stoßwelle und Detonation. +# [DOI: 10.1007/BF01329605](https://doi.org/10.1007/BF01329605) +# +# English translations: +# Impact waves and detonation. Part I. +# https://ntrs.nasa.gov/api/citations/19930090862/downloads/19930090862.pdf +# Impact waves and detonation. Part II. +# https://ntrs.nasa.gov/api/citations/19930090863/downloads/19930090863.pdf +# +# - M. Morduchow, P. A. Libby (1949) +# On a Complete Solution of the One-Dimensional Flow Equations +# of a Viscous, Head-Conducting, Compressible Gas +# [DOI: 10.2514/8.11882](https://doi.org/10.2514/8.11882) +# +# +# The particular problem considered here is described in +# - L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) +# Entropy in self-similar shock profiles +# [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) + +### Fixed parameters ### + +# Special value for which nonlinear solver can be omitted +# Corresponds essentially to fixing the Mach number +alpha = 0.5 +# We want kappa = cp * mu = mu_bar to ensure constant enthalpy +prandtl_number() = 1 + +### Free choices: ### +gamma() = 5 / 3 + +mu() = 0.15 +mu_bar() = mu() / (gamma() - 1) # Re-scaled viscosity + +rho_0() = 1 +v() = 1 # Shock speed + +domain_length = 4.0 + +### Derived quantities ### + +Ma() = 2 / sqrt(3 - gamma()) # Mach number for alpha = 0.5 +c_0() = v() / Ma() # Speed of sound ahead of the shock + +# From constant enthalpy condition +p_0() = c_0()^2 * rho_0() / gamma() + +l() = mu_bar() / (rho_0() * v()) * 2 * gamma() / (gamma() + 1) # Appropriate length scale + +""" + initial_condition_viscous_shock(x, t, equations) + +Classic 1D viscous shock wave problem with analytical solution +for a special value of the Prandtl number. 
+The version implemented here is described in +- L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) + Entropy in self-similar shock profiles + [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) +""" +function initial_condition_viscous_shock(x, t, equations) + y = x[1] - v() * t # Translated coordinate + + # Coordinate transformation. See eq. (33) in Margolin et al. (2017) + chi = 2 * exp(y / (2 * l())) + + w = 1 + 1 / (2 * chi^2) * (1 - sqrt(1 + 2 * chi^2)) + + rho = rho_0() / w + u = v() * (1 - w) + p = p_0() * 1 / w * (1 + (gamma() - 1) / 2 * Ma()^2 * (1 - w^2)) + + return prim2cons(SVector(rho, u, p), equations) +end +initial_condition = initial_condition_viscous_shock + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +equations = CompressibleEulerEquations1D(gamma()) +equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu = mu_bar(), + Prandtl = prandtl_number(), + gradient_variables = GradientVariablesPrimitive()) + +solver = DGSEM(polydeg = 3, surface_flux = flux_hlle) + +coordinates_min = -domain_length / 2 +coordinates_max = domain_length / 2 + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + periodicity = false, + n_cells_max = 30_000) + +### Inviscid boundary conditions ### + +# Prescribe pure influx based on initial conditions +function boundary_condition_inflow(u_inner, orientation::Integer, normal_direction, x, t, + surface_flux_function, + equations::CompressibleEulerEquations1D) + u_cons = initial_condition_viscous_shock(x, t, equations) + return flux(u_cons, orientation, equations) +end + +boundary_conditions = (; x_neg = boundary_condition_inflow, + x_pos = boundary_condition_do_nothing) + +### Viscous boundary conditions ### +# For the viscous BCs, we use the known analytical solution +velocity_bc = NoSlip() do x, t, equations_parabolic + 
Trixi.velocity(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +boundary_condition_parabolic = BoundaryConditionNavierStokesWall(velocity_bc, heat_bc) + +boundary_conditions_parabolic = (; x_neg = boundary_condition_parabolic, + x_pos = boundary_condition_parabolic) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + solver_parabolic = ViscousFormulationLocalDG(), + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### + +tspan = (0.0, 0.75) +# For hyperbolic-parabolic problems, this results in a SciML SplitODEProblem, see e.g. +# https://docs.sciml.ai/DiffEqDocs/stable/types/split_ode_types/#SciMLBase.SplitODEProblem +# These exactly fit IMEX (implicit-explicit) integrators +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +alive_callback = AliveCallback(alive_interval = 100) + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + +############################################################################### + +# Tolerances for GMRES residual, see https://jso.dev/Krylov.jl/stable/solvers/unsymmetric/#Krylov.gmres +atol_lin_solve = 1e-4 +rtol_lin_solve = 1e-4 + +# Jacobian-free Newton-Krylov (GMRES) solver +linsolve = KrylovJL_GMRES(atol = atol_lin_solve, rtol = rtol_lin_solve) + +# Choice of method: +# https://docs.sciml.ai/OrdinaryDiffEq/stable/imex/IMEXBDF/#Solver-Selection-Guide +# higher order methods (`SBDF3` and `SBDF4`) have trouble converging. +# +# Use IMEX Runge-Kutta method with Jacobian-free (!) 
Newton-Krylov (GMRES) implicit solver, see +# https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Using-Jacobian-Free-Newton-Krylov +ode_alg = SBDF2(autodiff = AutoFiniteDiff(), linsolve = linsolve) + +sol = solve(ode, ode_alg; dt = 0.05, # Fixed timestep + ode_default_options()..., callback = callbacks); diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl index 4681735cd52..5a26fd9283b 100644 --- a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl +++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl @@ -34,13 +34,11 @@ function outflow(x, t, equations::TrafficFlowLWREquations1D) end boundary_condition_outflow = BoundaryConditionDirichlet(outflow) -function boundary_condition_inflow(u_inner, orientation, normal_direction, x, t, +function boundary_condition_inflow(u_inner, orientation, direction, x, t, surface_flux_function, equations::TrafficFlowLWREquations1D) # Calculate the boundary flux entirely from the internal solution state - flux = Trixi.flux(u_inner, orientation, equations) - - return flux + return flux(u_inner, orientation, equations) end boundary_conditions = (x_neg = boundary_condition_outflow, diff --git a/examples/tree_2d_dgsem/elixir_acoustics_convergence.jl b/examples/tree_2d_dgsem/elixir_acoustics_convergence.jl index b1799148560..d20841a187b 100644 --- a/examples/tree_2d_dgsem/elixir_acoustics_convergence.jl +++ b/examples/tree_2d_dgsem/elixir_acoustics_convergence.jl @@ -10,7 +10,15 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.5, 0.3), c_mean_g initial_condition = initial_condition_convergence_test # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = 
FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max = (2.0, 2.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_acoustics_gauss.jl b/examples/tree_2d_dgsem/elixir_acoustics_gauss.jl index 20549294a1c..6725df1cfa9 100644 --- a/examples/tree_2d_dgsem/elixir_acoustics_gauss.jl +++ b/examples/tree_2d_dgsem/elixir_acoustics_gauss.jl @@ -10,7 +10,15 @@ rho_mean_global = 1.0 equations = AcousticPerturbationEquations2D(v_mean_global, c_mean_global, rho_mean_global) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_acoustics_gauss_wall.jl b/examples/tree_2d_dgsem/elixir_acoustics_gauss_wall.jl index d5d37c74732..bca7f50c3ec 100644 --- a/examples/tree_2d_dgsem/elixir_acoustics_gauss_wall.jl +++ b/examples/tree_2d_dgsem/elixir_acoustics_gauss_wall.jl @@ -8,7 +8,14 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.5, 0.0), c_mean_g rho_mean_global = 1.0) # Create DG solver with polynomial degree = 5 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 5, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 5, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-100.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max = (100.0, 200.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl b/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl index ea8a4d0bd0e..f98a8a34d6f 100644 --- a/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl +++ b/examples/tree_2d_dgsem/elixir_acoustics_gaussian_source.jl @@ -30,7 +30,15 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (-0.5, 0.25), initial_condition = initial_condition_constant # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-3.0, -3.0) # minimum coordinates (min(x), min(y)) coordinates_max = (3.0, 3.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_acoustics_monopole.jl b/examples/tree_2d_dgsem/elixir_acoustics_monopole.jl index a4eb556d61b..8282131e722 100644 --- a/examples/tree_2d_dgsem/elixir_acoustics_monopole.jl +++ b/examples/tree_2d_dgsem/elixir_acoustics_monopole.jl @@ -8,7 +8,15 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.0, 0.0), c_mean_g rho_mean_global = 0.0) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-20.6, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max = (30.6, 51.2) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl b/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl index 04c9818cebb..f6a44153b95 100644 --- a/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl +++ b/examples/tree_2d_dgsem/elixir_advection_amr_visualization.jl @@ -49,7 +49,7 @@ save_solution = SaveSolutionCallback(interval = 100, # Enable in-situ visualization with a new plot generated every 20 time steps # and additional plotting options passed as keyword arguments -visualization = VisualizationCallback(interval = 20, clims = (0, 1)) +visualization = VisualizationCallback(semi; interval = 20, clims = (0, 1)) amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), base_level = 3, diff --git a/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian.jl b/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian.jl new file mode 100644 index 00000000000..e2996ec7580 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian.jl @@ -0,0 +1,90 @@ +using Trixi +using SparseConnectivityTracer # For obtaining the Jacobian sparsity pattern +using SparseMatrixColorings # For obtaining the coloring vector +using OrdinaryDiffEqSDIRK, ADTypes + +############################################################################### +### equation, solver, mesh ### + +advection_velocity = (0.2, -0.7) +equation = LinearScalarAdvectionEquation2D(advection_velocity) + +solver = DGSEM(polydeg = 3, surface_flux = flux_godunov) + +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + 
+############################################################################### +### semidiscretization for sparsity detection ### + +jac_detector = TracerSparsityDetector() +# We need to construct the semidiscretization with the correct +# sparsity-detection ready datatype, which is retrieved here +jac_eltype = jacobian_eltype(real(solver), jac_detector) + +# Semidiscretization for sparsity pattern detection +semi_jac_type = SemidiscretizationHyperbolic(mesh, equation, + initial_condition_convergence_test, solver, + uEltype = jac_eltype) # Need to supply Jacobian element type + +tspan = (0.0, 1.0) # Re-used for wrapping `rhs` below + +# Call `semidiscretize` to create the ODE problem to have access to the +# initial condition based on which the sparsity pattern is computed +ode_jac_type = semidiscretize(semi_jac_type, tspan) +u0_ode = ode_jac_type.u0 +du_ode = similar(u0_ode) + +############################################################################### +### Compute the Jacobian sparsity pattern ### + +# Wrap the `Trixi.rhs!` function to match the signature `f!(du, u)`, see +# https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/api/#ADTypes.jacobian_sparsity +rhs_wrapped! = (du_ode, u0_ode) -> Trixi.rhs!(du_ode, u0_ode, semi_jac_type, tspan[1]) + +jac_prototype = jacobian_sparsity(rhs_wrapped!, du_ode, u0_ode, jac_detector) + +# For most efficient solving we also want the coloring vector + +coloring_prob = ColoringProblem(; structure = :nonsymmetric, partition = :column) +coloring_alg = GreedyColoringAlgorithm(; decompression = :direct) +coloring_result = coloring(jac_prototype, coloring_prob, coloring_alg) +coloring_vec = column_colors(coloring_result) + +############################################################################### +### sparsity-aware semidiscretization and ode ### + +# Semidiscretization for actual simulation. 
`eEltype` is here retrieved from `solver` +semi_float_type = SemidiscretizationHyperbolic(mesh, equation, + initial_condition_convergence_test, + solver) + +# Supply Jacobian prototype and coloring vector to the semidiscretization +ode_jac_sparse = semidiscretize(semi_float_type, tspan, + jac_prototype = jac_prototype, + colorvec = coloring_vec) +# using "dense" `ode = semidiscretize(semi_float_type, tspan)` is 10-15 times slower! + +############################################################################### +### callbacks & solve ### + +summary_callback = SummaryCallback() +analysis_callback = AnalysisCallback(semi_float_type, interval = 10) +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +# Note: No `stepsize_callback` due to (implicit) solver with adaptive timestep control +callbacks = CallbackSet(summary_callback, analysis_callback, save_restart) + +############################################################################### +### solve the ODE problem ### + +sol = solve(ode_jac_sparse, + # Default `AutoForwardDiff()` is not yet working, see + # https://github.com/trixi-framework/Trixi.jl/issues/2369 + TRBDF2(; autodiff = AutoFiniteDiff()); + dt = 0.1, save_everystep = false, callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian_restart.jl b/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian_restart.jl new file mode 100644 index 00000000000..645738599fd --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_advection_implicit_sparse_jacobian_restart.jl @@ -0,0 +1,29 @@ +using Trixi + +############################################################################### +# create a restart file + +elixir_file = "elixir_advection_implicit_sparse_jacobian.jl" +restart_file = "restart_000000006.h5" + +trixi_include(@__MODULE__, joinpath(@__DIR__, elixir_file)) + +############################################################################### + +restart_filename = 
joinpath("out", restart_file) +tspan = (load_time(restart_filename), 2.0) +dt_restart = load_dt(restart_filename) + +ode_jac_sparse = semidiscretize(semi_float_type, tspan, + restart_filename, + jac_prototype = jac_prototype, + colorvec = coloring_vec) + +############################################################################### +# run the simulation + +sol = solve(ode_jac_sparse, + # Default `AutoForwardDiff()` is not yet working, see + # https://github.com/trixi-framework/Trixi.jl/issues/2369 + TRBDF2(; autodiff = AutoFiniteDiff()); + adaptive = true, dt = dt_restart, save_everystep = false, callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_astro_jet_amr.jl b/examples/tree_2d_dgsem/elixir_euler_astro_jet_amr.jl index e4ca14334c7..658b39d566b 100644 --- a/examples/tree_2d_dgsem/elixir_euler_astro_jet_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_astro_jet_amr.jl @@ -35,7 +35,14 @@ boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition_astr y_neg = boundary_condition_periodic, y_pos = boundary_condition_periodic) -surface_flux = flux_lax_friedrichs # HLLC needs more shock capturing (alpha_max) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) # HLLC needs more shock capturing (alpha_max) volume_flux = flux_ranocha # works with Chandrashekar flux as well polydeg = 3 basis = LobattoLegendreBasis(polydeg) @@ -80,6 +87,11 @@ save_solution = SaveSolutionCallback(interval = 5000, save_final_solution = true, solution_variables = cons2prim) +# positivity limiter necessary for this tough example +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorHennemannGassner(semi, alpha_max = 1.0, alpha_min = 0.0001, @@ -95,18 +107,15 @@ amr_controller = ControllerThreeLevelCombined(semi, amr_indicator, indicator_sc, amr_callback = AMRCallback(semi, amr_controller, interval = 1, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback, save_solution) -# positivity limiter necessary for this tough example -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation # use adaptive time stepping based on error estimates, time step roughly dt = 1e-7 -sol = solve(ode, SSPRK43(stage_limiter!); +sol = solve(ode, SSPRK43(stage_limiter! 
= positivity_limiter); ode_default_options()..., callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave.jl index 3fa600e2eee..d903e406b98 100644 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave.jl +++ b/examples/tree_2d_dgsem/elixir_euler_blast_wave.jl @@ -35,7 +35,14 @@ function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquation end initial_condition = initial_condition_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_amr.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_amr.jl index d1c1a087548..752b1c6e714 100644 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_blast_wave_amr.jl @@ -35,7 +35,14 @@ function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquation end initial_condition = initial_condition_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl index fd1a237877c..f891b74d97c 100644 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl +++ b/examples/tree_2d_dgsem/elixir_euler_blast_wave_sc_subcell_nonperiodic.jl @@ -36,7 +36,14 @@ initial_condition = initial_condition_blast_wave boundary_condition = BoundaryConditionDirichlet(initial_condition) -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; diff --git a/examples/tree_2d_dgsem/elixir_euler_blob_amr.jl b/examples/tree_2d_dgsem/elixir_euler_blob_amr.jl index 2bb441f48cd..9d08daf4bd8 100644 --- a/examples/tree_2d_dgsem/elixir_euler_blob_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_blob_amr.jl @@ -53,7 +53,14 @@ function initial_condition_blob(x, t, equations::CompressibleEulerEquations2D) end initial_condition = initial_condition_blob -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(4) diff --git a/examples/tree_2d_dgsem/elixir_euler_blob_mortar.jl b/examples/tree_2d_dgsem/elixir_euler_blob_mortar.jl index 4c0f727d0c6..783219f7c83 100644 --- a/examples/tree_2d_dgsem/elixir_euler_blob_mortar.jl +++ b/examples/tree_2d_dgsem/elixir_euler_blob_mortar.jl @@ -53,7 +53,14 @@ function initial_condition_blob(x, t, equations::CompressibleEulerEquations2D) end initial_condition = initial_condition_blob -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) diff --git a/examples/tree_2d_dgsem/elixir_euler_colliding_flow.jl b/examples/tree_2d_dgsem/elixir_euler_colliding_flow.jl index b1837ee62d7..2077df56846 100644 --- a/examples/tree_2d_dgsem/elixir_euler_colliding_flow.jl +++ b/examples/tree_2d_dgsem/elixir_euler_colliding_flow.jl @@ -38,7 +38,14 @@ boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition_coll y_neg = boundary_condition_periodic, y_pos = boundary_condition_periodic) -surface_flux = flux_lax_friedrichs # HLLC needs more shock capturing (alpha_max) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) # HLLC needs more shock capturing (alpha_max) volume_flux = flux_ranocha # works with Chandrashekar flux as well polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr.jl b/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr.jl index 2b3c589f544..24421aefaa4 100644 --- a/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr.jl @@ -38,7 +38,14 @@ boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition_coll y_neg = boundary_condition_periodic, y_pos = boundary_condition_periodic) -surface_flux = flux_lax_friedrichs # HLLC needs more shock capturing (alpha_max) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) # HLLC needs more shock capturing (alpha_max) volume_flux = flux_ranocha # works with Chandrashekar flux as well polydeg = 3 basis = LobattoLegendreBasis(polydeg) @@ -82,6 +89,11 @@ save_solution = SaveSolutionCallback(interval = 1000, save_final_solution = true, solution_variables = cons2prim) +# positivity limiter necessary for this tough example +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + # Simulation also feasible without AMR: AMR reduces CPU time by a factor of about 2 amr_indicator = IndicatorHennemannGassner(semi, alpha_max = 1.0, @@ -98,18 +110,15 @@ amr_controller = ControllerThreeLevelCombined(semi, amr_indicator, indicator_sc, amr_callback = AMRCallback(semi, amr_controller, interval = 1, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback, save_solution) -# positivity limiter necessary for this tough example -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation # use adaptive time stepping based on error estimates, time step roughly dt = 5e-3 -sol = solve(ode, SSPRK43(stage_limiter!); +sol = solve(ode, SSPRK43(stage_limiter! 
= positivity_limiter); ode_default_options()..., callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr_entropy_bounded.jl b/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr_entropy_bounded.jl index a0c0e2536f7..4eba2b62374 100644 --- a/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr_entropy_bounded.jl +++ b/examples/tree_2d_dgsem/elixir_euler_colliding_flow_amr_entropy_bounded.jl @@ -38,7 +38,14 @@ boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition_coll y_neg = boundary_condition_periodic, y_pos = boundary_condition_periodic) -surface_flux = flux_lax_friedrichs # HLLC needs more shock capturing (alpha_max) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) # HLLC needs more shock capturing (alpha_max) volume_flux = flux_ranocha # works with Chandrashekar flux as well polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_euler_density_wave.jl b/examples/tree_2d_dgsem/elixir_euler_density_wave.jl index 3cdebcedee6..4b895ebed4f 100644 --- a/examples/tree_2d_dgsem/elixir_euler_density_wave.jl +++ b/examples/tree_2d_dgsem/elixir_euler_density_wave.jl @@ -3,8 +3,7 @@ using Trixi ############################################################################### # semidiscretization of the compressible Euler equations -gamma = 1.4 -equations = CompressibleEulerEquations2D(gamma) +equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_density_wave diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability.jl index 3c8b0b08f47..28ee9ca2b6e 100644 --- a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability.jl +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability.jl @@ -31,7 +31,14 @@ function initial_condition_kelvin_helmholtz_instability(x, t, end initial_condition = initial_condition_kelvin_helmholtz_instability -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr.jl index ab923b57a39..5a287b33e4b 100644 --- a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr.jl @@ -31,7 +31,14 @@ function initial_condition_kelvin_helmholtz_instability(x, t, end initial_condition = initial_condition_kelvin_helmholtz_instability -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl index 192d4ee87ab..5d47dec8f96 100644 --- a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl @@ -30,7 +30,14 @@ function initial_condition_kelvin_helmholtz_instability(x, t, end initial_condition = initial_condition_kelvin_helmholtz_instability -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_euler_positivity.jl b/examples/tree_2d_dgsem/elixir_euler_positivity.jl index 4c540953a5d..4e6f6eb2b19 100644 --- a/examples/tree_2d_dgsem/elixir_euler_positivity.jl +++ b/examples/tree_2d_dgsem/elixir_euler_positivity.jl @@ -38,7 +38,14 @@ function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEq end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, @@ -77,16 +84,23 @@ save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorLöhner(semi, variable = density_pressure) + amr_controller = ControllerThreeLevel(semi, amr_indicator, base_level = 4, med_level = 0, med_threshold = 0.1, # med_level = current level max_level = 6, max_threshold = 0.3) + amr_callback = AMRCallback(semi, amr_controller, interval = 2, adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) stepsize_callback = StepsizeCallback(cfl = 0.8) @@ -95,12 +109,11 @@ callbacks = CallbackSet(summary_callback, save_solution, amr_callback, stepsize_callback) -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (5.0e-6, 5.0e-6), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation -sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition = false); +sol = solve(ode, + CarpenterKennedy2N54(stage_limiter! 
= positivity_limiter, + williamson_condition = false); dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback ode_default_options()..., callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_riemannproblem_quadrants_amr.jl b/examples/tree_2d_dgsem/elixir_euler_riemannproblem_quadrants_amr.jl new file mode 100644 index 00000000000..3c898ae9d50 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_riemannproblem_quadrants_amr.jl @@ -0,0 +1,169 @@ +using OrdinaryDiffEqSSPRK +using Trixi + +############################################################################### +## Semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Variant of the 4-quadrant Riemann problem considered in +# - Carsten W. Schulz-Rinne: +# Classification of the Riemann Problem for Two-Dimensional Gas Dynamics +# https://doi.org/10.1137/0524006 +# and +# - Carsten W. Schulz-Rinne, James P. Collins, and Harland M. Glaz +# Numerical Solution of the Riemann Problem for Two-Dimensional Gas Dynamics +# https://doi.org/10.1137/0914082 +function initial_condition_rp(x_, t, equations::CompressibleEulerEquations2D) + x, y = x_[1], x_[2] + + if x >= 0.5 && y >= 0.5 # 1st quadrant + rho, v1, v2, p = (0.5313, 0.0, 0.0, 0.4) + elseif x < 0.5 && y >= 0.5 # 2nd quadrant + rho, v1, v2, p = (1.0, 0.7276, 0.0, 1.0) + elseif x < 0.5 && y < 0.5 # 3rd quadrant + rho, v1, v2, p = (0.8, 0.0, 0.0, 1.0) + elseif x >= 0.5 && y < 0.5 # 4th quadrant + rho, v1, v2, p = (1.0, 0.0, 0.7276, 1.0) + end + + prim = SVector(rho, v1, v2, p) + return prim2cons(prim, equations) +end +initial_condition = initial_condition_rp + +# See Section 2.3 of the reference below for a discussion of robust +# subsonic inflow/outflow boundary conditions. +# +# - Jan-Reneé Carlson (2011) +# Inflow/Outflow Boundary Conditions with Application to FUN3D. 
+# [NASA TM 20110022658](https://ntrs.nasa.gov/citations/20110022658) +@inline function boundary_condition_subsonic(u_inner, orientation::Integer, + direction, x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + rho_loc, v1_loc, v2_loc, p_loc = cons2prim(u_inner, equations) + + # For subsonic boundary: Take pressure from initial condition + p_loc = pressure(initial_condition_rp(x, t, equations), equations) + + prim = SVector(rho_loc, v1_loc, v2_loc, p_loc) + u_surface = prim2cons(prim, equations) + + return flux(u_surface, orientation, equations) +end + +# The flow is subsonic at all boundaries. +# For small enough simulation times, the solution remains at the initial condition +# *along the boundaries* of quadrants 2, 3, and 4. +# In quadrants 2 and 4 there are non-zero velocity components (v1 in quadrant 2, v2 in quadrant 4) +# normal to the boundary, which is troublesome for the `boundary_condition_do_nothing`. +# Thus, the `boundary_condition_subsonic` are used instead. 
+boundary_conditions = (x_neg = boundary_condition_subsonic, + x_pos = boundary_condition_do_nothing, + y_neg = boundary_condition_subsonic, + y_pos = boundary_condition_do_nothing) + +coordinates_min = (0.0, 0.0) +coordinates_max = (1.0, 1.0) + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 100_000, + periodicity = false) + +# HLLC flux is strictly required for this problem +surface_flux = flux_hllc +volume_flux = flux_ranocha + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) +shock_indicator = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = Trixi.density) + +volume_integral = VolumeIntegralShockCapturingHG(shock_indicator; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +# Specialized function for computing coefficients of `func`, +# here the discontinuous `initial_condition_rp`. +# +# Shift the outer (i.e., ±1 on the reference element) nodes passed to the +# `func` inwards by the smallest amount possible, i.e., [-1 + ϵ, +1 - ϵ]. +# This avoids steep gradients in elements if a discontinuity is right at a cell boundary, +# i.e., if the jump location `x_jump` is at the position of an interface which is shared by +# the nodes x_{e-1}^{(i)} = x_{e}^{(1)}. 
+# +# In particular, this results in the typically desired behaviour for +# initial conditions of the form +# { u_1, if x <= x_jump +# u(x, t) = { +# { u_2, if x > x_jump +function Trixi.compute_coefficients!(backend::Nothing, u, + func::typeof(initial_condition_rp), t, + mesh::TreeMesh{2}, equations, dg::DG, cache) + Trixi.@threaded for element in eachelement(dg, cache) + for j in eachnode(dg), i in eachnode(dg) + x_node = Trixi.get_node_coords(cache.elements.node_coordinates, equations, dg, + i, j, element) + if i == 1 # left boundary node + x_node = SVector(nextfloat(x_node[1]), x_node[2]) + elseif i == nnodes(dg) # right boundary node + x_node = SVector(prevfloat(x_node[1]), x_node[2]) + end + if j == 1 # bottom boundary node + x_node = SVector(x_node[1], nextfloat(x_node[2])) + elseif j == nnodes(dg) # top boundary node + x_node = SVector(x_node[1], prevfloat(x_node[2])) + end + + u_node = func(x_node, t, equations) + Trixi.set_node_vars!(u, u_node, equations, dg, i, j, element) + end + end +end + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; + boundary_conditions = boundary_conditions) + +############################################################################### +## ODE solvers, callbacks etc. 
+ +tspan = (0.0, 0.25) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 2.4) + +amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 3, + med_level = 5, med_threshold = 0.01, + max_level = 8, max_threshold = 0.02) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + amr_callback, + stepsize_callback) + +############################################################################### +## Run the simulation + +sol = solve(ode, SSPRK54(); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave.jl b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave.jl index 8fbdf9ffd6d..6a87ae7aeab 100644 --- a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave.jl +++ b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave.jl @@ -38,7 +38,14 @@ function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEq end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_chandrashekar basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl index 6517567fef6..d63e3cd06ea 100644 --- a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_sc_subcell.jl @@ -37,7 +37,14 @@ function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEq end initial_condition = initial_condition_sedov_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_chandrashekar basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; diff --git a/examples/tree_2d_dgsem/elixir_euler_shockcapturing.jl b/examples/tree_2d_dgsem/elixir_euler_shockcapturing.jl index 805aeed576d..ed12ca28753 100644 --- a/examples/tree_2d_dgsem/elixir_euler_shockcapturing.jl +++ b/examples/tree_2d_dgsem/elixir_euler_shockcapturing.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_weak_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_shima_etal basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl index 018ce9673de..181b51a1b7a 100644 --- a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl @@ -34,7 +34,14 @@ function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquation end initial_condition = initial_condition_blast_wave -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; diff --git a/examples/tree_2d_dgsem/elixir_euler_source_terms.jl b/examples/tree_2d_dgsem/elixir_euler_source_terms.jl index 93755f3f840..6f7f030fc47 100644 --- a/examples/tree_2d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_2d_dgsem/elixir_euler_source_terms.jl @@ -7,7 +7,14 @@ using Trixi equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_source_terms_amr_refine_coarsen.jl b/examples/tree_2d_dgsem/elixir_euler_source_terms_amr_refine_coarsen.jl index 8bf2a697d94..06fa6f0a6bd 100644 --- a/examples/tree_2d_dgsem/elixir_euler_source_terms_amr_refine_coarsen.jl +++ b/examples/tree_2d_dgsem/elixir_euler_source_terms_amr_refine_coarsen.jl @@ -52,7 +52,14 @@ import .TrixiExtensionEulerAMR equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/tree_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl index 3a99eb762f4..9f0bcbda2c2 100644 --- a/examples/tree_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl +++ b/examples/tree_2d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -16,7 +16,14 @@ boundary_conditions = (x_neg = boundary_condition, y_neg = boundary_condition, y_pos = boundary_condition) -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_subsonic_constant.jl b/examples/tree_2d_dgsem/elixir_euler_subsonic_constant.jl new file mode 100644 index 00000000000..f40ce8a9c1e --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_subsonic_constant.jl @@ -0,0 +1,104 @@ +using OrdinaryDiffEqSSPRK +using Trixi + +############################################################################### +## Semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +@inline function initial_condition_subsonic(x_, t, equations::CompressibleEulerEquations2D) + rho, v1, v2, p = (0.5313, 0.0, 0.0, 0.4) + + prim = SVector(rho, v1, v2, p) + return prim2cons(prim, equations) +end + +initial_condition = initial_condition_subsonic + +# Calculate the boundary flux from the inner state while using the pressure from the outer state +# when the flow is subsonic (which is always the case in this example). + +# If the naive approach of only using the inner state is used, the errors increase with the +# increase of refinement level, see https://github.com/trixi-framework/Trixi.jl/issues/2530 +# These errors arise from the corner points in this test. + +# See the reference below for a discussion on inflow/outflow boundary conditions. The subsonic +# outflow boundary conditions are discussed in Section 2.3. +# +# - Jan-Reneé Carlson (2011) +# Inflow/Outflow Boundary Conditions with Application to FUN3D. 
+# [NASA TM 20110022658](https://ntrs.nasa.gov/citations/20110022658) +@inline function boundary_condition_outflow_general(u_inner, orientation::Integer, + direction, x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + rho_local, vx_local, vy_local, p_local = cons2prim(u_inner, equations) + a_local = sqrt(equations.gamma * p_local / rho_local) + v_mag = sqrt(vx_local^2 + vy_local^2) + Mach_local = abs(v_mag / a_local) + if Mach_local <= 1 # The `if` is not needed in this elixir but kept for generality + # In general, `p_local` need not be available from the initial condition + p_local = pressure(initial_condition_subsonic(x, t, equations), equations) + end + + prim = SVector(rho_local, vx_local, vy_local, p_local) + u_surface = prim2cons(prim, equations) + + return flux(u_surface, orientation, equations) +end + +boundary_conditions = (x_neg = boundary_condition_outflow_general, + x_pos = boundary_condition_outflow_general, + y_neg = boundary_condition_outflow_general, + y_pos = boundary_condition_outflow_general) + +coordinates_min = (0.0, 0.0) +coordinates_max = (1.0, 1.0) + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + periodicity = false, n_cells_max = 512^2 * 16) + +surface_flux = flux_lax_friedrichs + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +volume_integral = VolumeIntegralWeakForm() + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +## ODE solvers, callbacks etc. 
+ +tspan = (0.0, 0.25) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = 100) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 0.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +## Run the simulation +sol = solve(ode, SSPRK54(); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex.jl b/examples/tree_2d_dgsem/elixir_euler_vortex.jl index 2ef5daf2a6c..9ce4bf71832 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex.jl @@ -49,7 +49,14 @@ function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerE return prim2cons(prim, equations) end initial_condition = initial_condition_isentropic_vortex -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. 
+# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-10.0, -10.0) coordinates_max = (10.0, 10.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl index 72808a9331e..99565e981bf 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl @@ -114,7 +114,14 @@ function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerE return prim2cons(prim, equations) end initial_condition = initial_condition_isentropic_vortex -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-10.0, -10.0) coordinates_max = (10.0, 10.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_er.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_er.jl new file mode 100644 index 00000000000..0eff63e43c2 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_er.jl @@ -0,0 +1,105 @@ +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +# Ratio of specific heats +gamma = 1.4 +equations = CompressibleEulerEquations2D(gamma) + +EdgeLength = 20.0 + +N_passes = 1 +T_end = EdgeLength * N_passes +tspan = (0.0, T_end) + +""" + initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerEquations2D) + +The classical isentropic vortex test case as presented in +https://spectrum.library.concordia.ca/id/eprint/985444/1/Paired-explicit-Runge-Kutta-schemes-for-stiff-sy_2019_Journal-of-Computation.pdf +""" +function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerEquations2D) + # Evaluate error after full domain traversion + if t == T_end + t = 0 + end + + # initial center of the vortex + inicenter = SVector(0.0, 0.0) + # strength of the vortex + S = 13.5 + # Radius of vortex + R = 1.5 + # Free-stream Mach + M = 0.4 + # base flow + v1 = 1.0 + v2 = 1.0 + vel = SVector(v1, v2) + + cent = inicenter + vel * t # advection of center + cent = x - cent # distance to centerpoint + cent = SVector(cent[2], -cent[1]) + r2 = cent[1]^2 + cent[2]^2 + + f = (1 - r2) / (2 * R^2) + + rho = (1 - (S * M / pi)^2 * (gamma - 1) * exp(2 * f) / 8)^(1 / (gamma - 1)) + + du = S / (2 * π * R) * exp(f) # vel. 
perturbation + vel = vel + du * cent + v1, v2 = vel + + p = rho^gamma / (gamma * M^2) + prim = SVector(rho, v1, v2, p) + return prim2cons(prim, equations) +end +initial_condition = initial_condition_isentropic_vortex + +# Volume flux stabilizes the simulation - in contrast to standard DGSEM with +# `surface_flux = flux_ranocha` only (which crashes). +# To turn this into a convergence test, use a flux with some dissipation, e.g. +# `flux_lax_friedrichs` or `flux_hll`. +solver = DGSEM(polydeg = 2, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(flux_ranocha)) + +coordinates_min = (-EdgeLength / 2, -EdgeLength / 2) +coordinates_max = (EdgeLength / 2, EdgeLength / 2) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 100_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_callback = AnalysisCallback(semi, interval = 10, + analysis_errors = Symbol[], # Switch off error computation to save some time + analysis_integrals = (entropy,), + save_analysis = true) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# Ensure exact entropy conservation by employing a relaxation Runge-Kutta method +# Bisection solver is in general not recommended, as way more iterations than for the Newton case are needed +relaxation_solver = Trixi.RelaxationSolverBisection(max_iterations = 30, + gamma_min = 0.95, gamma_max = 1.05, + root_tol = eps(Float64), + gamma_tol = eps(Float64)) +ode_alg = Trixi.RelaxationCKL43(relaxation_solver = relaxation_solver) + +sol = Trixi.solve(ode, ode_alg; + dt = 42.0, save_everystep = false, callback = callbacks); diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar.jl index 216eeb1ff0c..36f2050962c 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar.jl @@ -49,7 +49,14 @@ function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerE return prim2cons(prim, equations) end initial_condition = initial_condition_isentropic_vortex -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-10.0, -10.0) coordinates_max = (10.0, 10.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_shockcapturing.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_shockcapturing.jl index ec8ebfb769c..f32ce4d663b 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_shockcapturing.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_shockcapturing.jl @@ -50,7 +50,14 @@ function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerE end initial_condition = initial_condition_isentropic_vortex -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_shima_etal polydeg = 3 diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_split.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_split.jl index df66fec228e..7e9ed2ac9e4 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_split.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_mortar_split.jl @@ -51,7 +51,14 @@ end initial_condition = initial_condition_isentropic_vortex volume_flux = flux_shima_etal -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-10.0, -10.0) diff --git a/examples/tree_2d_dgsem/elixir_euler_vortex_shockcapturing.jl b/examples/tree_2d_dgsem/elixir_euler_vortex_shockcapturing.jl index 7bd6c5a1465..fb83f56810b 100644 --- a/examples/tree_2d_dgsem/elixir_euler_vortex_shockcapturing.jl +++ b/examples/tree_2d_dgsem/elixir_euler_vortex_shockcapturing.jl @@ -50,7 +50,14 @@ function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerE end initial_condition = initial_condition_isentropic_vortex -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_shima_etal polydeg = 3 diff --git a/examples/tree_2d_dgsem/elixir_euleracoustics_co-rotating_vortex_pair.jl b/examples/tree_2d_dgsem/elixir_euleracoustics_co-rotating_vortex_pair.jl index 0755fa5d70f..446506f85d5 100644 --- a/examples/tree_2d_dgsem/elixir_euleracoustics_co-rotating_vortex_pair.jl +++ b/examples/tree_2d_dgsem/elixir_euleracoustics_co-rotating_vortex_pair.jl @@ -235,7 +235,14 @@ mesh = TreeMesh(coordinates_min, coordinates_max, periodicity = false) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) ############################################################################### # semidiscretization Euler equations diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_convergence_es.jl b/examples/tree_2d_dgsem/elixir_eulermulti_convergence_es.jl index 029cea3bbc9..be76bc709a0 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_convergence_es.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_convergence_es.jl @@ -9,7 +9,14 @@ equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), initial_condition = initial_condition_convergence_test volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-1.0, -1.0) diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_es.jl b/examples/tree_2d_dgsem/elixir_eulermulti_es.jl index 74f7e93425c..8cc0048515d 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_es.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_es.jl @@ -9,7 +9,14 @@ equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4, 1.4, initial_condition = initial_condition_weak_blast_wave volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-2.0, -2.0) diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl index 43674a6b2c6..d884f6bfe72 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl @@ -82,7 +82,14 @@ function initial_condition_shock_bubble(x, t, end initial_condition = initial_condition_shock_bubble -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl index 9b2541bd34d..094a8db57e2 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl @@ -81,7 +81,14 @@ function initial_condition_shock_bubble(x, t, end initial_condition = initial_condition_shock_bubble -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl index 22f642a2e82..e1ee83f574a 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl @@ -81,7 +81,14 @@ function initial_condition_shock_bubble(x, t, end initial_condition = initial_condition_shock_bubble -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha basis = LobattoLegendreBasis(3) diff --git a/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl index 0cbd9026fdd..0d7bf6198d3 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -3,14 +3,20 @@ using Trixi ############################################################################### # semidiscretization of the compressible ideal GLM-MHD equations -gamma = 5 / 3 -equations = IdealGlmMhdEquations2D(gamma) +equations = IdealGlmMhdEquations2D(5 / 3) initial_condition = initial_condition_convergence_test +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_central, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (0.0, 0.0) diff --git a/examples/tree_2d_dgsem/elixir_mhd_alfven_wave_dirichlet.jl b/examples/tree_2d_dgsem/elixir_mhd_alfven_wave_dirichlet.jl index 1f8173037b9..3308622c8a8 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_alfven_wave_dirichlet.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_alfven_wave_dirichlet.jl @@ -8,10 +8,18 @@ equations = IdealGlmMhdEquations2D(gamma) initial_condition = initial_condition_convergence_test +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_powell_local_symmetric) volume_flux = (flux_central, flux_nonconservative_powell_local_symmetric) solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, - flux_nonconservative_powell_local_symmetric), + surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (0.0, 0.0) diff --git a/examples/tree_2d_dgsem/elixir_mhd_blast_wave.jl b/examples/tree_2d_dgsem/elixir_mhd_blast_wave.jl index 969bd723d33..3f65907d1de 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_blast_wave.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_blast_wave.jl @@ -39,7 +39,14 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_central, flux_nonconservative_powell) basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_mhd_onion.jl b/examples/tree_2d_dgsem/elixir_mhd_onion.jl index c7f1d4820a4..bcf1db82363 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_onion.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_onion.jl @@ -32,9 +32,16 @@ mesh = TreeMesh(coordinates_min, coordinates_max, n_cells_max = 10_000, periodicity = false) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) boundary_conditions = (x_neg = BoundaryConditionDirichlet(initial_condition), diff --git a/examples/tree_2d_dgsem/elixir_mhd_orszag_tang.jl b/examples/tree_2d_dgsem/elixir_mhd_orszag_tang.jl index 1e2be122fe3..dd61b2ab0c2 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_orszag_tang.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_orszag_tang.jl @@ -30,7 +30,14 @@ function initial_condition_orszag_tang(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_orszag_tang -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_central, flux_nonconservative_powell) basis = LobattoLegendreBasis(3) indicator_sc = IndicatorHennemannGassner(equations, basis, diff --git a/examples/tree_2d_dgsem/elixir_mhd_rotor.jl b/examples/tree_2d_dgsem/elixir_mhd_rotor.jl index 68d3119f7ca..0b1c126a0e8 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_rotor.jl @@ -44,7 +44,14 @@ function initial_condition_rotor(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_rotor -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 4 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl index a372491c230..5e1e2a3c2d7 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl @@ -45,7 +45,15 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations2D) end initial_condition = initial_condition_blast_wave -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell_local_symmetric) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_powell_local_symmetric) volume_flux = (flux_derigs_etal, flux_nonconservative_powell_local_symmetric) basis = LobattoLegendreBasis(3) diff --git a/examples/tree_2d_dgsem/elixir_mhdmulti_convergence.jl b/examples/tree_2d_dgsem/elixir_mhdmulti_convergence.jl index 9bfbccb4230..e3e231c520c 100644 --- a/examples/tree_2d_dgsem/elixir_mhdmulti_convergence.jl +++ b/examples/tree_2d_dgsem/elixir_mhdmulti_convergence.jl @@ -9,9 +9,16 @@ equations = IdealGlmMhdMulticomponentEquations2D(gammas = (5 / 3, 5 / 3, 5 / 3), initial_condition = initial_condition_convergence_test +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (0.0, 0.0) diff --git a/examples/tree_2d_dgsem/elixir_mhdmulti_es.jl b/examples/tree_2d_dgsem/elixir_mhdmulti_es.jl index eca02f674ff..207d81cc154 100644 --- a/examples/tree_2d_dgsem/elixir_mhdmulti_es.jl +++ b/examples/tree_2d_dgsem/elixir_mhdmulti_es.jl @@ -9,9 +9,16 @@ equations = IdealGlmMhdMulticomponentEquations2D(gammas = (5 / 3, 5 / 3), initial_condition = initial_condition_weak_blast_wave +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-2.0, -2.0) diff --git a/examples/tree_2d_dgsem/elixir_mhdmulti_rotor.jl b/examples/tree_2d_dgsem/elixir_mhdmulti_rotor.jl index 5f74fbc0bf8..e1f6bd2b176 100644 --- a/examples/tree_2d_dgsem/elixir_mhdmulti_rotor.jl +++ b/examples/tree_2d_dgsem/elixir_mhdmulti_rotor.jl @@ -45,7 +45,14 @@ function initial_condition_rotor(x, t, equations::IdealGlmMhdMulticomponentEquat end initial_condition = initial_condition_rotor -surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_mhdmultiion_collisions.jl b/examples/tree_2d_dgsem/elixir_mhdmultiion_collisions.jl index d5f6ce7b1c0..9f4641cf973 100644 --- a/examples/tree_2d_dgsem/elixir_mhdmultiion_collisions.jl +++ b/examples/tree_2d_dgsem/elixir_mhdmultiion_collisions.jl @@ -103,7 +103,14 @@ tspan = (0.0, 0.1) # 100 [ps] # Entropy conservative volume numerical fluxes with standard LLF dissipation at interfaces volume_flux = (flux_ruedaramirez_etal, flux_nonconservative_ruedaramirez_etal) -surface_flux = (flux_lax_friedrichs, flux_nonconservative_central) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_central) solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl index c5fae719c32..16ea4742315 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl index 9de9110b6d8..2b1d13cc5ca 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl @@ -35,7 +35,14 @@ function initial_condition_kelvin_helmholtz_instability(x, t, end initial_condition = initial_condition_kelvin_helmholtz_instability -surface_flux = flux_lax_friedrichs +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index d7b5e10dcef..cde7365d24b 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -12,7 +12,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl index 09fad48c7ff..8c1239bfa00 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl @@ -5,10 +5,10 @@ using Trixi # semidiscretization of the compressible Navier-Stokes equations prandtl_number() = 0.72 -mu = 6.25e-4 # equivalent to Re = 1600 +mu() = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), Prandtl = prandtl_number()) """ diff --git a/examples/tree_3d_dgsem/elixir_advection_er.jl b/examples/tree_3d_dgsem/elixir_advection_er.jl new file mode 100644 index 00000000000..988dd3d7d54 --- /dev/null +++ b/examples/tree_3d_dgsem/elixir_advection_er.jl @@ -0,0 +1,51 @@ +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (1.0, 1.0, 1.0) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +solver = DGSEM(polydeg = 2, surface_flux = flux_central, + volume_integral = VolumeIntegralFluxDifferencing(flux_central)) # Entropy-conservative setup + +coordinates_min = (-1.0, -1.0, -1.0) +coordinates_max = (1.0, 1.0, 1.0) +# Create a uniformly refined mesh with periodic boundaries +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 30_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + 
+############################################################################### +# ODE solvers, callbacks etc. + +ode = semidiscretize(semi, (0.0, 1.0)) + +summary_callback = SummaryCallback() + +analysis_interval = 1 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + analysis_errors = Symbol[], # Switch off error computation + # Note: `entropy` defaults to mathematical entropy + analysis_integrals = (entropy,), + analysis_filename = "entropy_ER.dat", + save_analysis = true) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +relaxation_solver = Trixi.RelaxationSolverNewton(max_iterations = 3, + root_tol = eps(Float64), + gamma_tol = eps(Float64)) +ode_alg = Trixi.RelaxationRalston3(relaxation_solver = relaxation_solver) + +sol = Trixi.solve(ode, ode_alg, + dt = 42.0, save_everystep = false, callback = callbacks); diff --git a/examples/tree_3d_dgsem/elixir_euler_amr.jl b/examples/tree_3d_dgsem/elixir_euler_amr.jl index 5b64b623cd5..6d6ead637a2 100644 --- a/examples/tree_3d_dgsem/elixir_euler_amr.jl +++ b/examples/tree_3d_dgsem/elixir_euler_amr.jl @@ -25,7 +25,14 @@ function initial_condition_density_pulse(x, t, equations::CompressibleEulerEquat return SVector(rho, rho_v1, rho_v2, rho_v3, rho_e) end initial_condition = initial_condition_density_pulse -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. 
+# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-5.0, -5.0, -5.0) coordinates_max = (5.0, 5.0, 5.0) diff --git a/examples/tree_3d_dgsem/elixir_euler_blob_amr.jl b/examples/tree_3d_dgsem/elixir_euler_blob_amr.jl index 2fdc58967d0..f447a06ba04 100644 --- a/examples/tree_3d_dgsem/elixir_euler_blob_amr.jl +++ b/examples/tree_3d_dgsem/elixir_euler_blob_amr.jl @@ -92,16 +92,23 @@ save_solution = SaveSolutionCallback(interval = 200, save_final_solution = true, solution_variables = cons2prim) +positivity_limiter = PositivityPreservingLimiterZhangShu(thresholds = (1.0e-4, 1.0e-4), + variables = (Trixi.density, + pressure)) + amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) + amr_controller = ControllerThreeLevel(semi, amr_indicator, base_level = 1, med_level = 0, med_threshold = 0.1, # med_level = current level max_level = 6, max_threshold = 0.3) + amr_callback = AMRCallback(semi, amr_controller, interval = 3, adapt_initial_condition = false, - adapt_initial_condition_only_refine = true) + adapt_initial_condition_only_refine = true, + limiter! = positivity_limiter) stepsize_callback = StepsizeCallback(cfl = 1.7) @@ -110,12 +117,11 @@ callbacks = CallbackSet(summary_callback, save_solution, amr_callback, stepsize_callback) -stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (1.0e-4, 1.0e-4), - variables = (Trixi.density, pressure)) - ############################################################################### # run the simulation -sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition = false); +sol = solve(ode, + CarpenterKennedy2N54(stage_limiter! 
= positivity_limiter, + williamson_condition = false); dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback ode_default_options()..., callback = callbacks); diff --git a/examples/tree_3d_dgsem/elixir_euler_mortar.jl b/examples/tree_3d_dgsem/elixir_euler_mortar.jl index 6e92676975c..fbf20e9ecca 100644 --- a/examples/tree_3d_dgsem/elixir_euler_mortar.jl +++ b/examples/tree_3d_dgsem/elixir_euler_mortar.jl @@ -7,7 +7,14 @@ using Trixi equations = CompressibleEulerEquations3D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (0.0, 0.0, 0.0) coordinates_max = (2.0, 2.0, 2.0) diff --git a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl index 9403e8a4c63..c3f7c44c8cc 100644 --- a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl @@ -8,7 +8,14 @@ equations = CompressibleEulerEquations3D(1.4) initial_condition = initial_condition_convergence_test -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (0.0, 0.0, 0.0) diff --git a/examples/tree_3d_dgsem/elixir_euler_taylor_green_vortex.jl b/examples/tree_3d_dgsem/elixir_euler_taylor_green_vortex.jl index ff7b138f086..6ff5d1c46ff 100644 --- a/examples/tree_3d_dgsem/elixir_euler_taylor_green_vortex.jl +++ b/examples/tree_3d_dgsem/elixir_euler_taylor_green_vortex.jl @@ -31,7 +31,14 @@ end initial_condition = initial_condition_taylor_green_vortex volume_flux = flux_ranocha -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-1.0, -1.0, -1.0) .* pi diff --git a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl index 26864708210..861e7d565f7 100644 --- a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl @@ -25,7 +25,14 @@ semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_conditi # semidiscretization of the hyperbolic diffusion equations equations_gravity = HyperbolicDiffusionEquations3D() -solver_gravity = DGSEM(polydeg, flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver_gravity = DGSEM(polydeg, FluxLaxFriedrichs(max_abs_speed_naive)) semi_gravity = SemidiscretizationHyperbolic(mesh, equations_gravity, initial_condition, solver_gravity, diff --git a/examples/tree_3d_dgsem/elixir_mhd_alfven_wave.jl b/examples/tree_3d_dgsem/elixir_mhd_alfven_wave.jl index 278133a0f03..e99c9c8c775 100644 --- a/examples/tree_3d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/tree_3d_dgsem/elixir_mhd_alfven_wave.jl @@ -8,9 +8,16 @@ equations = IdealGlmMhdEquations3D(5 / 3) initial_condition = initial_condition_convergence_test +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (-1.0, -1.0, -1.0) diff --git a/examples/tree_3d_dgsem/elixir_mhdmultiion_collisions.jl b/examples/tree_3d_dgsem/elixir_mhdmultiion_collisions.jl new file mode 100644 index 00000000000..6eb395f3748 --- /dev/null +++ b/examples/tree_3d_dgsem/elixir_mhdmultiion_collisions.jl @@ -0,0 +1,157 @@ + +using OrdinaryDiffEqLowStorageRK +using Trixi + +############################################################################### +# This elixir describes the frictional slowing of an ionized carbon fluid (C6+) with respect to another species +# of a background ionized carbon fluid with an initially nonzero relative velocity. It is the second slow-down +# test (fluids with different densities) described in: +# - Ghosh, D., Chapman, T. D., Berger, R. L., Dimits, A., & Banks, J. W. (2019). A +# multispecies, multifluid model for laser–induced counterstreaming plasma simulations. +# Computers & Fluids, 186, 38-57. [DOI: 10.1016/j.compfluid.2019.04.012](https://doi.org/10.1016/j.compfluid.2019.04.012). +# +# This is effectively a zero-dimensional case because the spatial gradients are zero, and we use it to test the +# collision source terms. 
+# +# To run this physically relevant test, we use the following characteristic quantities to non-dimensionalize +# the equations: +# Characteristic length: L_inf = 1.00E-03 m (domain size) +# Characteristic density: rho_inf = 1.99E+00 kg/m^3 (corresponds to a number density of 1e20 cm^{-3}) +# Characteristic vacuum permeability: mu0_inf = 1.26E-06 N/A^2 (for equations with mu0 = 1) +# Characteristic gas constant: R_inf = 6.92237E+02 J/kg/K (specific gas constant for a Carbon fluid) +# Characteristic velocity: V_inf = 1.00E+06 m/s +# +# The results of the paper can be reproduced using `source_terms = source_terms_collision_ion_ion` (i.e., only +# taking into account ion-ion collisions). However, we include ion-electron collisions assuming a constant +# electron temperature of 1 keV in this elixir to test the function `source_terms_collision_ion_electron` + +# Return the electron pressure for a constant electron temperature Te = 1 keV +function electron_pressure_constantTe(u, equations::IdealGlmMhdMultiIonEquations3D) + @unpack charge_to_mass = equations + Te = electron_temperature_constantTe(u, equations) + total_electron_charge = zero(eltype(u)) + for k in eachcomponent(equations) + rho_k = u[3 + (k - 1) * 5 + 1] + total_electron_charge += rho_k * charge_to_mass[k] + end + + # Boltzmann constant divided by elementary charge + RealT = eltype(u) + kB_e = convert(RealT, 7.86319034E-02) #[nondimensional] + + return total_electron_charge * kB_e * Te +end + +# Return the constant electron temperature Te = 1 keV +function electron_temperature_constantTe(u, equations::IdealGlmMhdMultiIonEquations3D) + RealT = eltype(u) + return convert(RealT, 0.008029953773) # [nondimensional] = 1 [keV] +end + +# semidiscretization of the ideal MHD equations +equations = IdealGlmMhdMultiIonEquations3D(gammas = (5 / 3, 5 / 3), + charge_to_mass = (76.3049060157692000, + 76.3049060157692000), # [nondimensional] + gas_constants = (1.0, 1.0), # [nondimensional] + molar_masses = (1.0, 1.0), # 
[nondimensional] + ion_ion_collision_constants = [0.0 0.4079382480442680; + 0.4079382480442680 0.0], # [nondimensional] (computed with eq (4.142) of Schunk & Nagy (2009)) + ion_electron_collision_constants = (8.56368379833E-06, + 8.56368379833E-06), # [nondimensional] (computed with eq (9) of Ghosh et al. (2019)) + electron_pressure = electron_pressure_constantTe, + electron_temperature = electron_temperature_constantTe, + initial_c_h = 0.0) # Deactivate GLM divergence cleaning + +# Frictional slowing of an ionized carbon fluid with respect to another background carbon fluid in motion +function initial_condition_slow_down(x, t, equations::IdealGlmMhdMultiIonEquations3D) + RealT = eltype(x) + + v11 = convert(RealT, 0.6550877) + v21 = zero(RealT) + v2 = v3 = zero(RealT) + B1 = B2 = B3 = zero(RealT) + rho1 = convert(RealT, 0.1) + rho2 = one(RealT) + + p1 = convert(RealT, 0.00040170535986) + p2 = convert(RealT, 0.00401705359856) + + psi = zero(RealT) + + return prim2cons(SVector(B1, B2, B3, rho1, v11, v2, v3, p1, rho2, v21, v2, v3, p2, psi), + equations) +end + +# Temperature of ion 1 +function temperature1(u, equations::IdealGlmMhdMultiIonEquations3D) + rho_1, _ = Trixi.get_component(1, u, equations) + p = pressure(u, equations) + + return p[1] / (rho_1 * equations.gas_constants[1]) +end + +# Temperature of ion 2 +function temperature2(u, equations::IdealGlmMhdMultiIonEquations3D) + rho_2, _ = Trixi.get_component(2, u, equations) + p = pressure(u, equations) + + return p[2] / (rho_2 * equations.gas_constants[2]) +end + +initial_condition = initial_condition_slow_down +tspan = (0.0, 0.1) # 100 [ps] + +# Entropy conservative volume numerical fluxes with standard LLF dissipation at interfaces +volume_flux = (flux_ruedaramirez_etal, flux_nonconservative_ruedaramirez_etal) +surface_flux = (flux_lax_friedrichs, flux_nonconservative_central) + +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + 
+coordinates_min = (0.0, 0.0, 0.0) +coordinates_max = (1.0, 1.0, 1.0) +# We use a very coarse mesh because this is a 0-dimensional case +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 1, + n_cells_max = 1_000_000) + +# Ion-ion and ion-electron collision source terms +# In this particular case, we can omit source_terms_lorentz because the magnetic field is zero! +function source_terms(u, x, t, equations::IdealGlmMhdMultiIonEquations3D) + source_terms_collision_ion_ion(u, x, t, equations) + + source_terms_collision_ion_electron(u, x, t, equations) +end + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms) + +############################################################################### +# ODE solvers, callbacks etc. + +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1 +analysis_callback = AnalysisCallback(semi, + save_analysis = true, + interval = analysis_interval, + extra_analysis_integrals = (temperature1, + temperature2)) +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.01) # Very small CFL due to the stiff source terms + +save_restart = SaveRestartCallback(interval = 100, + save_final_restart = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_restart, + stepsize_callback) + +############################################################################### + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + ode_default_options()..., callback = callbacks); diff --git a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl index 9259d5a42ca..1f1c734ba00 100644 --- a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl +++ 
b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl @@ -13,7 +13,15 @@ equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), gradient_variables = GradientVariablesPrimitive()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 3, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive), volume_integral = VolumeIntegralWeakForm()) coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) diff --git a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl index d2a9a53509c..17a87da4fc7 100644 --- a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl +++ b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl @@ -9,7 +9,15 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.0, -0.5), rho_mean_global = 1.0) # Create DG solver with polynomial degree = 4 and (local) Lax-Friedrichs/Rusanov flux -solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 4, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) # Create unstructured quadrilateral mesh from a file mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", diff --git a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl index af57b987ae8..e111c384ecc 100644 --- a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl @@ -29,7 +29,8 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergen # ODE solvers, callbacks etc. # Create ODE problem with time span from 0.0 to 1.0 -ode = semidiscretize(semi, (0.0, 1.0)) +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup # and resets the timers diff --git a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl index 10495f65a19..3bdedc1425e 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl @@ -19,7 +19,14 @@ boundary_conditions = Dict(:Slant => boundary_condition_convergence_test, ############################################################################### # Get the DG approximation space -solver = DGSEM(polydeg = 8, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +solver = DGSEM(polydeg = 8, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl index f080b09e059..b72e5a93e50 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl @@ -37,7 +37,15 @@ end initial_condition = initial_condition_sedov_blast_wave # Get the DG approximation space -surface_flux = flux_lax_friedrichs + +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+surface_flux = FluxLaxFriedrichs(max_abs_speed_naive) volume_flux = flux_ranocha polydeg = 6 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl index 789c1708d77..ea2b033f5ef 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -64,7 +64,14 @@ source_term = source_terms_convergence_shifted ############################################################################### # Get the DG approximation space -solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+solver = DGSEM(polydeg = 6, surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_onion.jl b/examples/unstructured_2d_dgsem/elixir_mhd_onion.jl index de4a6db44e0..a6b5bc4e4ca 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_onion.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_onion.jl @@ -30,9 +30,16 @@ mesh_file = Trixi.download("https://gist.githubusercontent.com/patrickersing/8fe joinpath(@__DIR__, "mesh_mhd_onion.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = false) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion.
+surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell) volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), +solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) boundary_conditions = Dict(:Bottom => BoundaryConditionDirichlet(initial_condition), diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl index 346d3c1c5f0..d907b40b0bc 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl @@ -14,8 +14,15 @@ initial_condition = initial_condition_convergence_test D_SBP = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 4, xmin = -1.0, xmax = 1.0, N = 10) +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs()`, i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion.
solver = FDSBP(D_SBP, - surface_integral = SurfaceIntegralStrongForm(flux_lax_friedrichs), + surface_integral = SurfaceIntegralStrongForm(FluxLaxFriedrichs(max_abs_speed_naive)), volume_integral = VolumeIntegralStrongForm()) ############################################################################### diff --git a/ext/TrixiCUDAExt.jl b/ext/TrixiCUDAExt.jl new file mode 100644 index 00000000000..681d2f53a1e --- /dev/null +++ b/ext/TrixiCUDAExt.jl @@ -0,0 +1,11 @@ +# Package extension for adding CUDA-based features to Trixi.jl +module TrixiCUDAExt + +import CUDA: CuArray +import Trixi + +function Trixi.storage_type(::Type{<:CuArray}) + return CuArray +end + +end diff --git a/ext/TrixiSparseConnectivityTracerExt.jl b/ext/TrixiSparseConnectivityTracerExt.jl new file mode 100644 index 00000000000..8ea9392a029 --- /dev/null +++ b/ext/TrixiSparseConnectivityTracerExt.jl @@ -0,0 +1,13 @@ +# Package extension for overloading of branching (if-clauses) base functions such as sqrt, log, etc. +module TrixiSparseConnectivityTracerExt + +import Trixi +import SparseConnectivityTracer: AbstractTracer + +# For the default package preference "sqrt_Trixi_NaN" we overload the `Base.sqrt` function +# to first check if the argument is < 0 and then return `NaN` instead of an error. +# To turn this behaviour off for the datatype `AbstractTracer` used in sparsity detection, +# we switch back to the Base implementation here which does not contain an if-clause. 
+Trixi.sqrt(x::AbstractTracer) = Base.sqrt(x) + +end diff --git a/setup_io/LocalPreferences.toml b/setup_io/LocalPreferences.toml new file mode 100644 index 00000000000..25613f4c2fa --- /dev/null +++ b/setup_io/LocalPreferences.toml @@ -0,0 +1,26 @@ +[HDF5] +libhdf5 = "/home/aruedara/Programs/hdf5/1.13.0_intel21/lib/libhdf5.so" +libhdf5_hl = "/home/aruedara/Programs/hdf5/1.13.0_intel21/lib/libhdf5_hl.so" + +[HDF5_jll] +libhdf5_hl_path = "/home/aruedara/Programs/hdf5/1.13.0_intel21/lib/libhdf5_hl.so" +libhdf5_path = "/home/aruedara/Programs/hdf5/1.13.0_intel21/lib/libhdf5.so" + +[MPIPreferences] +__clear__ = ["preloads_env_switch"] +_format = "1.0" +abi = "MPICH" +binary = "system" +cclibs = [] +libmpi = "libmpi" +mpiexec = "mpiexec" +preloads = [] + +[P4est] +libp4est = "/home/aruedara/Programs/t8code/t8code/build_intel21/lib64/libp4est.so" +libsc = "/home/aruedara/Programs/t8code/t8code/build_intel21/lib64/libsc.so" + +[T8code] +libp4est = "/home/aruedara/Programs/t8code/t8code/build_intel21/lib64/libp4est.so" +libt8 = "/home/aruedara/Programs/t8code/t8code/build_intel21/lib64/libt8.so" +libsc = "/home/aruedara/Programs/t8code/t8code/build_intel21/lib64/libsc.so" diff --git a/setup_io/README.md b/setup_io/README.md new file mode 100644 index 00000000000..7f43a5a1b79 --- /dev/null +++ b/setup_io/README.md @@ -0,0 +1,149 @@ +# ESiWACE3 Trixi.jl service + +## Instructions for terrabyte cluster + +You need to get an account at https://docs.terrabyte.lrz.de/services/identity/get-account/ +and set up two-factor authentication. + +Documentation is available here: https://docs.terrabyte.lrz.de/ + + +### Login +```shell +ssh login.terrabyte.lrz.de +``` +You have storage space at `$HOME` and `$SCRATCH` (not backed up, temporary). +There is also a common project storage space. Use +``` +export PROJECT=/dss/dsstbyfs02/pn76vi/pn76vi-dss-0000 +``` +to be able to access it as `$PROJECT`. + + +### Set up t8code +t8code has already been installed in `$PROJECT/install/t8code`. 
+Do the following only if you require an individual installation. +1. Load modules + ```shell + module purge + module load spack/23.1.0 + module load gcc/12.2.0 + module load openmpi/4.1.5-gcc11 + ``` +2. Change to scratch folder + ```shell + cd $SCRATCH + ``` +3. Clone the repository + ```shell + git clone --branch 'v3.0.1' --depth 1 https://github.com/DLR-AMR/t8code.git + cd t8code + git submodule init + git submodule update + ``` +4. Build using cmake: + ```shell + module add cmake + mkdir build + cd build + cmake \ + -DCMAKE_C_COMPILER=mpicc \ + -DCMAKE_CXX_COMPILER=mpicxx \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="$SCRATCH/install/t8code" \ + -DT8CODE_BUILD_TESTS=OFF \ + -DT8CODE_BUILD_TUTORIALS=OFF \ + -DT8CODE_BUILD_EXAMPLES=OFF \ + -DT8CODE_BUILD_BENCHMARKS=OFF \ + -DT8CODE_ENABLE_MPI=ON \ + .. + nice make -j8 + nice make install -j8 + ``` + + +### Set up Julia +Julia is not available on the cluster. We need to install it manually. +1. If there is no `.bashrc` or `.bash_profile` in your `$HOME` directory, create one + ``` + touch $HOME/.bashrc + ``` +2. Use the official Julia installer: + ```shell + curl -fsSL https://install.julialang.org | sh + ``` + Accept the defaults. Once finished you will be told to source your `.bashrc` or re-login. +3. Julia should now be available + ```shell + julia --version + ``` +4. Install the 1.11 branch + ```shell + juliaup add 1.11 + ``` + + +### Set up Trixi.jl +1. Clone the repository + ```shell + git clone https://github.com/benegee/Trixi.jl.git + git switch lc/gpu-develop + ``` +2. Go to the `esiwace` directory. We collect necessary environmental settings in + `profile`. Edit this file as necessary and source it: + ```shell + . profile + ``` +3. The Julia project is configured by several files: `Project.toml` lists dependencies, + `Manifest.toml` lists exact version numbers for all installed packages, + `LocalPreferences.toml` contains advanced configuration options.
+ It should only be necessary to adapt `LocalPreferences.toml` to reflect the t8code + installation path. +4. Open Julia via the `$JL` command and instantiate the project: + ```shell + $JL --project=. -e 'using Pkg; Pkg.develop(PackageSpec(path=".."))' + $JL --project=. -e 'using Pkg; Pkg.add(["OrdinaryDiffEq", "Trixi2Vtk"])' + $JL --project=. -e 'using Pkg; Pkg.instantiate()' + ``` + This will take some time! Some packages might throw errors. + + +### Check installation +1. Make sure that everything is precompiled by running the following: + ```shell + $JL --project=. -e 'using OrdinaryDiffEq, Trixi' + ``` + If there are still some errors, they might get resolved when running on compute nodes. +2. To test CUDA, first log in to a GPU node: + ```shell + salloc --cluster=hpda2 --partition=hpda2_testgpu --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --time=00:30:00 + ``` + Then start Julia: + ```shell + $JL --project=. -e 'using CUDA; CUDA.versioninfo()' + ``` + This should print + ``` + CUDA runtime 11.8, local installation + ... + ``` + + + +## Launch +1. SLURM jobscripts are in `jobscripts`. Edit as necessary. At least, you have to specify + your mail address. +2. The actual simulation is configured in `run.jl` and based on Trixi.jl files in `elixirs`. +3. Send job to queue: + ```shell + sbatch jobscripts/single_node.sh + ``` diff --git a/setup_io/profile b/setup_io/profile new file mode 100644 index 00000000000..09839f3ee29 --- /dev/null +++ b/setup_io/profile @@ -0,0 +1,8 @@ +module purge +# module load p4est/2.2_intel intel/19.0 intelmpi/2019 hdf5/1.13.0_intel +module load intel_new/2021_gnu_7.3.0 intelmpi/2021 t8code/1.3_intelmpi_2021 +unset I_MPI_PMI_LIBRARY + +export JULIA_DEPOT_PATH="/home/aruedara/julia-depot/" +export JL="julia +1.11" + diff --git a/src/Trixi.jl b/src/Trixi.jl index a707437655e..9412c33db6f 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -18,7 +18,7 @@ module Trixi using Preferences: @load_preference, set_preferences!
const _PREFERENCE_SQRT = @load_preference("sqrt", "sqrt_Trixi_NaN") const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") -const _PREFERENCE_POLYESTER = @load_preference("polyester", true) +const _PREFERENCE_THREADING = Symbol(@load_preference("backend", "polyester")) const _PREFERENCE_LOOPVECTORIZATION = @load_preference("loop_vectorization", true) # Include other packages that are used in Trixi.jl @@ -50,6 +50,7 @@ import SciMLBase: get_du, get_tmp_cache, u_modified!, using DelimitedFiles: readdlm using Downloads: Downloads +using Adapt: Adapt, adapt using CodeTracking: CodeTracking using ConstructionBase: ConstructionBase using DiffEqBase: DiffEqBase, get_tstops, get_tstops_array @@ -58,6 +59,7 @@ using DiffEqCallbacks: PeriodicCallback, PeriodicCallbackAffect using FillArrays: Ones, Zeros using ForwardDiff: ForwardDiff using HDF5: HDF5, h5open, attributes, create_dataset, datatype, dataspace +using KernelAbstractions: KernelAbstractions, @index, @kernel, get_backend, Backend using LinearMaps: LinearMap if _PREFERENCE_LOOPVECTORIZATION using LoopVectorization: LoopVectorization, @turbo, indices @@ -132,6 +134,7 @@ include("basic_types.jl") # Include all top-level source files include("auxiliary/auxiliary.jl") +include("auxiliary/vector_of_arrays.jl") include("auxiliary/mpi.jl") include("auxiliary/p4est.jl") include("auxiliary/t8code.jl") @@ -293,12 +296,6 @@ export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback, DragCoefficientShearStress2D, LiftCoefficientShearStress2D, DragCoefficientPressure3D, LiftCoefficientPressure3D -# TODO: deprecation introduced in v0.11 -@deprecate DragCoefficientPressure DragCoefficientPressure2D -@deprecate LiftCoefficientPressure LiftCoefficientPressure2D -@deprecate DragCoefficientShearStress DragCoefficientShearStress2D -@deprecate LiftCoefficientShearStress LiftCoefficientShearStress2D - export load_mesh, load_time, load_timestep, load_timestep!, load_dt, load_adaptive_time_integrator! 
@@ -312,7 +309,9 @@ export trixi_include, examples_dir, get_examples, default_example, export ode_norm, ode_unstable_check -export convergence_test, jacobian_fd, jacobian_ad_forward, linear_structure +export convergence_test, + jacobian_fd, jacobian_ad_forward, jacobian_ad_forward_parabolic, + linear_structure export DGMulti, DGMultiBasis, estimate_dt, DGMultiMesh, GaussSBP diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index d3b94f6e611..14b699d958b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -206,16 +206,19 @@ and [https://discourse.julialang.org/t/threads-threads-with-one-thread-how-to-re macro threaded(expr) # !!! danger "Heisenbug" # Look at the comments for `wrap_array` when considering to change this macro. - expr = if _PREFERENCE_POLYESTER + expr = @static if _PREFERENCE_THREADING === :polyester # Currently using `@batch` from Polyester.jl is more efficient, # bypasses the Julia task scheduler and provides parallelization with less overhead. quote $Trixi.@batch $(expr) end - else + elseif _PREFERENCE_THREADING === :static || + _PREFERENCE_THREADING === :kernelabstractions # The following code is a simple version using only `Threads.@threads` from the # standard library with an additional check whether only a single thread is used # to reduce some overhead (and allocations) for serial execution. + # If we want to execute on KernelAbstractions, we use the static backend here to fallback on, + # for loops that do not yet support GPU execution. quote let if $Threads.nthreads() == 1 @@ -225,6 +228,10 @@ macro threaded(expr) end end end + elseif _PREFERENCE_THREADING === :serial + quote + $(expr) + end end # Use `esc(quote ... 
end)` for nested macro calls as suggested in # https://github.com/JuliaLang/julia/issues/23221 diff --git a/src/auxiliary/containers.jl b/src/auxiliary/containers.jl index 90650f6abcf..5036863ff4b 100644 --- a/src/auxiliary/containers.jl +++ b/src/auxiliary/containers.jl @@ -314,4 +314,135 @@ end function raw_copy!(c::AbstractContainer, from::Int, destination::Int) raw_copy!(c, c, from, from, destination) end + +# Trixi storage types must implement these two Adapt.jl methods +function Adapt.adapt_structure(to, c::AbstractContainer) + error("Interface: Must implement Adapt.adapt_structure(to, ::$(typeof(c)))") +end + +function Adapt.parent_type(C::Type{<:AbstractContainer}) + error("Interface: Must implement Adapt.parent_type(::Type{$C}") +end + +function Adapt.unwrap_type(C::Type{<:AbstractContainer}) + return Adapt.unwrap_type(Adapt.parent_type(C)) +end + +# TODO: Upstream to Adapt +""" + storage_type(x) + +Return the storage type of `x`, which is a concrete array type, such as `Array`, `CuArray`, or `ROCArray`. +""" +function storage_type(x) + return storage_type(typeof(x)) +end + +function storage_type(T::Type) + error("Interface: Must implement storage_type(::Type{$T}") +end + +function storage_type(::Type{<:Array}) + Array +end + +function storage_type(C::Type{<:AbstractContainer}) + return storage_type(Adapt.unwrap_type(C)) +end + +# backend handling +""" + trixi_backend(x) + +Return the computational backend for `x`, which is either a KernelAbstractions backend or `nothing`. +If the backend is `nothing`, the default multi-threaded CPU backend is used. 
+""" +function trixi_backend(x) + if (_PREFERENCE_THREADING === :polyester && LoopVectorization.check_args(x)) || + (_PREFERENCE_THREADING !== :kernelabstractions && + get_backend(x) isa KernelAbstractions.CPU) + return nothing + end + return get_backend(x) +end + +# TODO: After https://github.com/SciML/RecursiveArrayTools.jl/pull/455 we need to investigate the right way to handle StaticArray as uEltype for MultiDG. +function trixi_backend(x::VectorOfArray) + u = parent(x) + # FIXME(vchuravy): This is a workaround because KA.get_backend is ambivalent of where a SArray is residing. + if eltype(u) <: StaticArrays.StaticArray + return nothing + end + if length(u) == 0 + error("VectorOfArray is empty, cannot determine backend.") + end + # Use the backend of the first element in the parent array + return get_backend(u[1]) +end + +# For some storage backends like CUDA.jl, empty arrays do seem to simply be +# null pointers which can cause `unsafe_wrap` to fail when calling +# Adapt.adapt (ArgumentError, see +# https://github.com/JuliaGPU/CUDA.jl/blob/v5.4.2/src/array.jl#L212-L229). +# To circumvent this, on length zero arrays this allocates +# a separate empty array instead of wrapping. +# However, since zero length arrays are not used in calculations, +# it should be okay if the underlying storage vectors and wrapped arrays +# are not the same as long as they are properly wrapped when `resize!`d etc. +function unsafe_wrap_or_alloc(to, vector, size) + if length(vector) == 0 + return similar(vector, size) + else + return unsafe_wrap(to, pointer(vector), size) + end +end + +struct TrixiAdaptor{Storage, RealT} end + +""" + trixi_adapt(Storage, RealT, x) + +Adapt `x` to the storage type `Storage` and real type `RealT`. +""" +function trixi_adapt(Storage, RealT, x) + adapt(TrixiAdaptor{Storage, RealT}(), x) +end + +# Custom rules +# 1. 
handling of StaticArrays +function Adapt.adapt_storage(::TrixiAdaptor{<:Any, RealT}, + x::StaticArrays.StaticArray) where {RealT} + StaticArrays.similar_type(x, RealT)(x) +end + +# 2. Handling of Arrays +function Adapt.adapt_storage(::TrixiAdaptor{Storage, RealT}, + x::AbstractArray{T}) where {Storage, RealT, + T <: AbstractFloat} + adapt(Storage{RealT}, x) +end + +function Adapt.adapt_storage(::TrixiAdaptor{Storage, RealT}, + x::AbstractArray{T}) where {Storage, RealT, + T <: StaticArrays.StaticArray} + adapt(Storage{StaticArrays.similar_type(T, RealT)}, x) +end + +# Our threaded cache contains MArray, it is unlikely that we would want to adapt those +function Adapt.adapt_storage(::TrixiAdaptor{Storage, RealT}, + x::Array{T}) where {Storage, RealT, + T <: StaticArrays.MArray} + adapt(Array{StaticArrays.similar_type(T, RealT)}, x) +end + +function Adapt.adapt_storage(::TrixiAdaptor{Storage, RealT}, + x::AbstractArray) where {Storage, RealT} + adapt(Storage, x) +end + +# 3. TODO: Should we have a fallback? But that would imply implementing things for NamedTuple again + +function unsafe_wrap_or_alloc(::TrixiAdaptor{Storage}, vec, size) where {Storage} + return unsafe_wrap_or_alloc(Storage, vec, size) +end end # @muladd diff --git a/src/auxiliary/math.jl b/src/auxiliary/math.jl index 241c3ab213a..e2fcab85fa0 100644 --- a/src/auxiliary/math.jl +++ b/src/auxiliary/math.jl @@ -8,18 +8,27 @@ const TRIXI_UUID = UUID("a7f1ee26-1774-49b1-8366-f1abc58fbfcb") """ - Trixi.set_polyester!(toggle::Bool; force = true) - -Toggle the usage of [Polyester.jl](https://github.com/JuliaSIMD/Polyester.jl) for multithreading. -By default, Polyester.jl is enabled, but it can -be useful for performance comparisons to switch to the Julia core backend. - -This does not fully disable Polyester.jl, -but only its use as part of Trixi.jl's [`@threaded`](@ref) macro. 
+ Trixi.set_threading_backend!(backend::Symbol; force = true) + +Toggle and/or switch backend behavior used in multithreaded loops inside Trixi.jl. +The selected backend affects the behavior of Trixi.jl's [`@threaded`](@ref) macro, which is used +throughout the codebase for parallel loops. By default, Polyester.jl is enabled for +optimal performance, but switching backends can be useful for comparisons or debugging. + +# Available backends +- `:polyester`: Uses the default [Polyester.jl](https://github.com/JuliaSIMD/Polyester.jl) +- `:static`: Uses Julia's built-in static thread scheduling via `Threads.@threads :static` +- `:serial`: Disables threading, executing loops serially +- `:kernelabstractions`: Preferentially use the [KernelAbstractions.jl](https://github.com/JuliaGPU/KernelAbstractions.jl) + kernels written in Trixi.jl, falling back to `:static` execution. """ -function set_polyester!(toggle::Bool; force = true) - set_preferences!(TRIXI_UUID, "polyester" => toggle, force = force) - @info "Please restart Julia and reload Trixi.jl for the `polyester` change to take effect" +function set_threading_backend!(backend::Symbol = :polyester; force = true) + valid_backends = (:polyester, :static, :serial, :kernelabstractions) + if !(backend in valid_backends) + throw(ArgumentError("Invalid threading backend: $(backend). 
Current options are: $(join(valid_backends, ", "))")) + end + set_preferences!(TRIXI_UUID, "backend" => string(backend), force = force) + @info "Please restart Julia and reload Trixi.jl for the `backend` change to take effect" end """ @@ -52,9 +61,6 @@ function set_sqrt_type!(type; force = true) @info "Please restart Julia and reload Trixi.jl for the `sqrt` computation change to take effect" end -# TODO: deprecation introduced in v0.8 -@deprecate set_sqrt_type(type; force = true) set_sqrt_type!(type; force = true) false - @static if _PREFERENCE_SQRT == "sqrt_Trixi_NaN" """ Trixi.sqrt(x::Real) @@ -106,9 +112,6 @@ function set_log_type!(type; force = true) @info "Please restart Julia and reload Trixi.jl for the `log` computation change to take effect" end -# TODO: deprecation introduced in v0.8 -@deprecate set_log_type(type; force = true) set_log_type!(type; force = true) false - @static if _PREFERENCE_LOG == "log_Trixi_NaN" """ Trixi.log(x::Real) diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index e947e9773e1..c5cc0154094 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -384,11 +384,6 @@ function _precompile_manual_() # end # end @assert Base.precompile(Tuple{typeof(SummaryCallback)}) - @assert Base.precompile(Tuple{DiscreteCallback{typeof(Trixi.summary_callback), - typeof(Trixi.summary_callback), - typeof(Trixi.initialize_summary_callback), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)}}) @assert Base.precompile(Tuple{typeof(summary_box), Base.TTY, String, Vector{Pair{String, Any}}}) # TODO: AMRCallback, ControllerThreeLevel, indicators @@ -520,75 +515,9 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", SemidiscretizationHyperbolic}) - # callbacks - summary_callback_type = DiscreteCallback{typeof(Trixi.summary_callback), - typeof(Trixi.summary_callback), - typeof(Trixi.initialize_summary_callback), - typeof(SciMLBase.FINALIZE_DEFAULT), 
- typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, summary_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - summary_callback_type}) - @assert Base.precompile(Tuple{summary_callback_type, Base.TTY}) - - # TODO: SteadyStateCallback, AnalysisCallback - - alive_callback_type = DiscreteCallback{AliveCallback, AliveCallback, - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, alive_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - alive_callback_type}) - - restart_callback_type = DiscreteCallback{SaveRestartCallback, SaveRestartCallback, - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, restart_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - restart_callback_type}) - - for solution_variables in (cons2cons, cons2prim) - save_solution_callback_type = DiscreteCallback{SaveSolutionCallback{typeof(solution_variables)}, - SaveSolutionCallback{typeof(solution_variables)}, - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, - save_solution_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, - MIME"text/plain", save_solution_callback_type}) - end - - # TODO: AMRCallback - - stepsize_callback_type = DiscreteCallback{StepsizeCallback{RealT}, - StepsizeCallback{RealT}, - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, stepsize_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - stepsize_callback_type}) - - glm_speed_callback_type = 
DiscreteCallback{GlmSpeedCallback{RealT}, - GlmSpeedCallback{RealT}, - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, glm_speed_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - glm_speed_callback_type}) - - lbm_collision_callback_type = DiscreteCallback{typeof(Trixi.lbm_collision_callback), - typeof(Trixi.lbm_collision_callback), - typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT), - typeof(nothing)} - @assert Base.precompile(Tuple{typeof(show), Base.TTY, lbm_collision_callback_type}) - @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", - lbm_collision_callback_type}) + # We do not precompile callbacks since they are based on + # SciML structures like `DiscreteCallback` that may change their + # type signatures in non-breaking releases. end @assert Base.precompile(Tuple{typeof(init_mpi)}) diff --git a/src/auxiliary/vector_of_arrays.jl b/src/auxiliary/vector_of_arrays.jl new file mode 100644 index 00000000000..0fa8dd7f1ec --- /dev/null +++ b/src/auxiliary/vector_of_arrays.jl @@ -0,0 +1,31 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +# Wraps a Vector of Arrays, forwards `getindex` to the underlying Vector. +# Implements `Adapt.adapt_structure` to allow offloading to the GPU which is +# not possible for a plain Vector of Arrays. 
+struct VecOfArrays{T <: AbstractArray} + arrays::Vector{T} +end +Base.getindex(v::VecOfArrays, i::Int) = Base.getindex(v.arrays, i) +Base.IndexStyle(v::VecOfArrays) = Base.IndexStyle(v.arrays) +Base.size(v::VecOfArrays) = Base.size(v.arrays) +Base.length(v::VecOfArrays) = Base.length(v.arrays) +Base.eltype(v::VecOfArrays{T}) where {T} = T +function Adapt.adapt_structure(to, v::VecOfArrays) + return VecOfArrays([Adapt.adapt(to, arr) for arr in v.arrays]) +end +function Adapt.parent_type(::Type{<:VecOfArrays{T}}) where {T} + return T +end +function Adapt.unwrap_type(A::Type{<:VecOfArrays}) + Adapt.unwrap_type(Adapt.parent_type(A)) +end +function Base.convert(::Type{<:VecOfArrays}, v::Vector{<:AbstractArray}) + VecOfArrays(v) +end +end # @muladd diff --git a/src/callbacks_stage/entropy_bounded_limiter.jl b/src/callbacks_stage/entropy_bounded_limiter.jl index f8dc9a612d6..57bc658cf82 100644 --- a/src/callbacks_stage/entropy_bounded_limiter.jl +++ b/src/callbacks_stage/entropy_bounded_limiter.jl @@ -56,6 +56,8 @@ function (limiter!::EntropyBoundedLimiter)(u_ode, integrator, limiter_entropy_bounded!(u, u_prev, limiter!.exp_entropy_decrease_max, mesh_equations_solver_cache(semi)...) 
end + + return nothing end # Exponentiated entropy change for the thermodynamic entropy (see `entropy_thermodynamic`) diff --git a/src/callbacks_stage/entropy_bounded_limiter_1d.jl b/src/callbacks_stage/entropy_bounded_limiter_1d.jl index 217261d39f4..c90e90c7669 100644 --- a/src/callbacks_stage/entropy_bounded_limiter_1d.jl +++ b/src/callbacks_stage/entropy_bounded_limiter_1d.jl @@ -7,8 +7,6 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, mesh::AbstractMesh{1}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @threaded for element in eachelement(dg, cache) # Minimum exponentiated entropy within the current `element` # of the previous iterate `u_prev` @@ -35,14 +33,7 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, # Limiting only if entropy DECREASE below a user defined threshold is detected. d_exp_s_min < exp_entropy_decrease_max || continue - # Compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, element)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - u_mean += u_node * weights[i] - end - # Note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) entropy_change_mean = exp_entropy_change(pressure(u_mean, equations), density(u_mean, equations), diff --git a/src/callbacks_stage/entropy_bounded_limiter_2d.jl b/src/callbacks_stage/entropy_bounded_limiter_2d.jl index 36d70e4f2e5..85059cd71be 100644 --- a/src/callbacks_stage/entropy_bounded_limiter_2d.jl +++ b/src/callbacks_stage/entropy_bounded_limiter_2d.jl @@ -7,9 +7,6 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, mesh::AbstractMesh{2}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @unpack inverse_jacobian = cache.elements - @threaded for element in eachelement(dg, cache) # Minimum exponentiated entropy within the current `element` # of the 
previous iterate `u_prev` @@ -35,18 +32,8 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, # Detect if limiting is necessary. # Limiting only if entropy DECREASE below a user defined threshold is detected. d_exp_s_min < exp_entropy_decrease_max || continue - # Compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element)) - total_volume = zero(eltype(u)) - for j in eachnode(dg), i in eachnode(dg) - volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, - i, j, element))) - u_node = get_node_vars(u, equations, dg, i, j, element) - u_mean += u_node * weights[i] * weights[j] * volume_jacobian - total_volume += weights[i] * weights[j] * volume_jacobian - end - # normalize with the total volume - u_mean = u_mean / total_volume + + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) entropy_change_mean = exp_entropy_change(pressure(u_mean, equations), density(u_mean, equations), diff --git a/src/callbacks_stage/entropy_bounded_limiter_3d.jl b/src/callbacks_stage/entropy_bounded_limiter_3d.jl index 6f194f0814d..97326d2c354 100644 --- a/src/callbacks_stage/entropy_bounded_limiter_3d.jl +++ b/src/callbacks_stage/entropy_bounded_limiter_3d.jl @@ -7,9 +7,6 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, mesh::AbstractMesh{3}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @unpack inverse_jacobian = cache.elements - @threaded for element in eachelement(dg, cache) # Minimum exponentiated entropy within the current `element` # of the previous iterate `u_prev` @@ -35,18 +32,7 @@ function limiter_entropy_bounded!(u, u_prev, exp_entropy_decrease_max, # Detect if limiting is necessary. 
Avoid division by ("near") zero d_exp_s_min < exp_entropy_decrease_max || continue - # Compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, 1, element)) - total_volume = zero(eltype(u)) - for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, - i, j, k, element))) - u_node = get_node_vars(u, equations, dg, i, j, k, element) - u_mean += u_node * weights[i] * weights[j] * weights[k] * volume_jacobian - total_volume += weights[i] * weights[j] * weights[k] * volume_jacobian - end - # normalize with the total volume - u_mean = u_mean / total_volume + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) entropy_change_mean = exp_entropy_change(pressure(u_mean, equations), density(u_mean, equations), diff --git a/src/callbacks_stage/positivity_zhang_shu.jl b/src/callbacks_stage/positivity_zhang_shu.jl index 92141c4b26e..660aefe61f2 100644 --- a/src/callbacks_stage/positivity_zhang_shu.jl +++ b/src/callbacks_stage/positivity_zhang_shu.jl @@ -36,6 +36,15 @@ function (limiter!::PositivityPreservingLimiterZhangShu)(u_ode, integrator, limiter_zhang_shu!(u, limiter!.thresholds, limiter!.variables, mesh_equations_solver_cache(semi)...) 
end + + return nothing +end + +# Version used by the AMR callback +function (limiter!::PositivityPreservingLimiterZhangShu)(u, mesh, equations, solver, + cache) + limiter_zhang_shu!(u, limiter!.thresholds, limiter!.variables, mesh, equations, + solver, cache) end # Iterate over tuples in a type-stable way using "lispy tuple programming", diff --git a/src/callbacks_stage/positivity_zhang_shu_dg1d.jl b/src/callbacks_stage/positivity_zhang_shu_dg1d.jl index 7797eb95b09..64121dad745 100644 --- a/src/callbacks_stage/positivity_zhang_shu_dg1d.jl +++ b/src/callbacks_stage/positivity_zhang_shu_dg1d.jl @@ -7,8 +7,6 @@ function limiter_zhang_shu!(u, threshold::Real, variable, mesh::AbstractMesh{1}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @threaded for element in eachelement(dg, cache) # determine minimum value value_min = typemax(eltype(u)) @@ -20,14 +18,7 @@ function limiter_zhang_shu!(u, threshold::Real, variable, # detect if limiting is necessary value_min < threshold || continue - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, element)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - u_mean += u_node * weights[i] - end - # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) # We compute the value directly with the mean values, as we assume that # Jensen's inequality holds (e.g. pressure for compressible Euler equations). 
diff --git a/src/callbacks_stage/positivity_zhang_shu_dg2d.jl b/src/callbacks_stage/positivity_zhang_shu_dg2d.jl index 813dd65878b..38805f988b7 100644 --- a/src/callbacks_stage/positivity_zhang_shu_dg2d.jl +++ b/src/callbacks_stage/positivity_zhang_shu_dg2d.jl @@ -7,9 +7,6 @@ function limiter_zhang_shu!(u, threshold::Real, variable, mesh::AbstractMesh{2}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @unpack inverse_jacobian = cache.elements - @threaded for element in eachelement(dg, cache) # determine minimum value value_min = typemax(eltype(u)) @@ -21,18 +18,7 @@ function limiter_zhang_shu!(u, threshold::Real, variable, # detect if limiting is necessary value_min < threshold || continue - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element)) - total_volume = zero(eltype(u)) - for j in eachnode(dg), i in eachnode(dg) - volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, - i, j, element))) - u_node = get_node_vars(u, equations, dg, i, j, element) - u_mean += u_node * weights[i] * weights[j] * volume_jacobian - total_volume += weights[i] * weights[j] * volume_jacobian - end - # normalize with the total volume - u_mean = u_mean / total_volume + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) # We compute the value directly with the mean values, as we assume that # Jensen's inequality holds (e.g. pressure for compressible Euler equations). 
diff --git a/src/callbacks_stage/positivity_zhang_shu_dg3d.jl b/src/callbacks_stage/positivity_zhang_shu_dg3d.jl index 156abf35b4c..a99a9255d01 100644 --- a/src/callbacks_stage/positivity_zhang_shu_dg3d.jl +++ b/src/callbacks_stage/positivity_zhang_shu_dg3d.jl @@ -7,9 +7,6 @@ function limiter_zhang_shu!(u, threshold::Real, variable, mesh::AbstractMesh{3}, equations, dg::DGSEM, cache) - @unpack weights = dg.basis - @unpack inverse_jacobian = cache.elements - @threaded for element in eachelement(dg, cache) # determine minimum value value_min = typemax(eltype(u)) @@ -21,18 +18,7 @@ function limiter_zhang_shu!(u, threshold::Real, variable, # detect if limiting is necessary value_min < threshold || continue - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, 1, element)) - total_volume = zero(eltype(u)) - for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, - i, j, k, element))) - u_node = get_node_vars(u, equations, dg, i, j, k, element) - u_mean += u_node * weights[i] * weights[j] * weights[k] * volume_jacobian - total_volume += weights[i] * weights[j] * weights[k] * volume_jacobian - end - # normalize with the total volume - u_mean = u_mean / total_volume + u_mean = compute_u_mean(u, element, mesh, equations, dg, cache) # We compute the value directly with the mean values, as we assume that # Jensen's inequality holds (e.g. pressure for compressible Euler equations). 
diff --git a/src/callbacks_stage/subcell_limiter_idp_correction.jl b/src/callbacks_stage/subcell_limiter_idp_correction.jl index d234a6a5507..e1cb42035d1 100644 --- a/src/callbacks_stage/subcell_limiter_idp_correction.jl +++ b/src/callbacks_stage/subcell_limiter_idp_correction.jl @@ -50,6 +50,7 @@ function (limiter!::SubcellLimiterIDPCorrection)(u_ode, semi, t, dt, # f_ij = alpha_ij * f^(FV)_ij + (1 - alpha_ij) * f^(DG)_ij # = f^(FV)_ij + (1 - alpha_ij) * f^(antidiffusive)_ij @trixi_timeit timer() "blending factors" solver.volume_integral.limiter(u, semi, + equations, solver, t, dt) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index c86a1bdb44a..04752770f08 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -10,12 +10,14 @@ interval, adapt_initial_condition=true, adapt_initial_condition_only_refine=true, - dynamic_load_balancing=true) + dynamic_load_balancing=true, + limiter! = nothing) -Performs adaptive mesh refinement (AMR) every `interval` time steps -for a given semidiscretization `semi` using the chosen `controller`. +Performs adaptive mesh refinement (AMR) every `interval` time steps for a given semidiscretization +`semi` using the chosen `controller`. If specified, a positivity-preserving limiter `limiter!` is +applied to the solution after refinement and coarsening. """ -struct AMRCallback{Controller, Adaptor, Cache} +struct AMRCallback{Controller, Adaptor, Cache, Limiter} controller::Controller interval::Int adapt_initial_condition::Bool @@ -23,13 +25,15 @@ struct AMRCallback{Controller, Adaptor, Cache} dynamic_load_balancing::Bool adaptor::Adaptor amr_cache::Cache + limiter!::Limiter end function AMRCallback(semi, controller, adaptor; interval, adapt_initial_condition = true, adapt_initial_condition_only_refine = true, - dynamic_load_balancing = true) + dynamic_load_balancing = true, + limiter! 
= nothing) # check arguments if !(interval isa Integer && interval >= 0) throw(ArgumentError("`interval` must be a non-negative integer (provided `interval = $interval`)")) @@ -54,13 +58,15 @@ function AMRCallback(semi, controller, adaptor; to_coarsen = Int[] amr_cache = (; to_refine, to_coarsen) - amr_callback = AMRCallback{typeof(controller), typeof(adaptor), typeof(amr_cache)}(controller, - interval, - adapt_initial_condition, - adapt_initial_condition_only_refine, - dynamic_load_balancing, - adaptor, - amr_cache) + amr_callback = AMRCallback{typeof(controller), typeof(adaptor), typeof(amr_cache), + typeof(limiter!)}(controller, + interval, + adapt_initial_condition, + adapt_initial_condition_only_refine, + dynamic_load_balancing, + adaptor, + amr_cache, + limiter!) DiscreteCallback(condition, amr_callback, save_positions = (false, false), @@ -104,6 +110,9 @@ function Base.show(io::IO, mime::MIME"text/plain", amr_callback.adapt_initial_condition_only_refine ? "yes" : "no") end + if amr_callback.limiter! !== nothing + summary_line(io, "limiter", amr_callback.limiter!) + end summary_footer(io) end end @@ -152,6 +161,17 @@ function initialize!(cb::DiscreteCallback{Condition, Affect!}, u, t, break end end + + # Update initial state integrals of analysis callback if it exists + # See https://github.com/trixi-framework/Trixi.jl/issues/2536 for more information. + index = findfirst(cb -> cb.affect! isa AnalysisCallback, + integrator.opts.callback.discrete_callbacks) + if !isnothing(index) + analysis_callback = integrator.opts.callback.discrete_callbacks[index].affect! + + initial_state_integrals = integrate(integrator.u, semi) + analysis_callback.initial_state_integrals = initial_state_integrals + end end return nothing @@ -221,7 +241,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, t, iter; only_refine = false, only_coarsen = false, passive_args = ()) - @unpack controller, adaptor = amr_callback + @unpack controller, adaptor, limiter! 
= amr_callback u = wrap_array(u_ode, mesh, equations, dg, cache) lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache, @@ -265,11 +285,11 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # refine solver @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, - cache, elements_to_refine) + cache, elements_to_refine, limiter!) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @trixi_timeit timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, - elements_to_refine) + elements_to_refine, limiter!) end else # If there is nothing to refine, create empty array for later use @@ -332,11 +352,12 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # coarsen solver @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, - cache, elements_to_remove) + cache, elements_to_remove, limiter!) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @trixi_timeit timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, - elements_to_remove) + elements_to_remove, + limiter!) end else # If there is nothing to coarsen, create empty array for later use @@ -371,7 +392,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, semi::SemidiscretizationHyperbolicParabolic, t, iter; only_refine = false, only_coarsen = false) - @unpack controller, adaptor = amr_callback + @unpack controller, adaptor, limiter! = amr_callback u = wrap_array(u_ode, mesh, equations, dg, cache) # Indicator kept based on hyperbolic variables @@ -420,7 +441,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # refine solver @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, cache_parabolic, - elements_to_refine) + elements_to_refine, limiter!) 
else # If there is nothing to refine, create empty array for later use refined_original_cells = Int[] @@ -484,7 +505,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh, # coarsen solver @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, cache_parabolic, - elements_to_remove) + elements_to_remove, limiter!) else # If there is nothing to coarsen, create empty array for later use coarsened_original_cells = Int[] @@ -545,7 +566,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, t, iter; only_refine = false, only_coarsen = false, passive_args = ()) - @unpack controller, adaptor = amr_callback + @unpack controller, adaptor, limiter! = amr_callback u = wrap_array(u_ode, mesh, equations, dg, cache) lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache, @@ -567,14 +588,18 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, refined_original_cells = @trixi_timeit timer() "mesh" refine!(mesh) # Refine solver - @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, + @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, + equations, dg, cache, cache_parabolic, - refined_original_cells) + refined_original_cells, + limiter!) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args - @trixi_timeit timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, + @trixi_timeit timer() "passive solver" refine!(p_u_ode, adaptor, + p_mesh, p_equations, p_dg, p_cache, - refined_original_cells) + refined_original_cells, + limiter!) end else # If there is nothing to refine, create empty array for later use @@ -588,12 +613,14 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # coarsen solver @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, cache_parabolic, - coarsened_original_cells) + coarsened_original_cells, + limiter!) 
for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @trixi_timeit timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, - coarsened_original_cells) + coarsened_original_cells, + limiter!) end else # If there is nothing to coarsen, create empty array for later use @@ -651,7 +678,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, t, iter; only_refine = false, only_coarsen = false, passive_args = ()) - @unpack controller, adaptor = amr_callback + @unpack controller, adaptor, limiter! = amr_callback u = wrap_array(u_ode, mesh, equations, dg, cache) lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache, @@ -675,12 +702,14 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # Refine solver @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, cache, - refined_original_cells) + refined_original_cells, + limiter!) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @trixi_timeit timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, - refined_original_cells) + refined_original_cells, + limiter!) end else # If there is nothing to refine, create empty array for later use @@ -694,12 +723,13 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, # coarsen solver @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, cache, - coarsened_original_cells) + coarsened_original_cells, limiter!) for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args @trixi_timeit timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, p_equations, p_dg, p_cache, - coarsened_original_cells) + coarsened_original_cells, + limiter!) 
end else # If there is nothing to coarsen, create empty array for later use @@ -743,7 +773,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, passive_args = ()) has_changed = false - @unpack controller, adaptor = amr_callback + @unpack controller, adaptor, limiter! = amr_callback u = wrap_array(u_ode, mesh, equations, dg, cache) indicators = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, @@ -774,7 +804,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, if has_changed @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, - cache, difference) + cache, difference, limiter!) end end diff --git a/src/callbacks_step/amr_dg.jl b/src/callbacks_step/amr_dg.jl index 19fa1dbab1e..663278232c7 100644 --- a/src/callbacks_step/amr_dg.jl +++ b/src/callbacks_step/amr_dg.jl @@ -89,6 +89,8 @@ function rebalance_solver!(u_ode::AbstractVector, MPI.Waitall(requests, MPI.Status) end end # GC.@preserve old_u_ode + + return nothing end # Construct cache for ControllerThreeLevel and ControllerThreeLevelCombined. diff --git a/src/callbacks_step/amr_dg1d.jl b/src/callbacks_step/amr_dg1d.jl index 09248a599c5..f4445b24f96 100644 --- a/src/callbacks_step/amr_dg1d.jl +++ b/src/callbacks_step/amr_dg1d.jl @@ -7,7 +7,7 @@ # Refine elements in the DG solver based on a list of cell_ids that should be refined function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, - equations, dg::DGSEM, cache, elements_to_refine) + equations, dg::DGSEM, cache, elements_to_refine, limiter!) # Return early if there is nothing to do if isempty(elements_to_refine) return @@ -73,15 +73,20 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, @assert ninterfaces(interfaces)==1 * nelements(dg, cache) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements") end + # Apply the positivity limiter to the solution + if limiter! 
!== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, equations, dg::DGSEM, cache, cache_parabolic, - elements_to_refine) + elements_to_refine, limiter!) # Call `refine!` for the hyperbolic part, which does the heavy lifting of # actually transferring the solution to the refined cells - refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine) + refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine, limiter!) # Resize parabolic helper variables @unpack viscous_container = cache_parabolic @@ -144,7 +149,7 @@ end # Coarsen elements in the DG solver based on a list of cell_ids that should be removed function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, - equations, dg::DGSEM, cache, elements_to_remove) + equations, dg::DGSEM, cache, elements_to_remove, limiter!) # Return early if there is nothing to do if isempty(elements_to_remove) return @@ -219,15 +224,20 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, @assert ninterfaces(interfaces)==1 * nelements(dg, cache) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements") end + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1}, equations, dg::DGSEM, cache, cache_parabolic, - elements_to_remove) + elements_to_remove, limiter!) # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of # actually transferring the solution to the coarsened cells - coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove) + coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove, limiter!) 
# Resize parabolic helper variables @unpack viscous_container = cache_parabolic @@ -282,5 +292,7 @@ function coarsen_elements!(u::AbstractArray{<:Any, 3}, element_id, # Update value set_node_vars!(u, acc, equations, dg, i, element_id) end + + return nothing end end # @muladd diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 5ccae02b575..0b829e0db6f 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -74,6 +74,8 @@ function rebalance_solver!(u_ode::AbstractVector, mesh::TreeMesh{2}, equations, MPI.Waitall(requests, MPI.Status) end end # GC.@preserve old_u_ode + + return nothing end # Refine elements in the DG solver based on a list of cell_ids that should be refined. @@ -81,7 +83,7 @@ end # from the parent element into the four children elements. The solution on each child # element is then recovered by dividing by the new element Jacobians. function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}}, - equations, dg::DGSEM, cache, elements_to_refine) + equations, dg::DGSEM, cache, elements_to_refine, limiter!) # Return early if there is nothing to do if isempty(elements_to_refine) if mpi_isparallel() @@ -174,16 +176,21 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estM @assert ninterfaces(cache.interfaces)==ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, - elements_to_refine) + elements_to_refine, limiter!) 
# Call `refine!` for the hyperbolic part, which does the heavy lifting of # actually transferring the solution to the refined cells - refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine) + refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine, limiter!) # Resize parabolic helper variables @unpack viscous_container = cache_parabolic @@ -275,7 +282,7 @@ end # element is then recovered by dividing by the new element Jacobian. function coarsen!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}}, - equations, dg::DGSEM, cache, elements_to_remove) + equations, dg::DGSEM, cache, elements_to_remove, limiter!) # Return early if there is nothing to do if isempty(elements_to_remove) if mpi_isparallel() @@ -375,16 +382,21 @@ function coarsen!(u_ode::AbstractVector, adaptor, @assert ninterfaces(cache.interfaces)==ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end function coarsen!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, - elements_to_remove) + elements_to_remove, limiter!) # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of # actually transferring the solution to the coarsened cells - coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove) + coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove, limiter!) # Resize parabolic helper variables @unpack viscous_container = cache_parabolic @@ -461,7 +473,7 @@ end # Coarsen and refine elements in the DG solver based on a difference list. 
function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, - dg::DGSEM, cache, difference) + dg::DGSEM, cache, difference, limiter!) # Return early if there is nothing to do. if !any(difference .!= 0) @@ -568,6 +580,11 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, end # while end # GC.@preserve old_u_ode old_inverse_jacobian + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end end # @muladd diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index 5b3e824d041..f0eb5e65771 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -10,7 +10,7 @@ # from the parent element into the eight children elements. The solution on each child # element is then recovered by dividing by the new element Jacobians. function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{3}, P4estMesh{3}}, - equations, dg::DGSEM, cache, elements_to_refine) + equations, dg::DGSEM, cache, elements_to_refine, limiter!) # Return early if there is nothing to do if isempty(elements_to_refine) if mpi_isparallel() @@ -106,6 +106,11 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{3}, P4estM @assert ninterfaces(cache.interfaces)==ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end @@ -191,7 +196,7 @@ end # element is then recovered by dividing by the new element Jacobian. function coarsen!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{3}, P4estMesh{3}}, - equations, dg::DGSEM, cache, elements_to_remove) + equations, dg::DGSEM, cache, elements_to_remove, limiter!) 
# Return early if there is nothing to do if isempty(elements_to_remove) if mpi_isparallel() @@ -294,6 +299,11 @@ function coarsen!(u_ode::AbstractVector, adaptor, @assert ninterfaces(cache.interfaces)==ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end + # Apply the positivity limiter to the solution + if limiter! !== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end @@ -383,7 +393,7 @@ end # Coarsen and refine elements in the DG solver based on a difference list. function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, - dg::DGSEM, cache, difference) + dg::DGSEM, cache, difference, limiter!) # Return early if there is nothing to do. if !any(difference .!= 0) @@ -496,6 +506,11 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, end # while end # GC.@preserve old_u_ode old_inverse_jacobian + # Apply the positivity limiter to the solution + if limiter! 
!== nothing + limiter!(u, mesh, equations, dg, cache) + end + return nothing end end # @muladd diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl index 8b7cfd14832..fa18c5af63a 100644 --- a/src/callbacks_step/analysis_dg2d.jl +++ b/src/callbacks_step/analysis_dg2d.jl @@ -377,10 +377,10 @@ function analyze(::Val{:linf_divb}, du, u, t, for j in eachnode(dg), i in eachnode(dg) divb = zero(eltype(u)) # Get the contravariant vectors Ja^1 and Ja^2 - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, - element) - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, - element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, + i, j, element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, + i, j, element) # Compute the transformed divergence for k in eachnode(dg) u_kj = get_node_vars(u, equations, dg, k, j, element) diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index c5011d558df..072ffc16096 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -343,12 +343,12 @@ function analyze(::Val{:l2_divb}, du, u, t, dg, cache, derivative_matrix divb = zero(eltype(u)) # Get the contravariant vectors Ja^1, Ja^2, and Ja^3 - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, k, - element) - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, k, - element) - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, - element) + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, + i, j, k, element) + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, + i, j, k, element) + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, + i, j, k, element) # Compute the transformed divergence for l in eachnode(dg) u_ljk = get_node_vars(u, equations, dg, l, j, k, element) @@ -420,12 +420,12 @@ 
function analyze(::Val{:linf_divb}, du, u, t, for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) divb = zero(eltype(u)) # Get the contravariant vectors Ja^1, Ja^2, and Ja^3 - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, - k, element) - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, - k, element) - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, - k, element) + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, + i, j, k, element) + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, + i, j, k, element) + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, + i, j, k, element) # Compute the transformed divergence for l in eachnode(dg) u_ljk = get_node_vars(u, equations, dg, l, j, k, element) diff --git a/src/callbacks_step/callbacks_step.jl b/src/callbacks_step/callbacks_step.jl index 09d197bf225..5979f557abd 100644 --- a/src/callbacks_step/callbacks_step.jl +++ b/src/callbacks_step/callbacks_step.jl @@ -9,7 +9,7 @@ # that should be saved function get_element_variables!(element_variables, u, mesh, equations, solver, cache, callback; kwargs...) - nothing + return nothing end @inline function get_element_variables!(element_variables, u_ode, diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl index bb28b8f1d98..ac40bc42de0 100644 --- a/src/callbacks_step/save_solution.jl +++ b/src/callbacks_step/save_solution.jl @@ -276,6 +276,8 @@ end element_variables, solution_callback.node_variables, system = system) + + return nothing end @inline function save_solution_file(u_ode, t, dt, iter, @@ -289,6 +291,8 @@ end solution_callback, element_variables, node_variables; system = system) + + return nothing end # TODO: Taal refactor, move save_mesh_file? 
diff --git a/src/callbacks_step/stepsize_dg1d.jl b/src/callbacks_step/stepsize_dg1d.jl index cdb7ed21de4..7be0f074135 100644 --- a/src/callbacks_step/stepsize_dg1d.jl +++ b/src/callbacks_step/stepsize_dg1d.jl @@ -31,8 +31,9 @@ function max_dt(u, t, mesh::TreeMesh{1}, # e.g. for steady-state linear advection max_scaled_speed = nextfloat(zero(t)) + max_lambda1, = max_abs_speeds(equations) + @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) - max_lambda1, = max_abs_speeds(equations) inv_jacobian = cache.elements.inverse_jacobian[element] max_scaled_speed = max(max_scaled_speed, inv_jacobian * max_lambda1) end @@ -70,9 +71,9 @@ function max_dt(u, t, mesh::StructuredMesh{1}, # e.g. for steady-state linear advection max_scaled_speed = nextfloat(zero(t)) - @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) - max_lambda1, = max_abs_speeds(equations) + max_lambda1, = max_abs_speeds(equations) + @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) for i in eachnode(dg) inv_jacobian = cache.elements.inverse_jacobian[i, element] max_scaled_speed = max(max_scaled_speed, inv_jacobian * max_lambda1) diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl index 1ce1385dd6a..a7c0dd2a0af 100644 --- a/src/callbacks_step/stepsize_dg2d.jl +++ b/src/callbacks_step/stepsize_dg2d.jl @@ -33,8 +33,9 @@ function max_dt(u, t, mesh::TreeMesh{2}, # e.g. 
for steady-state linear advection max_scaled_speed = nextfloat(zero(t)) + max_lambda1, max_lambda2 = max_abs_speeds(equations) + @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) - max_lambda1, max_lambda2 = max_abs_speeds(equations) inv_jacobian = cache.elements.inverse_jacobian[element] max_scaled_speed = max(max_scaled_speed, inv_jacobian * (max_lambda1 + max_lambda2)) @@ -94,11 +95,11 @@ function max_dt(u, t, lambda1, lambda2 = max_abs_speeds(u_node, equations) # Local speeds transformed to the reference element - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, - element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, + i, j, element) lambda1_transformed = abs(Ja11 * lambda1 + Ja12 * lambda2) - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, - element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, + i, j, element) lambda2_transformed = abs(Ja21 * lambda1 + Ja22 * lambda2) inv_jacobian = abs(inverse_jacobian[i, j, element]) @@ -128,11 +129,11 @@ function max_dt(u, t, @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) for j in eachnode(dg), i in eachnode(dg) # Local speeds transformed to the reference element - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, - element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, + i, j, element) lambda1_transformed = abs(Ja11 * max_lambda1 + Ja12 * max_lambda2) - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, - element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, + i, j, element) lambda2_transformed = abs(Ja21 * max_lambda1 + Ja22 * max_lambda2) inv_jacobian = abs(inverse_jacobian[i, j, element]) diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index 3324e819cee..897f7d8b22b 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ 
-34,8 +34,9 @@ function max_dt(u, t, mesh::TreeMesh{3}, # e.g. for steady-state linear advection max_scaled_speed = nextfloat(zero(t)) + max_lambda1, max_lambda2, max_lambda3 = max_abs_speeds(equations) + @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) - max_lambda1, max_lambda2, max_lambda3 = max_abs_speeds(equations) inv_jacobian = cache.elements.inverse_jacobian[element] max_scaled_speed = max(max_scaled_speed, inv_jacobian * (max_lambda1 + max_lambda2 + max_lambda3)) @@ -58,14 +59,14 @@ function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3} u_node = get_node_vars(u, equations, dg, i, j, k, element) lambda1, lambda2, lambda3 = max_abs_speeds(u_node, equations) - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, - k, element) + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, + i, j, k, element) lambda1_transformed = abs(Ja11 * lambda1 + Ja12 * lambda2 + Ja13 * lambda3) - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, - k, element) + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, + i, j, k, element) lambda2_transformed = abs(Ja21 * lambda1 + Ja22 * lambda2 + Ja23 * lambda3) - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, - k, element) + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, + i, j, k, element) lambda3_transformed = abs(Ja31 * lambda1 + Ja32 * lambda2 + Ja33 * lambda3) inv_jacobian = abs(cache.elements.inverse_jacobian[i, j, k, element]) @@ -94,16 +95,16 @@ function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3} @batch reduction=(max, max_scaled_speed) for element in eachelement(dg, cache) for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, - k, element) + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, + i, j, k, 
element) lambda1_transformed = abs(Ja11 * max_lambda1 + Ja12 * max_lambda2 + Ja13 * max_lambda3) - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, - k, element) + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, + i, j, k, element) lambda2_transformed = abs(Ja21 * max_lambda1 + Ja22 * max_lambda2 + Ja23 * max_lambda3) - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, - k, element) + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, + i, j, k, element) lambda3_transformed = abs(Ja31 * max_lambda1 + Ja32 * max_lambda2 + Ja33 * max_lambda3) diff --git a/src/callbacks_step/summary.jl b/src/callbacks_step/summary.jl index 4b3947582ef..3194cad0e6f 100644 --- a/src/callbacks_step/summary.jl +++ b/src/callbacks_step/summary.jl @@ -209,9 +209,7 @@ function initialize_summary_callback(cb::DiscreteCallback, u, t, integrator; # technical details setup = Pair{String, Any}["#threads" => Threads.nthreads()] - if !_PREFERENCE_POLYESTER - push!(setup, "Polyester" => "disabled") - end + push!(setup, "threading backend" => string(_PREFERENCE_THREADING)) if !_PREFERENCE_LOOPVECTORIZATION push!(setup, "LoopVectorization" => "disabled") end diff --git a/src/callbacks_step/time_series_dg_tree.jl b/src/callbacks_step/time_series_dg_tree.jl index 0af1688a8ed..399fda27678 100644 --- a/src/callbacks_step/time_series_dg_tree.jl +++ b/src/callbacks_step/time_series_dg_tree.jl @@ -114,6 +114,8 @@ function record_state_at_points!(point_data, u, solution_variables, end end end + + return nothing end # Record the solution variables at each given point for the 2D case @@ -147,6 +149,8 @@ function record_state_at_points!(point_data, u, solution_variables, end end end + + return nothing end # Record the solution variables at each given point for the 3D case @@ -181,5 +185,7 @@ function record_state_at_points!(point_data, u, solution_variables, end end end + + return nothing end end # @muladd diff --git 
a/src/callbacks_step/time_series_dg_unstructured.jl b/src/callbacks_step/time_series_dg_unstructured.jl index 85427f1273a..fde92401061 100644 --- a/src/callbacks_step/time_series_dg_unstructured.jl +++ b/src/callbacks_step/time_series_dg_unstructured.jl @@ -301,5 +301,7 @@ function record_state_at_points!(point_data, u, solution_variables, end end end + + return nothing end end # @muladd diff --git a/src/callbacks_step/visualization.jl b/src/callbacks_step/visualization.jl index 302e7e4462a..367b30bd573 100644 --- a/src/callbacks_step/visualization.jl +++ b/src/callbacks_step/visualization.jl @@ -5,13 +5,13 @@ @muladd begin #! format: noindent -mutable struct VisualizationCallback{SolutionVariables, VariableNames, PlotDataCreator, +mutable struct VisualizationCallback{PlotDataCreator, SolutionVariables, VariableNames, PlotCreator} + plot_data_creator::PlotDataCreator interval::Int solution_variables::SolutionVariables variable_names::VariableNames show_mesh::Bool - plot_data_creator::PlotDataCreator plot_creator::PlotCreator plot_arguments::Dict{Symbol, Any} end @@ -22,13 +22,13 @@ function Base.show(io::IO, VisualizationCallback } visualization_callback = cb.affect! - @unpack interval, plot_arguments, solution_variables, variable_names, show_mesh, plot_creator, plot_data_creator = visualization_callback + @unpack plot_data_creator, interval, plot_arguments, solution_variables, variable_names, show_mesh, plot_creator = visualization_callback print(io, "VisualizationCallback(", + "plot_data_creator=", plot_data_creator, ", ", "interval=", interval, ", ", "solution_variables=", solution_variables, ", ", "variable_names=", variable_names, ", ", "show_mesh=", show_mesh, ", ", - "plot_data_creator=", plot_data_creator, ", ", "plot_creator=", plot_creator, ", ", "plot_arguments=", plot_arguments, ")") end @@ -44,32 +44,32 @@ function Base.show(io::IO, ::MIME"text/plain", visualization_callback = cb.affect! 
setup = [ + "plot data creator" => visualization_callback.plot_data_creator, "interval" => visualization_callback.interval, "plot arguments" => visualization_callback.plot_arguments, "solution variables" => visualization_callback.solution_variables, "variable names" => visualization_callback.variable_names, "show mesh" => visualization_callback.show_mesh, - "plot creator" => visualization_callback.plot_creator, - "plot data creator" => visualization_callback.plot_data_creator + "plot creator" => visualization_callback.plot_creator ] summary_box(io, "VisualizationCallback", setup) end end """ - VisualizationCallback(; interval=0, - solution_variables=cons2prim, - variable_names=[], - show_mesh=false, - plot_data_creator=PlotData2D, - plot_creator=show_plot, - plot_arguments...) + VisualizationCallback(semi, plot_data_creator = nothing; + interval=0, + solution_variables=cons2prim, + variable_names=[], + show_mesh=false, + plot_creator=show_plot, + plot_arguments...) Create a callback that visualizes results during a simulation, also known as *in-situ visualization*. -!!! warning "Experimental implementation" - This is an experimental feature and may change in any future releases. +To customize the generated figure, `plot_data_creator` allows to use different plot data types. +Currently provided are [`PlotData1D`](@ref) and [`PlotData2D`](@ref), while the latter is used for both 2D and 3D. The `interval` specifies the number of time step iterations after which a new plot is generated. The available variables to plot are configured with the `solution_variables` parameter, which acts the @@ -77,16 +77,15 @@ same way as for the [`SaveSolutionCallback`](@ref). The variables to be actually selected by providing a single string or a list of strings to `variable_names`, and if `show_mesh` is `true`, an additional plot with the mesh will be generated. -To customize the generated figure, `plot_data_creator` allows to use different plot data types. 
With -`plot_creator` you can further specify an own function to visualize results, which must support the +With `plot_creator` you can further specify an own function to visualize results, which must support the same interface as the default implementation [`show_plot`](@ref). All remaining keyword arguments are collected and passed as additional arguments to the plotting command. """ -function VisualizationCallback(; interval = 0, +function VisualizationCallback(semi, plot_data_creator = nothing; + interval = 0, solution_variables = cons2prim, variable_names = [], show_mesh = false, - plot_data_creator = PlotData2D, plot_creator = show_plot, plot_arguments...) mpi_isparallel() && error("this callback does not work in parallel yet") @@ -95,10 +94,19 @@ function VisualizationCallback(; interval = 0, variable_names = String[variable_names] end - visualization_callback = VisualizationCallback(interval, + if plot_data_creator === nothing # No custom plot data type provided + if ndims(semi) == 1 + plot_data_creator = PlotData1D + else # 2D or 3D + plot_data_creator = PlotData2D + end + end + + visualization_callback = VisualizationCallback(plot_data_creator, + interval, solution_variables, variable_names, show_mesh, - plot_data_creator, plot_creator, + plot_creator, Dict{Symbol, Any}(plot_arguments)) # Warn users if they create a visualization callback without having loaded the Plots package @@ -145,7 +153,7 @@ end function (visualization_callback::VisualizationCallback)(integrator) u_ode = integrator.u semi = integrator.p - @unpack plot_arguments, solution_variables, variable_names, show_mesh, plot_data_creator, plot_creator = visualization_callback + @unpack plot_data_creator, plot_arguments, solution_variables, variable_names, show_mesh, plot_creator = visualization_callback # Extract plot data plot_data = plot_data_creator(u_ode, semi, solution_variables = solution_variables) @@ -177,9 +185,6 @@ variables in `variable_names` and, optionally, the mesh (if `show_mesh` 
is `true This function is the default `plot_creator` argument for the [`VisualizationCallback`](@ref). `time` and `timestep` are currently unused by this function. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - See also: [`VisualizationCallback`](@ref), [`save_plot`](@ref) """ function show_plot(plot_data, variable_names; @@ -229,9 +234,6 @@ is `true`). Additionally, `plot_arguments` will be unpacked and passed as keywo The `timestep` is used in the filename. `time` is currently unused by this function. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - See also: [`VisualizationCallback`](@ref), [`show_plot`](@ref) """ function save_plot(plot_data, variable_names; diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl index ad4723ff4bf..573c94194e9 100644 --- a/src/equations/compressible_euler_3d.jl +++ b/src/equations/compressible_euler_3d.jl @@ -577,27 +577,20 @@ end # Unpack left and right state rho_e_ll = last(u_ll) rho_e_rr = last(u_rr) - rho_ll, rho_v1_ll, rho_v2_ll, rho_v3_ll, rho_e_ll = u_ll - rho_rr, rho_v1_rr, rho_v2_rr, rho_v3_rr, rho_e_rr = u_rr - - v1_ll = rho_v1_ll / rho_ll - v2_ll = rho_v2_ll / rho_ll - v3_ll = rho_v3_ll / rho_ll - v1_rr = rho_v1_rr / rho_rr - v2_rr = rho_v2_rr / rho_rr - v3_rr = rho_v3_rr / rho_rr + rho_ll, v1_ll, v2_ll, v3_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, v3_rr, p_rr = cons2prim(u_rr, equations) # Average each factor of products in flux rho_avg = 0.5f0 * (rho_ll + rho_rr) v1_avg = 0.5f0 * (v1_ll + v1_rr) v2_avg = 0.5f0 * (v2_ll + v2_rr) v3_avg = 0.5f0 * (v3_ll + v3_rr) + p_avg = 0.5f0 * (p_ll + p_rr) + e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr) + v_dot_n_avg = v1_avg * normal_direction[1] + v2_avg * normal_direction[2] + v3_avg * normal_direction[3] - p_avg = 0.5f0 * ((equations.gamma - 1) * - (rho_e_ll - 0.5f0 * rho_ll * 
(v1_ll^2 + v2_ll^2 + v3_ll^2)) + - (equations.gamma - 1) * - (rho_e_rr - 0.5f0 * rho_rr * (v1_rr^2 + v2_rr^2 + v3_rr^2))) + p_avg = 0.5f0 * (p_ll + p_rr) e_avg = 0.5f0 * (rho_e_ll / rho_ll + rho_e_rr / rho_rr) # Calculate fluxes depending on normal_direction diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl index 5d0e57c6e3a..89a0e87b9a2 100644 --- a/src/equations/compressible_navier_stokes_1d.jl +++ b/src/equations/compressible_navier_stokes_1d.jl @@ -30,28 +30,28 @@ The particular form of the compressible Navier-Stokes implemented is ```math \frac{\partial}{\partial t} \begin{pmatrix} -\rho \\ \rho v \\ \rho e +\rho \\ \rho v_1 \\ \rho e \end{pmatrix} + \frac{\partial}{\partial x} \begin{pmatrix} - \rho v \\ \rho v^2 + p \\ (\rho e + p) v + \rho v_1 \\ \rho v_1^2 + p \\ (\rho e + p) v_1 \end{pmatrix} = \frac{\partial}{\partial x} \begin{pmatrix} -0 \\ \tau \\ \tau v - q +0 \\ \tau \\ \tau v_1 - q \end{pmatrix} ``` where the system is closed with the ideal gas assumption giving ```math -p = (\gamma - 1) \left( \rho e - \frac{1}{2} \rho v^2 \right) +p = (\gamma - 1) \left( \rho e - \frac{1}{2} \rho v_1^2 \right) ``` as the pressure. The value of the adiabatic constant `gamma` is taken from the [`CompressibleEulerEquations1D`](@ref). The terms on the right hand side of the system above are built from the viscous stress ```math -\tau = \mu \frac{\partial}{\partial x} v +\tau = \mu \frac{\partial}{\partial x} v_1 ``` where the heat flux is ```math @@ -73,7 +73,7 @@ which is the form implemented below in the [`flux`](@ref) function. 
In one spatial dimensions we require gradients for two quantities, e.g., primitive quantities ```math -\frac{\partial}{\partial x} v,\, \frac{\partial}{\partial x} T +\frac{\partial}{\partial x} v_1,\, \frac{\partial}{\partial x} T ``` or the entropy variables ```math @@ -81,7 +81,7 @@ or the entropy variables ``` where ```math -w_2 = \frac{\rho v1}{p},\, w_3 = -\frac{\rho}{p} +w_2 = \frac{\rho v_1}{p},\, w_3 = -\frac{\rho}{p} ``` """ struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, Mu, @@ -124,8 +124,8 @@ end # TODO: parabolic # This is the flexibility a user should have to select the different gradient variable types -# varnames(::typeof(cons2prim) , ::CompressibleNavierStokesDiffusion1D) = ("v1", "v2", "T") -# varnames(::typeof(cons2entropy), ::CompressibleNavierStokesDiffusion1D) = ("w2", "w3", "w4") +# varnames(::typeof(cons2prim) , ::CompressibleNavierStokesDiffusion1D) = ("v1", "T") +# varnames(::typeof(cons2entropy), ::CompressibleNavierStokesDiffusion1D) = ("w2", "w3") function varnames(variable_mapping, equations_parabolic::CompressibleNavierStokesDiffusion1D) @@ -150,7 +150,7 @@ function flux(u, gradients, orientation::Integer, equations::CompressibleNavierStokesDiffusion1D) # Here, `u` is assumed to be the "transformed" variables specified by `gradient_variable_transformation`. _, v1, _ = convert_transformed_to_primitive(u, equations) - # Here `gradients` is assumed to contain the gradients of the primitive variables (rho, v1, v2, T) + # Here `gradients` is assumed to contain the gradients of the primitive variables (rho, v1, T) # either computed directly or reverse engineered from the gradient of the entropy variables # by way of the `convert_gradient_variables` function. 
_, dv1dx, dTdx = convert_derivative_to_primitive(u, gradients, equations) @@ -215,8 +215,8 @@ end return cons2prim(entropy2cons(u_transformed, equations), equations) end -# Takes the solution values `u` and gradient of the entropy variables (w_2, w_3, w_4) and -# reverse engineers the gradients to be terms of the primitive variables (v1, v2, T). +# Takes the solution values `u` and gradient of the entropy variables (w_2, w_3) and +# reverse engineers the gradients to be terms of the primitive variables (v1, T). # Helpful because then the diffusive fluxes have the same form as on paper. # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @@ -230,7 +230,7 @@ end equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. - # We can fix this if we directly compute v1, v2, T from the entropy variables + # We can fix this if we directly compute v1, T from the entropy variables u = entropy2cons(w, equations) # calls a "modified" entropy2cons defined for CompressibleNavierStokesDiffusion1D rho, rho_v1, _ = u diff --git a/src/equations/equations.jl b/src/equations/equations.jl index d952deab7bd..850aec70ad5 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -569,8 +569,8 @@ end include("numerical_fluxes.jl") # Linear scalar advection -abstract type AbstractLinearScalarAdvectionEquation{NDIMS, NVARS} <: - AbstractEquations{NDIMS, NVARS} end +abstract type AbstractLinearScalarAdvectionEquation{NDIMS} <: + AbstractEquations{NDIMS, 1} end include("linear_scalar_advection_1d.jl") include("linear_scalar_advection_2d.jl") include("linear_scalar_advection_3d.jl") diff --git a/src/equations/ideal_glm_mhd_multiion.jl b/src/equations/ideal_glm_mhd_multiion.jl index 0d0229e8750..f3962ca74da 100644 --- a/src/equations/ideal_glm_mhd_multiion.jl +++ 
b/src/equations/ideal_glm_mhd_multiion.jl @@ -288,6 +288,30 @@ end return SVector(cons) end +# Computes the sum of the densities times the sum of the pressures +@inline function density_pressure(u, equations::AbstractIdealGlmMhdMultiIonEquations) + B1, B2, B3 = magnetic_field(u, equations) + psi = divergence_cleaning_field(u, equations) + + rho_total = zero(real(equations)) + p_total = zero(real(equations)) + for k in eachcomponent(equations) + rho, rho_v1, rho_v2, rho_v3, rho_e = get_component(k, u, equations) + rho_inv = 1 / rho + v1 = rho_v1 * rho_inv + v2 = rho_v2 * rho_inv + v3 = rho_v3 * rho_inv + gamma = equations.gammas[k] + + p = (gamma - 1) * + (rho_e - 0.5 * rho * (v1^2 + v2^2 + v3^2) - 0.5 * (B1^2 + B2^2 + B3^2) - 0.5 * psi^2) + + rho_total += rho + p_total += p + end + return rho_total * p_total +end + # Specialization of [`DissipationLaxFriedrichsEntropyVariables`](@ref) for the multi-ion GLM-MHD equations # For details on the multi-ion entropy Jacobian ``H`` see # - A. Rueda-Ramírez, A. Sikstel, G. 
Gassner, An Entropy-Stable Discontinuous Galerkin Discretization diff --git a/src/equations/ideal_glm_mhd_multiion_3d.jl b/src/equations/ideal_glm_mhd_multiion_3d.jl index f5a88ca4192..9759b88e7ed 100644 --- a/src/equations/ideal_glm_mhd_multiion_3d.jl +++ b/src/equations/ideal_glm_mhd_multiion_3d.jl @@ -7,7 +7,17 @@ @doc raw""" IdealGlmMhdMultiIonEquations3D(; gammas, charge_to_mass, + gas_constants = zero(SVector{length(gammas), + eltype(gammas)}), + molar_masses = zero(SVector{length(gammas), + eltype(gammas)}), + ion_ion_collision_constants = zeros(eltype(gammas), + length(gammas), + length(gammas)), + ion_electron_collision_constants = zero(SVector{length(gammas), + eltype(gammas)}), electron_pressure = electron_pressure_zero, + electron_temperature = electron_pressure_zero, initial_c_h = NaN) The ideal compressible multi-ion MHD equations in three space dimensions augmented with a @@ -19,6 +29,37 @@ assumes that the equations are non-dimensionalized, such that the vacuum permeab In case of more than one ion species, the specific heat capacity ratios `gammas` and the charge-to-mass ratios `charge_to_mass` should be passed as tuples, e.g., `gammas=(1.4, 1.667)`. +The ion-ion and ion-electron collision source terms can be computed using the functions +[`source_terms_collision_ion_ion`](@ref) and [`source_terms_collision_ion_electron`](@ref), respectively. + +For ion-ion collision terms, the optional keyword arguments `gas_constants`, `molar_masses`, and `ion_ion_collision_constants` +must be provided. For ion-electron collision terms, the optional keyword arguments `gas_constants`, `molar_masses`, +`ion_electron_collision_constants`, and `electron_temperature` are required. + +- **`gas_constants`** and **`molar_masses`** are tuples containing the gas constant and molar mass of each + ion species, respectively. The **molar masses** can be provided in any unit system, as they are only used to + compute ratios and are independent of the other arguments. 
+ +- **`ion_ion_collision_constants`** is a symmetric matrix that contains coefficients to compute the collision + frequencies between pairs of ion species. For example, `ion_ion_collision_constants[2, 3]` contains the collision + coefficient for collisions between the ion species 2 and the ion species 3. These constants are derived using the kinetic + theory of gases (see, e.g., *Schunk & Nagy, 2000*). They are related to the collision coefficients ``B_{st}`` listed + in Table 4.3 of *Schunk & Nagy (2000)*, but are scaled by the molecular mass of ion species ``t`` (i.e., + `ion_ion_collision_constants[2, 3] = ` ``B_{st}/m_{t}``) and must be provided in consistent physical units + (Schunk & Nagy use ``cm^3 K^{3/2} / s``). + See [`source_terms_collision_ion_ion`](@ref) for more details on how these constants are used to compute the collision + frequencies. + +- **`ion_electron_collision_constants`** is a tuple containing coefficients to compute the ion-electron collision frequency + for each ion species. They correspond to the collision coefficients `B_{se}` divided by the elementary charge. + The ion-electron collision frequencies can also be computed using the kinetic theory + of gases (see, e.g., *Schunk & Nagy, 2000*). See [`source_terms_collision_ion_electron`](@ref) for more details on how these + constants are used to compute the collision frequencies. + +- **`electron_temperature`** is a function with the signature `electron_temperature(u, equations)` that can be used + compute the electron temperature as a function of the state `u`. The electron temperature is relevant for the computation + of the ion-electron collision source terms. + The argument `electron_pressure` can be used to pass a function that computes the electron pressure as a function of the state `u` with the signature `electron_pressure(u, equations)`. By default, the electron pressure is zero. @@ -33,58 +74,120 @@ References: - A. Rueda-Ramírez, A. Sikstel, G. 
Gassner, An Entropy-Stable Discontinuous Galerkin Discretization of the Ideal Multi-Ion Magnetohydrodynamics System (2024). Journal of Computational Physics. [DOI: 10.1016/j.jcp.2024.113655](https://doi.org/10.1016/j.jcp.2024.113655). +- Schunk, R. W., & Nagy, A. F. (2000). Ionospheres: Physics, plasma physics, and chemistry. + Cambridge university press. [DOI: 10.1017/CBO9780511635342](https://doi.org/10.1017/CBO9780511635342). !!! info "The multi-ion GLM-MHD equations require source terms" In case of more than one ion species, the multi-ion GLM-MHD equations should ALWAYS be used with [`source_terms_lorentz`](@ref). """ mutable struct IdealGlmMhdMultiIonEquations3D{NVARS, NCOMP, RealT <: Real, - ElectronPressure} <: + ElectronPressure, ElectronTemperature} <: AbstractIdealGlmMhdMultiIonEquations{3, NVARS, NCOMP} gammas::SVector{NCOMP, RealT} # Heat capacity ratios charge_to_mass::SVector{NCOMP, RealT} # Charge to mass ratios + gas_constants::SVector{NCOMP, RealT} # Specific gas constants + molar_masses::SVector{NCOMP, RealT} # Molar masses (can be provided in any units as they are only used to compute ratios) + ion_ion_collision_constants::Array{RealT, 2} # Symmetric matrix of collision frequency coefficients + ion_electron_collision_constants::SVector{NCOMP, RealT} # Constants for the ion-electron collision frequencies. 
The collision frequency is obtained as constant * (e * n_e) / T_e^1.5 electron_pressure::ElectronPressure # Function to compute the electron pressure + electron_temperature::ElectronTemperature # Function to compute the electron temperature c_h::RealT # GLM cleaning speed - function IdealGlmMhdMultiIonEquations3D{NVARS, NCOMP, RealT, - ElectronPressure}(gammas - ::SVector{NCOMP, RealT}, - charge_to_mass - ::SVector{NCOMP, RealT}, - electron_pressure - ::ElectronPressure, - c_h::RealT) where - {NVARS, NCOMP, RealT <: Real, ElectronPressure} + function IdealGlmMhdMultiIonEquations3D{NVARS, NCOMP, RealT, ElectronPressure, + ElectronTemperature}(gammas + ::SVector{NCOMP, + RealT}, + charge_to_mass + ::SVector{NCOMP, + RealT}, + gas_constants + ::SVector{NCOMP, + RealT}, + molar_masses + ::SVector{NCOMP, + RealT}, + ion_ion_collision_constants + ::Array{RealT, 2}, + ion_electron_collision_constants + ::SVector{NCOMP, + RealT}, + electron_pressure + ::ElectronPressure, + electron_temperature + ::ElectronTemperature, + c_h::RealT) where + {NVARS, NCOMP, RealT <: Real, ElectronPressure, ElectronTemperature} NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `charge_to_mass` have to be filled with at least one value")) - new(gammas, charge_to_mass, electron_pressure, c_h) + new(gammas, charge_to_mass, gas_constants, molar_masses, + ion_ion_collision_constants, + ion_electron_collision_constants, electron_pressure, electron_temperature, + c_h) end end function IdealGlmMhdMultiIonEquations3D(; gammas, charge_to_mass, + gas_constants = zero(SVector{length(gammas), + eltype(gammas)}), + molar_masses = zero(SVector{length(gammas), + eltype(gammas)}), + ion_ion_collision_constants = zeros(eltype(gammas), + length(gammas), + length(gammas)), + ion_electron_collision_constants = zero(SVector{length(gammas), + eltype(gammas)}), electron_pressure = electron_pressure_zero, + electron_temperature = electron_pressure_zero, initial_c_h = convert(eltype(gammas), NaN)) _gammas = 
promote(gammas...) _charge_to_mass = promote(charge_to_mass...) - RealT = promote_type(eltype(_gammas), eltype(_charge_to_mass)) + _gas_constants = promote(gas_constants...) + _molar_masses = promote(molar_masses...) + _ion_electron_collision_constants = promote(ion_electron_collision_constants...) + RealT = promote_type(eltype(_gammas), eltype(_charge_to_mass), + eltype(_gas_constants), eltype(_molar_masses), + eltype(ion_ion_collision_constants), + eltype(_ion_electron_collision_constants)) __gammas = SVector(map(RealT, _gammas)) __charge_to_mass = SVector(map(RealT, _charge_to_mass)) + __gas_constants = SVector(map(RealT, _gas_constants)) + __molar_masses = SVector(map(RealT, _molar_masses)) + __ion_ion_collision_constants = map(RealT, ion_ion_collision_constants) + __ion_electron_collision_constants = SVector(map(RealT, + _ion_electron_collision_constants)) NVARS = length(_gammas) * 5 + 4 NCOMP = length(_gammas) return IdealGlmMhdMultiIonEquations3D{NVARS, NCOMP, RealT, - typeof(electron_pressure)}(__gammas, - __charge_to_mass, - electron_pressure, - initial_c_h) + typeof(electron_pressure), + typeof(electron_temperature)}(__gammas, + __charge_to_mass, + __gas_constants, + __molar_masses, + __ion_ion_collision_constants, + __ion_electron_collision_constants, + electron_pressure, + electron_temperature, + initial_c_h) end # Outer constructor for `@reset` works correctly -function IdealGlmMhdMultiIonEquations3D(gammas, charge_to_mass, electron_pressure, c_h) +function IdealGlmMhdMultiIonEquations3D(gammas, charge_to_mass, gas_constants, + molar_masses, ion_ion_collision_constants, + ion_electron_collision_constants, + electron_pressure, + electron_temperature, + c_h) return IdealGlmMhdMultiIonEquations3D(gammas = gammas, charge_to_mass = charge_to_mass, + gas_constants = gas_constants, + molar_masses = molar_masses, + ion_ion_collision_constants = ion_ion_collision_constants, + ion_electron_collision_constants = ion_electron_collision_constants, electron_pressure 
= electron_pressure, + electron_temperature = electron_temperature, initial_c_h = c_h) end @@ -816,6 +919,133 @@ The term is composed of four individual non-conservative terms: return SVector(f) end +@inline function flux_nonconservative_central(u_ll, u_rr, + normal_direction::AbstractVector, + equations::IdealGlmMhdMultiIonEquations3D) + @unpack charge_to_mass = equations + # Unpack left and right states to get the magnetic field + B1_ll, B2_ll, B3_ll = magnetic_field(u_ll, equations) + B1_rr, B2_rr, B3_rr = magnetic_field(u_rr, equations) + psi_ll = divergence_cleaning_field(u_ll, equations) + psi_rr = divergence_cleaning_field(u_rr, equations) + B_dot_n_ll = B1_ll * normal_direction[1] + + B2_ll * normal_direction[2] + + B3_ll * normal_direction[3] + B_dot_n_rr = B1_rr * normal_direction[1] + + B2_rr * normal_direction[2] + + B3_rr * normal_direction[3] + B_dot_n_avg = 0.5f0 * (B_dot_n_ll + B_dot_n_rr) + + # Compute important averages + B1_avg = 0.5f0 * (B1_ll + B1_rr) + B2_avg = 0.5f0 * (B2_ll + B2_rr) + B3_avg = 0.5f0 * (B3_ll + B3_rr) + mag_norm_ll = B1_ll^2 + B2_ll^2 + B3_ll^2 + mag_norm_rr = B1_rr^2 + B2_rr^2 + B3_rr^2 + mag_norm_avg = 0.5f0 * (mag_norm_ll + mag_norm_rr) + psi_avg = 0.5f0 * (psi_ll + psi_rr) + + # Mean electron pressure + pe_ll = equations.electron_pressure(u_ll, equations) + pe_rr = equations.electron_pressure(u_rr, equations) + pe_mean = 0.5f0 * (pe_ll + pe_rr) + + # Compute charge ratio of u_ll + charge_ratio_ll = zero(MVector{ncomponents(equations), eltype(u_ll)}) + total_electron_charge = zero(eltype(u_ll)) + for k in eachcomponent(equations) + rho_k = u_ll[3 + (k - 1) * 5 + 1] # Extract densities from conserved variable vector + charge_ratio_ll[k] = rho_k * charge_to_mass[k] + total_electron_charge += charge_ratio_ll[k] + end + charge_ratio_ll ./= total_electron_charge + + # Compute auxiliary variables + v1_plus_ll, v2_plus_ll, v3_plus_ll, vk1_plus_ll, vk2_plus_ll, vk3_plus_ll = charge_averaged_velocities(u_ll, + equations) + 
v1_plus_rr, v2_plus_rr, v3_plus_rr, vk1_plus_rr, vk2_plus_rr, vk3_plus_rr = charge_averaged_velocities(u_rr, + equations) + v_plus_dot_n_ll = (v1_plus_ll * normal_direction[1] + + v2_plus_ll * normal_direction[2] + + v3_plus_ll * normal_direction[3]) + f = zero(MVector{nvariables(equations), eltype(u_ll)}) + + # Entries of Godunov-Powell term for induction equation (multiply by 2 because the non-conservative flux is + # multiplied by 0.5 whenever it's used in the Trixi code) + f[1] = 2 * v1_plus_ll * B_dot_n_avg + f[2] = 2 * v2_plus_ll * B_dot_n_avg + f[3] = 2 * v3_plus_ll * B_dot_n_avg + + for k in eachcomponent(equations) + # Compute terms for each species + # (we multiply by 2 because the non-conservative flux is multiplied by 0.5 whenever it's used in the Trixi code) + + # Compute term Lorentz term + f2_ll = ((0.5f0 * mag_norm_ll + pe_ll) * normal_direction[1] - + B_dot_n_ll * B1_ll) + f2_rr = ((0.5f0 * mag_norm_rr + pe_rr) * normal_direction[1] - + B_dot_n_rr * B1_rr) + f2 = charge_ratio_ll[k] * (f2_ll + f2_rr) + + f3_ll = ((0.5f0 * mag_norm_ll + pe_ll) * normal_direction[2] - + B_dot_n_ll * B2_ll) + f3_rr = ((0.5f0 * mag_norm_rr + pe_rr) * normal_direction[2] - + B_dot_n_rr * B2_rr) + f3 = charge_ratio_ll[k] * (f3_ll + f3_rr) + + f4_ll = ((0.5f0 * mag_norm_ll + pe_ll) * normal_direction[3] - + B_dot_n_ll * B3_ll) + f4_rr = ((0.5f0 * mag_norm_rr + pe_rr) * normal_direction[3] - + B_dot_n_rr * B3_rr) + f4 = charge_ratio_ll[k] * (f4_ll + f4_rr) + + f5 = (vk1_plus_ll[k] * normal_direction[1] + + vk2_plus_ll[k] * normal_direction[2] + + vk3_plus_ll[k] * normal_direction[3]) * pe_mean * 2 + + # Compute multi-ion term (vanishes for NCOMP==1) + vk1_minus_ll = v1_plus_ll - vk1_plus_ll[k] + vk2_minus_ll = v2_plus_ll - vk2_plus_ll[k] + vk3_minus_ll = v3_plus_ll - vk3_plus_ll[k] + vk1_minus_rr = v1_plus_rr - vk1_plus_rr[k] + vk2_minus_rr = v2_plus_rr - vk2_plus_rr[k] + vk3_minus_rr = v3_plus_rr - vk3_plus_rr[k] + f5 += ((B2_ll * ((vk1_minus_ll * B2_ll - vk2_minus_ll * 
B1_ll) + + (vk1_minus_rr * B2_rr - vk2_minus_rr * B1_rr)) + + B3_ll * ((vk1_minus_ll * B3_ll - vk3_minus_ll * B1_ll) + + (vk1_minus_rr * B3_rr - vk3_minus_rr * B1_rr))) * + normal_direction[1] + + (B1_ll * ((vk2_minus_ll * B1_ll - vk1_minus_ll * B2_ll) + + (vk2_minus_rr * B1_rr - vk1_minus_rr * B2_rr)) + + B3_ll * ((vk2_minus_ll * B3_ll - vk3_minus_ll * B2_ll) + + (vk2_minus_rr * B3_rr - vk3_minus_rr * B2_rr))) * + normal_direction[2] + + (B1_ll * ((vk3_minus_ll * B1_ll - vk1_minus_ll * B3_ll) + + (vk3_minus_rr * B1_rr - vk1_minus_rr * B3_rr)) + + B2_ll * ((vk3_minus_ll * B2_ll - vk2_minus_ll * B3_ll) + + (vk3_minus_rr * B2_rr - vk2_minus_rr * B3_rr))) * + normal_direction[3]) + + # Compute Godunov-Powell term + f2 += charge_ratio_ll[k] * B1_ll * B_dot_n_avg * 2 + f3 += charge_ratio_ll[k] * B2_ll * B_dot_n_avg * 2 + f4 += charge_ratio_ll[k] * B3_ll * B_dot_n_avg * 2 + f5 += (v1_plus_ll * B1_ll + v2_plus_ll * B2_ll + v3_plus_ll * B3_ll) * + B_dot_n_avg * 2 + + # Compute GLM term for the energy + f5 += v_plus_dot_n_ll * psi_ll * psi_avg * 2 + + # Add to the flux vector + set_component!(f, k, 0, f2, f3, f4, f5, equations) + end + # Compute GLM term for psi (multiply by 2 because the non-conservative flux is + # multiplied by 0.5 whenever it's used in the Trixi code) + f[end] = 2 * v_plus_dot_n_ll * psi_avg + + return SVector(f) +end + """ flux_ruedaramirez_etal(u_ll, u_rr, orientation, equations::IdealGlmMhdMultiIonEquations3D) @@ -1514,12 +1744,17 @@ end (rho_e - 0.5f0 * rho * (v1^2 + v2^2 + v3^2) - 0.5f0 * (B1^2 + B2^2 + B3^2) - 0.5f0 * psi^2) a_square = gamma * p * rho_inv - inv_sqrt_rho = 1 / sqrt(rho) - b1 = B1 * inv_sqrt_rho - b2 = B2 * inv_sqrt_rho - b3 = B3 * inv_sqrt_rho - b_square = b1^2 + b2^2 + b3^2 + if isapprox(equations.charge_to_mass[k], 0.0) + b_square = b1 = b2 = b3 = 0.0 + else + inv_sqrt_rho = 1 / sqrt(rho) + + b1 = B1 * inv_sqrt_rho + b2 = B2 * inv_sqrt_rho + b3 = B3 * inv_sqrt_rho + b_square = b1^2 + b2^2 + b3^2 + end if orientation == 1 c_f = 
max(c_f, @@ -1564,20 +1799,26 @@ end (rho_e - 0.5f0 * rho * (v1^2 + v2^2 + v3^2) - 0.5f0 * (B1^2 + B2^2 + B3^2) - 0.5f0 * psi^2) a_square = gamma * p * rho_inv - inv_sqrt_rho = 1 / sqrt(rho) - b1 = B1 * inv_sqrt_rho - b2 = B2 * inv_sqrt_rho - b3 = B3 * inv_sqrt_rho - b_square = b1^2 + b2^2 + b3^2 - b_dot_n_squared = (b1 * normal_direction[1] + - b2 * normal_direction[2] + - b3 * normal_direction[3])^2 / norm_squared + if isapprox(equations.charge_to_mass[k], 0.0) + b_square = 0.0 + b_dot_n_squared = 0.0 + else + inv_sqrt_rho = 1 / sqrt(rho) + + b1 = B1 * inv_sqrt_rho + b2 = B2 * inv_sqrt_rho + b3 = B3 * inv_sqrt_rho + b_square = b1^2 + b2^2 + b3^2 + b_dot_n_squared = (b1 * normal_direction[1] + + b2 * normal_direction[2] + + b3 * normal_direction[3])^2 / norm_squared + end c_f = max(c_f, sqrt((0.5f0 * (a_square + b_square) + - 0.5f0 * sqrt((a_square + b_square)^2 - - 4 * a_square * b_dot_n_squared)) * + 0.5f0 * + sqrt((a_square + b_square)^2 - 4 * a_square * b_dot_n_squared)) * norm_squared)) end diff --git a/src/equations/linear_scalar_advection_1d.jl b/src/equations/linear_scalar_advection_1d.jl index 2a9a6f35e97..4692f56af99 100644 --- a/src/equations/linear_scalar_advection_1d.jl +++ b/src/equations/linear_scalar_advection_1d.jl @@ -15,7 +15,7 @@ The linear scalar advection equation in one space dimension with constant velocity `a`. """ struct LinearScalarAdvectionEquation1D{RealT <: Real} <: - AbstractLinearScalarAdvectionEquation{1, 1} + AbstractLinearScalarAdvectionEquation{1} advection_velocity::SVector{1, RealT} end @@ -33,10 +33,7 @@ varnames(::typeof(cons2prim), ::LinearScalarAdvectionEquation1D) = ("scalar",) A constant initial condition to test free-stream preservation. 
""" function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation1D) - # Store translated coordinate for easy use of exact solution RealT = eltype(x) - x_trans = x - equation.advection_velocity * t - return SVector(RealT(2)) end diff --git a/src/equations/linear_scalar_advection_2d.jl b/src/equations/linear_scalar_advection_2d.jl index ffae697e8d9..b90f24c1947 100644 --- a/src/equations/linear_scalar_advection_2d.jl +++ b/src/equations/linear_scalar_advection_2d.jl @@ -15,7 +15,7 @@ The linear scalar advection equation in two space dimensions with constant velocity `a`. """ struct LinearScalarAdvectionEquation2D{RealT <: Real} <: - AbstractLinearScalarAdvectionEquation{2, 1} + AbstractLinearScalarAdvectionEquation{2} advection_velocity::SVector{2, RealT} end @@ -46,10 +46,7 @@ end A constant initial condition to test free-stream preservation. """ function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation2D) - # Store translated coordinate for easy use of exact solution RealT = eltype(x) - x_trans = x_trans_periodic_2d(x - equation.advection_velocity * t) - return SVector(RealT(2)) end diff --git a/src/equations/linear_scalar_advection_3d.jl b/src/equations/linear_scalar_advection_3d.jl index 3d9643aa2fd..61f035ea703 100644 --- a/src/equations/linear_scalar_advection_3d.jl +++ b/src/equations/linear_scalar_advection_3d.jl @@ -15,7 +15,7 @@ The linear scalar advection equation in three space dimensions with constant velocity `a`. """ struct LinearScalarAdvectionEquation3D{RealT <: Real} <: - AbstractLinearScalarAdvectionEquation{3, 1} + AbstractLinearScalarAdvectionEquation{3} advection_velocity::SVector{3, RealT} end @@ -37,10 +37,7 @@ varnames(::typeof(cons2prim), ::LinearScalarAdvectionEquation3D) = ("scalar",) A constant initial condition to test free-stream preservation. 
""" function initial_condition_constant(x, t, equation::LinearScalarAdvectionEquation3D) - # Store translated coordinate for easy use of exact solution RealT = eltype(x) - x_trans = x - equation.advection_velocity * t - return SVector(RealT(2)) end diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 18423235da0..ffff5e63d9c 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -154,18 +154,18 @@ function Base.show(io::IO, d::DissipationGlobalLaxFriedrichs) end """ - DissipationLocalLaxFriedrichs(max_abs_speed=max_abs_speed_naive) + DissipationLocalLaxFriedrichs(max_abs_speed=max_abs_speed) Create a local Lax-Friedrichs dissipation operator where the maximum absolute wave speed is estimated as `max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, equations)`, -defaulting to [`max_abs_speed_naive`](@ref). +defaulting to [`max_abs_speed`](@ref). """ struct DissipationLocalLaxFriedrichs{MaxAbsSpeed} max_abs_speed::MaxAbsSpeed end -DissipationLocalLaxFriedrichs() = DissipationLocalLaxFriedrichs(max_abs_speed_naive) +DissipationLocalLaxFriedrichs() = DissipationLocalLaxFriedrichs(max_abs_speed) @inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr, orientation_or_normal_direction, @@ -188,6 +188,8 @@ Simple and fast estimate of the maximal wave speed of the Riemann problem with l For non-integer arguments `normal_direction` in one dimension, `max_abs_speed_naive` returns `abs(normal_direction[1]) * max_abs_speed_naive(u_ll, u_rr, 1, equations)`. + +Slightly more diffusive/overestimating than [`max_abs_speed`](@ref). """ function max_abs_speed_naive end @@ -211,6 +213,8 @@ i.e., the wave speeds used in `max_dt` which computes the maximum stable time st For non-integer arguments `normal_direction` in one dimension, `max_abs_speed_naive` returns `abs(normal_direction[1]) * max_abs_speed_naive(u_ll, u_rr, 1, equations)`. 
+ +Defaults to [`min_max_speed_naive`](@ref) if no specialized version for the 'equations` at hand is available. """ @inline function max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, @@ -222,13 +226,13 @@ end const FluxLaxFriedrichs{MaxAbsSpeed} = FluxPlusDissipation{typeof(flux_central), DissipationLocalLaxFriedrichs{MaxAbsSpeed}} """ - FluxLaxFriedrichs(max_abs_speed=max_abs_speed_naive) + FluxLaxFriedrichs(max_abs_speed=max_abs_speed) Local Lax-Friedrichs (Rusanov) flux with maximum wave speed estimate provided by `max_abs_speed`, cf. [`DissipationLocalLaxFriedrichs`](@ref) and [`max_abs_speed_naive`](@ref). """ -function FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive) +function FluxLaxFriedrichs(max_abs_speed = max_abs_speed) FluxPlusDissipation(flux_central, DissipationLocalLaxFriedrichs(max_abs_speed)) end @@ -244,7 +248,7 @@ See [`FluxLaxFriedrichs`](@ref). const flux_lax_friedrichs = FluxLaxFriedrichs() @doc raw""" - DissipationLaxFriedrichsEntropyVariables(max_abs_speed=max_abs_speed_naive) + DissipationLaxFriedrichsEntropyVariables(max_abs_speed=max_abs_speed) Create a local Lax-Friedrichs-type dissipation operator that is provably entropy stable. This operator must be used together with an entropy-conservative two-point flux function (e.g., `flux_ec`) to yield @@ -259,7 +263,7 @@ f^{\mathrm{ES}} = f^{\mathrm{EC}} - \frac{1}{2} \lambda_{\mathrm{max}} H (w_r - ``` where ``f^{\mathrm{EC}}`` is the entropy-conservative two-point flux function (computed with, e.g., `flux_ec`), ``\lambda_{\mathrm{max}}`` is the maximum wave speed estimated as `max_abs_speed(u_l, u_r, orientation_or_normal_direction, equations)`, -defaulting to [`max_abs_speed_naive`](@ref), ``H`` is a symmetric positive-definite dissipation matrix that +defaulting to [`max_abs_speed`](@ref), ``H`` is a symmetric positive-definite dissipation matrix that depends on the left and right states `u_l` and `u_r`, and ``(w_r - w_l)`` is the jump in entropy variables. 
Ideally, ``H (w_r - w_l) = (u_r - u_l)``, such that the dissipation operator is consistent with the local Lax-Friedrichs dissipation. @@ -277,7 +281,7 @@ struct DissipationLaxFriedrichsEntropyVariables{MaxAbsSpeed} max_abs_speed::MaxAbsSpeed end -DissipationLaxFriedrichsEntropyVariables() = DissipationLaxFriedrichsEntropyVariables(max_abs_speed_naive) +DissipationLaxFriedrichsEntropyVariables() = DissipationLaxFriedrichsEntropyVariables(max_abs_speed) function Base.show(io::IO, d::DissipationLaxFriedrichsEntropyVariables) print(io, "DissipationLaxFriedrichsEntropyVariables(", d.max_abs_speed, ")") diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 1d3395b1b56..b66b581d0d1 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -365,7 +365,7 @@ function save_mesh_file(mesh::DGMultiMesh, basis, output_directory, timestep = 0 # Transfer vectors of vectors to a matrix (2D array) and store into h5 file. for (idim, vectors) in enumerate(get_VXYZ(mesh.md)) matrix = zeros(length(vectors[1]), length(vectors)) - for ielem in 1:length(vectors) + for ielem in eachindex(vectors) @views matrix[:, ielem] .= vectors[ielem] end # ASCII: Char(58) => 'X' diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index acebd413235..e8c49503e61 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -2184,6 +2184,10 @@ end function balance!(mesh::P4estMesh{3}, init_fn = C_NULL) p8est_balance(mesh.p4est, P8EST_CONNECT_FACE, init_fn) + # As in 2D p4est meshes, p8est_balance needs to be called twice sometimes for + # the mesh to be properly balanced. 
+ # See https://github.com/cburstedde/p4est/issues/112 + p8est_balance(mesh.p4est, P8EST_CONNECT_FACE, init_fn) end function partition!(mesh::P4estMesh{2}; weight_fn = C_NULL) diff --git a/src/meshes/structured_mesh_view.jl b/src/meshes/structured_mesh_view.jl index 0b0cccfc7fc..d931d25bbe1 100644 --- a/src/meshes/structured_mesh_view.jl +++ b/src/meshes/structured_mesh_view.jl @@ -105,6 +105,8 @@ function calc_node_coordinates!(node_coordinates, element, node_coordinates[:, i, j, element] .= mapping(cell_x_offset + dx / 2 * nodes[i], cell_y_offset + dy / 2 * nodes[j]) end + + return nothing end # Does not save the mesh itself to an HDF5 file. Instead saves important attributes diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index c4f6db59af3..4412b9396bb 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -111,12 +111,12 @@ end boundary_names, treeIDs, neighIDs, faces, duals, orientations, levels, num_elements_per_tree) -Constructor for the `T8codeMesh`. Typically called by the `load_mesh` routine. +Constructor for the `T8codeMesh`. Typically called by the `load_mesh` routine. # Arguments -- `ndims`: Dimension of the mesh. -- `ntrees`: Global number of trees. -- `nelements`: Global number of elements. +- `ndims`: Dimension of the mesh. +- `ntrees`: Global number of trees. +- `nelements`: Global number of elements. - `tree_node_coordinates`: Node coordinates for each tree: [dimension, i, j, k, tree] - `nodes`: Array of interpolation nodes. - `boundary_names`: List of boundary names. @@ -1578,10 +1578,3 @@ function get_cmesh_info(cmesh::Ptr{t8_cmesh}, ndims) return treeIDs, neighIDs, faces, duals, orientations end - -#! format: off -@deprecate T8codeMesh{2}(conn::Ptr{p4est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) -@deprecate T8codeMesh{3}(conn::Ptr{p8est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) -@deprecate T8codeMesh{2}(meshfile::String; kwargs...) 
T8codeMesh(meshfile::String, 2; kwargs...) -@deprecate T8codeMesh{3}(meshfile::String; kwargs...) T8codeMesh(meshfile::String, 3; kwargs...) -#! format: on diff --git a/src/meshes/tree_mesh.jl b/src/meshes/tree_mesh.jl index 620ab1257fd..e715c926fa0 100644 --- a/src/meshes/tree_mesh.jl +++ b/src/meshes/tree_mesh.jl @@ -252,5 +252,7 @@ end isperiodic(mesh::TreeMesh) = isperiodic(mesh.tree) isperiodic(mesh::TreeMesh, dimension) = isperiodic(mesh.tree, dimension) +Base.real(::TreeMesh{NDIMS, TreeType, RealT}) where {NDIMS, TreeType, RealT} = RealT + include("parallel_tree_mesh.jl") end # @muladd diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl index cc3900d42da..a629ff64f0d 100644 --- a/src/semidiscretization/semidiscretization.jl +++ b/src/semidiscretization/semidiscretization.jl @@ -78,13 +78,30 @@ function calc_error_norms(u_ode, t, analyzer, semi::AbstractSemidiscretization, end """ - semidiscretize(semi::AbstractSemidiscretization, tspan) + semidiscretize(semi::AbstractSemidiscretization, tspan; + jac_prototype::Union{AbstractMatrix, Nothing} = nothing, + colorvec::Union{AbstractVector, Nothing} = nothing, + storage_type = nothing, + real_type = nothing) Wrap the semidiscretization `semi` as an ODE problem in the time interval `tspan` that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai/latest/). + +Optional keyword arguments: +- `jac_prototype`: Expected to come from [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl). + Specifies the sparsity structure of the Jacobian to enable e.g. efficient implicit time stepping. +- `colorvec`: Expected to come from [SparseMatrixColorings.jl](https://github.com/gdalle/SparseMatrixColorings.jl). + Allows for even faster Jacobian computation if a sparse `jac_prototype` is given (optional). +- `storage_type` and `real_type`: Configure the underlying computational datastructures. 
+ `storage_type` changes the fundamental array type being used, allowing the experimental use of `CuArray` + or other GPU array types. `real_type` changes the computational data type being used. """ function semidiscretize(semi::AbstractSemidiscretization, tspan; - reset_threads = true) + jac_prototype::Union{AbstractMatrix, Nothing} = nothing, + colorvec::Union{AbstractVector, Nothing} = nothing, + reset_threads = true, + storage_type = nothing, + real_type = nothing) # Optionally reset Polyester.jl threads. See # https://github.com/trixi-framework/Trixi.jl/issues/1583 # https://github.com/JuliaSIMD/Polyester.jl/issues/30 @@ -92,25 +109,64 @@ function semidiscretize(semi::AbstractSemidiscretization, tspan; Polyester.reset_threads!() end - u0_ode = compute_coefficients(first(tspan), semi) + if !(storage_type === nothing && real_type === nothing) + if storage_type === nothing + storage_type = Array + end + if real_type === nothing + real_type = real(semi) + end + semi = trixi_adapt(storage_type, real_type, semi) + if eltype(tspan) !== real_type + tspan = convert.(real_type, tspan) + end + end + + u0_ode = compute_coefficients(first(tspan), semi) # Invoke initial condition + # TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using # mpi_isparallel() && MPI.Barrier(mpi_comm()) # See https://github.com/trixi-framework/Trixi.jl/issues/328 iip = true # is-inplace, i.e., we modify a vector when calling rhs! specialize = SciMLBase.FullSpecialize # specialize on rhs! 
and parameters (semi) - return ODEProblem{iip, specialize}(rhs!, u0_ode, tspan, semi) + + # Check if Jacobian prototype is provided for sparse Jacobian + if jac_prototype !== nothing + # Convert `jac_prototype` to real type, as seen here: + # https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Declaring-a-Sparse-Jacobian-with-Automatic-Sparsity-Detection + ode = SciMLBase.ODEFunction(rhs!, + jac_prototype = convert.(eltype(u0_ode), + jac_prototype), + colorvec = colorvec) # coloring vector is optional + + return ODEProblem{iip, specialize}(ode, u0_ode, tspan, semi) + else + # We could also construct an `ODEFunction` without the Jacobian here, + # but we stick to the more light-weight direct in-place function `rhs!`. + return ODEProblem{iip, specialize}(rhs!, u0_ode, tspan, semi) + end end """ - semidiscretize(semi::AbstractSemidiscretization, tspan, - restart_file::AbstractString) + semidiscretize(semi::AbstractSemidiscretization, tspan, + restart_file::AbstractString; + jac_prototype::Union{AbstractMatrix, Nothing} = nothing, + colorvec::Union{AbstractVector, Nothing} = nothing) Wrap the semidiscretization `semi` as an ODE problem in the time interval `tspan` that can be passed to `solve` from the [SciML ecosystem](https://diffeq.sciml.ai/latest/). The initial condition etc. is taken from the `restart_file`. + +Optional keyword arguments: +- `jac_prototype`: Expected to come from [SparseConnectivityTracer.jl](https://github.com/adrhill/SparseConnectivityTracer.jl). + Specifies the sparsity structure of the Jacobian to enable e.g. efficient implicit time stepping. +- `colorvec`: Expected to come from [SparseMatrixColorings.jl](https://github.com/gdalle/SparseMatrixColorings.jl). + Allows for even faster Jacobian computation. Not necessarily required when `jac_prototype` is given. 
""" function semidiscretize(semi::AbstractSemidiscretization, tspan, restart_file::AbstractString; + jac_prototype::Union{AbstractMatrix, Nothing} = nothing, + colorvec::Union{AbstractVector, Nothing} = nothing, reset_threads = true) # Optionally reset Polyester.jl threads. See # https://github.com/trixi-framework/Trixi.jl/issues/1583 @@ -119,13 +175,29 @@ function semidiscretize(semi::AbstractSemidiscretization, tspan, Polyester.reset_threads!() end - u0_ode = load_restart_file(semi, restart_file) + u0_ode = load_restart_file(semi, restart_file) # Load initial condition from restart file + # TODO: MPI, do we want to synchronize loading and print debug statements, e.g. using # mpi_isparallel() && MPI.Barrier(mpi_comm()) # See https://github.com/trixi-framework/Trixi.jl/issues/328 iip = true # is-inplace, i.e., we modify a vector when calling rhs! specialize = SciMLBase.FullSpecialize # specialize on rhs! and parameters (semi) - return ODEProblem{iip, specialize}(rhs!, u0_ode, tspan, semi) + + # Check if Jacobian prototype is provided for sparse Jacobian + if jac_prototype !== nothing + # Convert `jac_prototype` to real type, as seen here: + # https://docs.sciml.ai/DiffEqDocs/stable/tutorials/advanced_ode_example/#Declaring-a-Sparse-Jacobian-with-Automatic-Sparsity-Detection + ode = SciMLBase.ODEFunction(rhs!, + jac_prototype = convert.(eltype(u0_ode), + jac_prototype), + colorvec = colorvec) # coloring vector is optional + + return ODEProblem{iip, specialize}(ode, u0_ode, tspan, semi) + else + # We could also construct an `ODEFunction` without the Jacobian here, + # but we stick to the more light-weight direct in-place function `rhs!`. + return ODEProblem{iip, specialize}(rhs!, u0_ode, tspan, semi) + end end """ @@ -155,9 +227,10 @@ end Same as [`compute_coefficients`](@ref) but stores the result in `u_ode`. 
""" function compute_coefficients!(u_ode, func, t, semi::AbstractSemidiscretization) + backend = trixi_backend(u_ode) u = wrap_array(u_ode, semi) # Call `compute_coefficients` defined by the solver - compute_coefficients!(u, func, t, mesh_equations_solver_cache(semi)...) + compute_coefficients!(backend, u, func, t, mesh_equations_solver_cache(semi)...) end """ @@ -200,7 +273,7 @@ end Uses the right-hand side operator of the semidiscretization `semi` and simple second order finite difference to compute the Jacobian `J` -of the semidiscretization `semi` at state `u0_ode`. +of the semidiscretization `semi` at time `t0` and state `u0_ode`. """ function jacobian_fd(semi::AbstractSemidiscretization; t0 = zero(real(semi)), @@ -220,7 +293,13 @@ function jacobian_fd(semi::AbstractSemidiscretization; # use second order finite difference to estimate Jacobian matrix for idx in eachindex(u0_ode) # determine size of fluctuation - epsilon = sqrt(eps(u0_ode[idx])) + # This is the approach used by FiniteDiff.jl to compute the + # step size, which assures that the finite difference is accurate + # for very small and very large absolute values `u0_ode[idx]`. + # See https://github.com/trixi-framework/Trixi.jl/pull/2514#issuecomment-3190534904. + absstep = sqrt(eps(typeof(u0_ode[idx]))) + relstep = absstep + epsilon = max(relstep * abs(u0_ode[idx]), absstep) # plus fluctuation u_ode[idx] = u0_ode[idx] + epsilon @@ -230,7 +309,7 @@ function jacobian_fd(semi::AbstractSemidiscretization; u_ode[idx] = u0_ode[idx] - epsilon rhs!(dum_ode, u_ode, semi, t0) - # restore linearisation state + # restore linearization state u_ode[idx] = u0_ode[idx] # central second order finite difference @@ -247,7 +326,7 @@ end Uses the right-hand side operator of the semidiscretization `semi` and forward mode automatic differentiation to compute the Jacobian `J` -of the semidiscretization `semi` at state `u0_ode`. +of the semidiscretization `semi` at time `t0` and state `u0_ode`. 
""" function jacobian_ad_forward(semi::AbstractSemidiscretization; t0 = zero(real(semi)), @@ -268,7 +347,7 @@ end function _jacobian_ad_forward(semi, t0, u0_ode, du_ode, config) new_semi = remake(semi, uEltype = eltype(config)) # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match - # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode, u0_ode, config) do du_ode, u_ode Trixi.rhs!(du_ode, u_ode, new_semi, t0) @@ -302,7 +381,7 @@ end function _jacobian_ad_forward_structarrays(semi, t0, u0_ode_plain, du_ode_plain, config) new_semi = remake(semi, uEltype = eltype(config)) # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match - # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode_plain, u0_ode_plain, config) do du_ode_plain, u_ode_plain diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 434fcb4a120..750d9525419 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -644,6 +644,8 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ j_node += j_node_step end end + + return nothing end ################################################################################ @@ -686,6 +688,8 @@ end for v in eachvariable(equations) surface_flux_values[v, surface_node_indices..., direction, element] = flux[v] end + + return nothing end @inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, @@ -728,6 +732,8 @@ end 0.5f0 * noncons_flux[v]) end + + return nothing end 
function get_boundary_indices(element, orientation, diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index 7496a345661..2a563c02229 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -27,25 +27,6 @@ mutable struct SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, solver::Solver cache::Cache performance_counter::PerformanceCounter - - function SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, - BoundaryConditions, SourceTerms, Solver, - Cache}(mesh::Mesh, equations::Equations, - initial_condition::InitialCondition, - boundary_conditions::BoundaryConditions, - source_terms::SourceTerms, - solver::Solver, - cache::Cache) where {Mesh, Equations, - InitialCondition, - BoundaryConditions, - SourceTerms, - Solver, - Cache} - performance_counter = PerformanceCounter() - - new(mesh, equations, initial_condition, boundary_conditions, source_terms, - solver, cache, performance_counter) - end end """ @@ -71,6 +52,8 @@ function SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver check_periodicity_mesh_boundary_conditions(mesh, _boundary_conditions) + performance_counter = PerformanceCounter() + SemidiscretizationHyperbolic{typeof(mesh), typeof(equations), typeof(initial_condition), typeof(_boundary_conditions), typeof(source_terms), @@ -78,9 +61,13 @@ function SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver initial_condition, _boundary_conditions, source_terms, solver, - cache) + cache, + performance_counter) end +# @eval due to @muladd +@eval Adapt.@adapt_structure(SemidiscretizationHyperbolic) + # Create a new semidiscretization but change some parameters compared to the input. # `Base.similar` follows a related concept but would require us to `copy` the `mesh`, # which would impact the performance. 
Instead, `SciMLBase.remake` has exactly the diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl index 7dab1f748ec..54ede387fa2 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl @@ -375,4 +375,45 @@ function _jacobian_ad_forward(semi::SemidiscretizationHyperbolicParabolic, t0, u return J end + +""" + jacobian_ad_forward_parabolic(semi::SemidiscretizationHyperbolicParabolic; + t0=zero(real(semi)), + u0_ode=compute_coefficients(t0, semi)) + +Uses the *parabolic part* of the right-hand side operator of the [`SemidiscretizationHyperbolicParabolic`](@ref) `semi` +and forward mode automatic differentiation to compute the Jacobian `J` of the +parabolic/diffusive contribution only at time `t0` and state `u0_ode`. + +This might be useful for operator-splitting methods, e.g., the construction of optimized +time integrators which optimize different methods for the hyperbolic and parabolic part separately. 
+""" +function jacobian_ad_forward_parabolic(semi::SemidiscretizationHyperbolicParabolic; + t0 = zero(real(semi)), + u0_ode = compute_coefficients(t0, semi)) + jacobian_ad_forward_parabolic(semi, t0, u0_ode) +end + +# The following version is for plain arrays +function jacobian_ad_forward_parabolic(semi::SemidiscretizationHyperbolicParabolic, + t0, u0_ode) + du_ode = similar(u0_ode) + config = ForwardDiff.JacobianConfig(nothing, du_ode, u0_ode) + + # Use a function barrier since the generation of the `config` we use above + # is not type-stable + _jacobian_ad_forward_parabolic(semi, t0, u0_ode, du_ode, config) +end + +function _jacobian_ad_forward_parabolic(semi, t0, u0_ode, du_ode, config) + new_semi = remake(semi, uEltype = eltype(config)) + # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` + J = ForwardDiff.jacobian(du_ode, u0_ode, config) do du_ode, u_ode + Trixi.rhs_parabolic!(du_ode, u_ode, new_semi, t0) + end + + return J +end end # @muladd diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl index ad211b3c003..509c12dab95 100644 --- a/src/solvers/dg.jl +++ b/src/solvers/dg.jl @@ -265,6 +265,18 @@ function Base.show(io::IO, mime::MIME"text/plain", end end +# Required to be able to run `SimpleSSPRK33` without `VolumeIntegralSubcellLimiting` +Base.resize!(semi, volume_integral::AbstractVolumeIntegral, new_size) = nothing + +function Base.resize!(semi, volume_integral::VolumeIntegralSubcellLimiting, new_size) + # Resize container antidiffusive_fluxes + resize!(semi.cache.antidiffusive_fluxes, new_size) + + # Resize container subcell_limiter_coefficients + @unpack limiter = volume_integral + resize!(limiter.cache.subcell_limiter_coefficients, new_size) +end + # TODO: FD. Should this definition live in a different file because it is # not strictly a DG method? 
""" @@ -415,6 +427,9 @@ struct DG{Basis, Mortar, SurfaceIntegral, VolumeIntegral} volume_integral::VolumeIntegral end +# @eval due to @muladd +@eval Adapt.@adapt_structure(DG) + function Base.show(io::IO, dg::DG) @nospecialize dg # reduce precompilation time @@ -639,8 +654,11 @@ include("fdsbp_unstructured/fdsbp.jl") function allocate_coefficients(mesh::AbstractMesh, equations, dg::DG, cache) # We must allocate a `Vector` in order to be able to `resize!` it (AMR). # cf. wrap_array - zeros(eltype(cache.elements), - nvariables(equations) * nnodes(dg)^ndims(mesh) * nelements(dg, cache)) + u_ode = similar(cache.elements.node_coordinates, eltype(cache.elements), + nvariables(equations) * nnodes(dg)^ndims(mesh) * + nelements(dg, cache)) + fill!(u_ode, zero(eltype(u_ode))) + return u_ode end @inline function wrap_array(u_ode::AbstractVector, mesh::AbstractMesh, equations, @@ -663,7 +681,7 @@ end # since LoopVectorization does not support `ForwardDiff.Dual`s. Hence, we use # optimized `PtrArray`s whenever possible and fall back to plain `Array`s # otherwise. - if _PREFERENCE_POLYESTER && LoopVectorization.check_args(u_ode) + if _PREFERENCE_THREADING === :polyester && LoopVectorization.check_args(u_ode) # This version using `PtrArray`s from StrideArrays.jl is very fast and # does not result in allocations. # @@ -683,7 +701,8 @@ end # (nvariables(equations), ntuple(_ -> nnodes(dg), ndims(mesh))..., nelements(dg, cache))) else # The following version is reasonably fast and allows us to `resize!(u_ode, ...)`. 
- unsafe_wrap(Array{eltype(u_ode), ndims(mesh) + 2}, pointer(u_ode), + ArrayType = Trixi.storage_type(u_ode) + unsafe_wrap(ArrayType{eltype(u_ode), ndims(mesh) + 2}, pointer(u_ode), (nvariables(equations), ntuple(_ -> nnodes(dg), ndims(mesh))..., nelements(dg, cache))) end @@ -697,7 +716,7 @@ end nvariables(equations) * nnodes(dg)^ndims(mesh) * nelements(dg, cache) end # See comments on the DGSEM version above - if _PREFERENCE_POLYESTER && LoopVectorization.check_args(u_ode) + if _PREFERENCE_THREADING === :polyester && LoopVectorization.check_args(u_ode) # Here, we do not specialize on the number of nodes using `StaticInt` since # - it will not be type stable (SBP operators just store it as a runtime value) # - FD methods tend to use high node counts @@ -732,8 +751,8 @@ end nelements(dg, cache))) end -function compute_coefficients!(u, func, t, mesh::AbstractMesh{1}, equations, dg::DG, - cache) +function compute_coefficients!(backend::Nothing, u, func, t, mesh::AbstractMesh{1}, + equations, dg::DG, cache) @threaded for element in eachelement(dg, cache) for i in eachnode(dg) x_node = get_node_coords(cache.elements.node_coordinates, equations, dg, i, @@ -751,22 +770,51 @@ function compute_coefficients!(u, func, t, mesh::AbstractMesh{1}, equations, dg: set_node_vars!(u, u_node, equations, dg, i, element) end end + + return nothing end -function compute_coefficients!(u, func, t, mesh::AbstractMesh{2}, equations, dg::DG, - cache) +function compute_coefficients!(backend::Nothing, u, func, t, mesh::AbstractMesh{2}, + equations, dg::DG, cache) + @unpack node_coordinates = cache.elements @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - x_node = get_node_coords(cache.elements.node_coordinates, equations, dg, i, - j, element) - u_node = func(x_node, t, equations) - set_node_vars!(u, u_node, equations, dg, i, j, element) - end + compute_coefficients_element!(u, func, t, equations, dg, node_coordinates, + element) + end + + return 
nothing +end + +function compute_coefficients!(backend::Backend, u, func, t, mesh::AbstractMesh{2}, + equations, dg::DG, cache) + nelements(dg, cache) == 0 && return nothing + @unpack node_coordinates = cache.elements + kernel! = compute_coefficients_kernel!(backend) + kernel!(u, func, t, equations, dg, node_coordinates, + ndrange = nelements(dg, cache)) + return nothing +end + +@kernel function compute_coefficients_kernel!(u, func, t, equations, + dg::DG, node_coordinates) + element = @index(Global) + compute_coefficients_element!(u, func, t, equations, dg, node_coordinates, element) +end + +function compute_coefficients_element!(u, func, t, equations, dg::DG, + node_coordinates, element) + for j in eachnode(dg), i in eachnode(dg) + x_node = get_node_coords(node_coordinates, equations, dg, i, + j, element) + u_node = func(x_node, t, equations) + set_node_vars!(u, u_node, equations, dg, i, j, element) end + + return nothing end -function compute_coefficients!(u, func, t, mesh::AbstractMesh{3}, equations, dg::DG, - cache) +function compute_coefficients!(backend::Nothing, u, func, t, mesh::AbstractMesh{3}, + equations, dg::DG, cache) @threaded for element in eachelement(dg, cache) for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) x_node = get_node_coords(cache.elements.node_coordinates, equations, dg, i, @@ -775,16 +823,19 @@ function compute_coefficients!(u, func, t, mesh::AbstractMesh{3}, equations, dg: set_node_vars!(u, u_node, equations, dg, i, j, k, element) end end + + return nothing end # Discretizations specific to each mesh type of Trixi.jl + # If some functionality is shared by multiple combinations of meshes/solvers, # it is defined in the directory of the most basic mesh and solver type. # The most basic solver type in Trixi.jl is DGSEM (historic reasons and background # of the main contributors). # We consider the `TreeMesh` to be the most basic mesh type since it is Cartesian -# and was the first mesh in Trixi.jl. 
The order of the other mesh types is the same -# as the include order below. +# and was the first mesh in Trixi.jl. +# The order of the other mesh types is the same as the include order below. include("dgsem_tree/dg.jl") include("dgsem_structured/dg.jl") include("dgsem_unstructured/dg.jl") diff --git a/src/solvers/dgmulti/dg.jl b/src/solvers/dgmulti/dg.jl index b2a547ac64d..e3e01d42171 100644 --- a/src/solvers/dgmulti/dg.jl +++ b/src/solvers/dgmulti/dg.jl @@ -159,7 +159,7 @@ function reset_du!(du, dg::DGMulti, other_args...) du[i] = zero(eltype(du)) end - return du + return nothing end # Constructs cache variables for both affine and non-affine (curved) DGMultiMeshes @@ -209,7 +209,7 @@ function create_cache(mesh::DGMultiMesh{NDIMS}, equations, dg::DGMultiWeakForm, local_values_threaded, flux_threaded, rotated_flux_threaded) end -function compute_coefficients!(u, initial_condition, t, +function compute_coefficients!(::Nothing, u, initial_condition, t, mesh::DGMultiMesh, equations, dg::DGMulti, cache) md = mesh.md rd = dg.basis @@ -223,6 +223,8 @@ function compute_coefficients!(u, initial_condition, t, # multiplying by Pq computes the L2 projection apply_to_each_field(mul_by!(rd.Pq), u, u_values) + + return nothing end # estimates the timestep based on polynomial degree and mesh. 
Does not account for physics (e.g., @@ -267,13 +269,12 @@ function max_dt(u, t, mesh::DGMultiMesh, @unpack md = mesh rd = dg.basis + # Compute max_speeds only once, since it's constant for all nodes/elements + max_speeds = max_abs_speeds(equations) + dt_min = Inf for e in eachelement(mesh, dg, cache) h_e = StartUpDG.estimate_h(e, rd, md) - max_speeds = ntuple(_ -> nextfloat(zero(t)), NDIMS) - for i in Base.OneTo(rd.Np) # loop over nodes - max_speeds = max.(max_abs_speeds(equations), max_speeds) - end dt_min = min(dt_min, h_e / sum(max_speeds)) end # This mimics `max_dt` for `TreeMesh`, except that `nnodes(dg)` is replaced by @@ -289,6 +290,8 @@ function prolong2interfaces!(cache, u, rd = dg.basis @unpack u_face_values = cache apply_to_each_field(mul_by!(rd.Vf), u_face_values, u) + + return nothing end # version for affine meshes @@ -320,6 +323,8 @@ function calc_volume_integral!(du, u, mesh::DGMultiMesh, end end end + + return nothing end # version for curved meshes @@ -366,6 +371,8 @@ function calc_volume_integral!(du, u, mesh::DGMultiMesh{NDIMS, <:NonAffine}, view(du, :, e), rotated_flux_values) end end + + return nothing end function calc_interface_flux!(cache, surface_integral::SurfaceIntegralWeakForm, @@ -386,6 +393,8 @@ function calc_interface_flux!(cache, surface_integral::SurfaceIntegralWeakForm, normal = SVector{NDIMS}(getindex.(nxyzJ, idM)) / Jf[idM] flux_face_values[idM] = surface_flux(uM, uP, normal, equations) * Jf[idM] end + + return nothing end function calc_interface_flux!(cache, surface_integral::SurfaceIntegralWeakForm, @@ -419,6 +428,8 @@ function calc_interface_flux!(cache, surface_integral::SurfaceIntegralWeakForm, Jf[idM] end end + + return nothing end # assumes cache.flux_face_values is computed and filled with @@ -428,6 +439,8 @@ function calc_surface_integral!(du, u, mesh::DGMultiMesh, equations, dg::DGMulti, cache) rd = dg.basis apply_to_each_field(mul_by_accum!(rd.LIFT), du, cache.flux_face_values) + + return nothing end # Specialize for 
nodal SBP discretizations. Uses that Vf*u = u[Fmask,:] @@ -441,6 +454,8 @@ function prolong2interfaces!(cache, u, u_face_values[i, e] = u[fid, e] end end + + return nothing end # Specialize for nodal SBP discretizations. Uses that du = LIFT*u is equivalent to @@ -457,6 +472,8 @@ function calc_surface_integral!(du, u, mesh::DGMultiMesh, equations, du[fid, e] = du[fid, e] + flux_face_values[i, e] * lift_scalings[i] end end + + return nothing end # do nothing for periodic (default) boundary conditions @@ -472,6 +489,8 @@ function calc_boundary_flux!(cache, t, boundary_conditions, mesh, key, mesh, have_nonconservative_terms, equations, dg) end + + return nothing end function calc_single_boundary_flux!(cache, t, boundary_condition, boundary_key, mesh, @@ -516,6 +535,8 @@ function calc_single_boundary_flux!(cache, t, boundary_condition, boundary_key, # Note: modifying the values of the reshaped array modifies the values of cache.flux_face_values. # However, we don't have to re-reshape, since cache.flux_face_values still retains its original shape. + + return nothing end function calc_single_boundary_flux!(cache, t, boundary_condition, boundary_key, mesh, @@ -564,6 +585,8 @@ function calc_single_boundary_flux!(cache, t, boundary_condition, boundary_key, # Note: modifying the values of the reshaped array modifies the values of cache.flux_face_values. # However, we don't have to re-reshape, since cache.flux_face_values still retains its original shape. + + return nothing end # inverts Jacobian and scales by -1.0 @@ -575,6 +598,8 @@ function invert_jacobian!(du, mesh::DGMultiMesh, equations, dg::DGMulti, cache; du[i, e] *= scaling * invJ end end + + return nothing end # inverts Jacobian using weight-adjusted DG, and scales by -1.0. 
@@ -601,6 +626,8 @@ function invert_jacobian!(du, mesh::DGMultiMesh{NDIMS, <:NonAffine}, equations, # project back to polynomials apply_to_each_field(mul_by!(Pq), view(du, :, e), du_at_quad_points) end + + return nothing end # Multiple calc_sources! to resolve method ambiguities @@ -631,6 +658,8 @@ function calc_sources!(du, u, t, source_terms, end apply_to_each_field(mul_by_accum!(Pq), view(du, :, e), source_values) end + + return nothing end function rhs!(du, u, t, mesh, equations, diff --git a/src/solvers/dgmulti/dg_parabolic.jl b/src/solvers/dgmulti/dg_parabolic.jl index f1b6eba956f..fabb952e01b 100644 --- a/src/solvers/dgmulti/dg_parabolic.jl +++ b/src/solvers/dgmulti/dg_parabolic.jl @@ -67,6 +67,8 @@ function transform_variables!(u_transformed, u, mesh, @threaded for i in eachindex(u) u_transformed[i] = transformation(u[i], equations_parabolic) end + + return nothing end # TODO: reuse entropy projection computations for DGMultiFluxDiff{<:Polynomial} (including `GaussSBP` solvers) @@ -86,6 +88,8 @@ function calc_gradient_surface_integral!(gradients, u, scalar_flux_face_values, view(gradients[dim], :, e), local_flux_values) end end + + return nothing end function calc_gradient_volume_integral!(gradients, u, mesh::DGMultiMesh, @@ -105,6 +109,8 @@ function calc_gradient_volume_integral!(gradients, u, mesh::DGMultiMesh, view(gradients[i], :, e), view(u, :, e)) end end + + return nothing end function calc_gradient_volume_integral!(gradients, u, mesh::DGMultiMesh{NDIMS, <:NonAffine}, @@ -131,6 +137,8 @@ function calc_gradient_volume_integral!(gradients, u, mesh::DGMultiMesh{NDIMS, < end end end + + return nothing end function calc_gradient_interface_flux!(scalar_flux_face_values, @@ -149,6 +157,8 @@ function calc_gradient_interface_flux!(scalar_flux_face_values, # stable on curved meshes with variable geometric terms. 
scalar_flux_face_values[idM] = 0.5f0 * (uP - uM) end + + return nothing end function calc_gradient!(gradients, u::StructArray, t, mesh::DGMultiMesh, @@ -181,6 +191,8 @@ function calc_gradient!(gradients, u::StructArray, t, mesh::DGMultiMesh, mesh, equations, dg, cache, cache_parabolic) invert_jacobian_gradient!(gradients, mesh, equations, dg, cache, cache_parabolic) + + return nothing end # affine mesh - constant Jacobian version @@ -198,6 +210,8 @@ function invert_jacobian_gradient!(gradients, mesh::DGMultiMesh, equations, dg:: end end end + + return nothing end # non-affine mesh - variable Jacobian version @@ -212,6 +226,8 @@ function invert_jacobian_gradient!(gradients, mesh::DGMultiMesh{NDIMS, <:NonAffi end end end + + return nothing end # do nothing for periodic domains @@ -233,6 +249,8 @@ function calc_boundary_flux!(flux, u, t, operator_type, boundary_conditions, # recurse on the remainder of the boundary conditions calc_boundary_flux!(flux, u, t, operator_type, Base.tail(boundary_conditions), mesh, equations, dg, cache, cache_parabolic) + + return nothing end # terminate recursion @@ -308,6 +326,8 @@ function calc_viscous_fluxes!(flux_viscous, u, gradients, mesh::DGMultiMesh, end end end + + return nothing end # no penalization for a BR1 parabolic solver @@ -348,6 +368,8 @@ function calc_divergence_volume_integral!(du, u, flux_viscous, mesh::DGMultiMesh view(du, :, e), view(flux_viscous[i], :, e)) end end + + return nothing end function calc_divergence_volume_integral!(du, u, flux_viscous, @@ -375,6 +397,8 @@ function calc_divergence_volume_integral!(du, u, flux_viscous, view(du, :, e), local_viscous_flux) end end + + return nothing end function calc_divergence_interface_flux!(scalar_flux_face_values, @@ -398,6 +422,8 @@ function calc_divergence_interface_flux!(scalar_flux_face_values, end scalar_flux_face_values[idM] = flux_face_value end + + return nothing end function calc_divergence!(du, u::StructArray, t, flux_viscous, mesh::DGMultiMesh, @@ -434,6 
+460,8 @@ function calc_divergence!(du, u::StructArray, t, flux_viscous, mesh::DGMultiMesh # surface contributions apply_to_each_field(mul_by_accum!(cache_parabolic.divergence_lift_matrix), du, scalar_flux_face_values) + + return nothing end # assumptions: parabolic terms are of the form div(f(u, grad(u))) and diff --git a/src/solvers/dgmulti/shock_capturing.jl b/src/solvers/dgmulti/shock_capturing.jl index 0a4a3623206..617cb056133 100644 --- a/src/solvers/dgmulti/shock_capturing.jl +++ b/src/solvers/dgmulti/shock_capturing.jl @@ -79,20 +79,27 @@ function (indicator_hg::IndicatorHennemannGassner)(u, mesh::DGMultiMesh, # multiply by invVDM::SimpleKronecker LinearAlgebra.mul!(modal_, inverse_vandermonde, indicator) + # Create Returns functors to return the constructor args (e.g., Base.OneTo(dg.basis.N)) no matter what + # Returns(Base.OneTo(dg.basis.N)) equiv to _ -> Base.OneTo(dg.basis.N), with possibly fewer allocs + return_N_plus_one = Returns(dg.basis.N + 1) + return_to_N_minus_one = Returns(Base.OneTo(dg.basis.N - 1)) + return_to_N = Returns(Base.OneTo(dg.basis.N)) + # As of Julia 1.9, Base.ReshapedArray does not produce allocations when setting values. # Thus, Base.ReshapedArray should be used if you are setting values in the array. # `reshape` is fine if you are only accessing values. # Here, we reshape modal coefficients to expose the tensor product structure. 
- modal = Base.ReshapedArray(modal_, ntuple(_ -> dg.basis.N + 1, NDIMS), ()) + + modal = Base.ReshapedArray(modal_, ntuple(return_N_plus_one, NDIMS), ()) # Calculate total energies for all modes, all modes minus the highest mode, and # all modes without the two highest modes - total_energy = sum(x -> x^2, modal) - clip_1_ranges = ntuple(_ -> Base.OneTo(dg.basis.N), NDIMS) - clip_2_ranges = ntuple(_ -> Base.OneTo(dg.basis.N - 1), NDIMS) + total_energy = sum(abs2, modal) + clip_1_ranges = ntuple(return_to_N, NDIMS) + clip_2_ranges = ntuple(return_to_N_minus_one, NDIMS) # These splattings do not seem to allocate as of Julia 1.9.0? - total_energy_clip1 = sum(x -> x^2, view(modal, clip_1_ranges...)) - total_energy_clip2 = sum(x -> x^2, view(modal, clip_2_ranges...)) + total_energy_clip1 = sum(abs2, view(modal, clip_1_ranges...)) + total_energy_clip2 = sum(abs2, view(modal, clip_2_ranges...)) # Calculate energy in higher modes if !(iszero(total_energy)) @@ -145,6 +152,8 @@ function apply_smoothing!(mesh::DGMultiMesh, alpha, alpha_tmp, dg::DGMulti, cach alpha[element] = max(alpha[element], 0.5 * alpha_neighbor) end end + + return nothing end function calc_volume_integral!(du, u, diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 777348aa8ce..9647f172e20 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -34,6 +34,32 @@ struct LobattoLegendreBasis{RealT <: Real, NNODES, # negative adjoint wrt the SBP dot product end +function Adapt.adapt_structure(to, basis::LobattoLegendreBasis) + inverse_vandermonde_legendre = adapt(to, basis.inverse_vandermonde_legendre) + RealT = eltype(inverse_vandermonde_legendre) + + nodes = SVector{<:Any, RealT}(basis.nodes) + weights = SVector{<:Any, RealT}(basis.weights) + inverse_weights = SVector{<:Any, RealT}(basis.inverse_weights) + boundary_interpolation = adapt(to, basis.boundary_interpolation) + derivative_matrix = adapt(to, 
basis.derivative_matrix) + derivative_split = adapt(to, basis.derivative_split) + derivative_split_transpose = adapt(to, basis.derivative_split_transpose) + derivative_dhat = adapt(to, basis.derivative_dhat) + return LobattoLegendreBasis{RealT, nnodes(basis), typeof(nodes), + typeof(inverse_vandermonde_legendre), + typeof(boundary_interpolation), + typeof(derivative_matrix)}(nodes, + weights, + inverse_weights, + inverse_vandermonde_legendre, + boundary_interpolation, + derivative_matrix, + derivative_split, + derivative_split_transpose, + derivative_dhat) +end + function LobattoLegendreBasis(RealT, polydeg::Integer) nnodes_ = polydeg + 1 @@ -155,6 +181,17 @@ struct LobattoLegendreMortarL2{RealT <: Real, NNODES, reverse_lower::ReverseMatrix end +function Adapt.adapt_structure(to, mortar::LobattoLegendreMortarL2) + forward_upper = adapt(to, mortar.forward_upper) + forward_lower = adapt(to, mortar.forward_lower) + reverse_upper = adapt(to, mortar.reverse_upper) + reverse_lower = adapt(to, mortar.reverse_lower) + return LobattoLegendreMortarL2{eltype(forward_upper), nnodes(mortar), + typeof(forward_upper), + typeof(reverse_upper)}(forward_upper, forward_lower, + reverse_upper, reverse_lower) +end + function MortarL2(basis::LobattoLegendreBasis) RealT = real(basis) nnodes_ = nnodes(basis) diff --git a/src/solvers/dgsem/dgsem.jl b/src/solvers/dgsem/dgsem.jl index bb4b02571a5..7a0bece6cd7 100644 --- a/src/solvers/dgsem/dgsem.jl +++ b/src/solvers/dgsem/dgsem.jl @@ -51,7 +51,7 @@ function DGSEM(RealT, polydeg::Integer, end # This API is no longer documented, and we recommend avoiding its public use. 
-function DGSEM(polydeg, surface_flux = flux_central, +function DGSEM(polydeg::Integer, surface_flux = flux_central, volume_integral = VolumeIntegralWeakForm()) DGSEM(Float64, polydeg, surface_flux, volume_integral) end @@ -71,4 +71,64 @@ end @inline polydeg(dg::DGSEM) = polydeg(dg.basis) Base.summary(io::IO, dg::DGSEM) = print(io, "DGSEM(polydeg=$(polydeg(dg)))") + +# `compute_u_mean` used in: +# (Stage-) Callbacks `EntropyBoundedLimiter` and `PositivityPreservingLimiterZhangShu` + +# positional arguments `mesh` and `cache` passed in to match signature of 2D/3D functions +@inline function compute_u_mean(u::AbstractArray{<:Any, 3}, element, + mesh::AbstractMesh{1}, equations, dg::DGSEM, cache) + @unpack weights = dg.basis + + u_mean = zero(get_node_vars(u, equations, dg, 1, element)) + for i in eachnode(dg) + u_node = get_node_vars(u, equations, dg, i, element) + u_mean += u_node * weights[i] + end + # normalize with the total volume + # note that the reference element is [-1,1], thus the weights sum to 2 + return 0.5f0 * u_mean +end + +@inline function compute_u_mean(u::AbstractArray{<:Any, 4}, element, + mesh::AbstractMesh{2}, equations, dg::DGSEM, cache) + @unpack weights = dg.basis + @unpack inverse_jacobian = cache.elements + + node_volume = zero(real(mesh)) + total_volume = zero(node_volume) + + u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element)) + for j in eachnode(dg), i in eachnode(dg) + volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, + i, j, element))) + node_volume = weights[i] * weights[j] * volume_jacobian + total_volume += node_volume + + u_node = get_node_vars(u, equations, dg, i, j, element) + u_mean += u_node * node_volume + end + return u_mean / total_volume # normalize with the total volume +end + +@inline function compute_u_mean(u::AbstractArray{<:Any, 5}, element, + mesh::AbstractMesh{3}, equations, dg::DGSEM, cache) + @unpack weights = dg.basis + @unpack inverse_jacobian = cache.elements + + node_volume = 
zero(real(mesh)) + total_volume = zero(node_volume) + + u_mean = zero(get_node_vars(u, equations, dg, 1, 1, 1, element)) + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + volume_jacobian = abs(inv(get_inverse_jacobian(inverse_jacobian, mesh, + i, j, k, element))) + node_volume = weights[i] * weights[j] * weights[k] * volume_jacobian + total_volume += node_volume + + u_node = get_node_vars(u, equations, dg, i, j, k, element) + u_mean += u_node * node_volume + end + return u_mean / total_volume # normalize with the total volume +end end # @muladd diff --git a/src/solvers/dgsem_p4est/containers.jl b/src/solvers/dgsem_p4est/containers.jl index a070db6b701..3f74f699f19 100644 --- a/src/solvers/dgsem_p4est/containers.jl +++ b/src/solvers/dgsem_p4est/containers.jl @@ -5,26 +5,38 @@ @muladd begin #! format: noindent -mutable struct P4estElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, - NDIMSP2, NDIMSP3} <: AbstractContainer +mutable struct P4estElementContainer{NDIMS, RealT <: Real, uEltype <: Real, + NDIMSP1, NDIMSP2, NDIMSP3, + ArrayRealTNDIMSP1 <: DenseArray{RealT, NDIMSP1}, + ArrayRealTNDIMSP2 <: DenseArray{RealT, NDIMSP2}, + ArrayRealTNDIMSP3 <: DenseArray{RealT, NDIMSP3}, + VectorRealT <: DenseVector{RealT}, + ArrayuEltypeNDIMSP2 <: + DenseArray{uEltype, NDIMSP2}, + VectoruEltype <: DenseVector{uEltype}} <: + AbstractContainer # Physical coordinates at each node - node_coordinates::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] + node_coordinates::ArrayRealTNDIMSP2 # [orientation, node_i, node_j, node_k, element] + # Jacobian matrix of the transformation # [jacobian_i, jacobian_j, node_i, node_j, node_k, element] where jacobian_i is the first index of the Jacobian matrix,... 
- jacobian_matrix::Array{RealT, NDIMSP3} + jacobian_matrix::ArrayRealTNDIMSP3 + # Contravariant vectors, scaled by J, in Kopriva's blue book called Ja^i_n (i index, n dimension) - contravariant_vectors::Array{RealT, NDIMSP3} # [dimension, index, node_i, node_j, node_k, element] + contravariant_vectors::ArrayRealTNDIMSP3 # [dimension, index, node_i, node_j, node_k, element] + # 1/J where J is the Jacobian determinant (determinant of Jacobian matrix) - inverse_jacobian::Array{RealT, NDIMSP1} # [node_i, node_j, node_k, element] + inverse_jacobian::ArrayRealTNDIMSP1 # [node_i, node_j, node_k, element] + # Buffer for calculated surface flux - surface_flux_values::Array{uEltype, NDIMSP2} # [variable, i, j, direction, element] + surface_flux_values::ArrayuEltypeNDIMSP2 # [variable, i, j, direction, element] # internal `resize!`able storage - _node_coordinates::Vector{RealT} - _jacobian_matrix::Vector{RealT} - _contravariant_vectors::Vector{RealT} - _inverse_jacobian::Vector{RealT} - _surface_flux_values::Vector{uEltype} + _node_coordinates::VectorRealT + _jacobian_matrix::VectorRealT + _contravariant_vectors::VectorRealT + _inverse_jacobian::VectorRealT + _surface_flux_values::VectoruEltype end @inline function nelements(elements::P4estElementContainer) @@ -36,7 +48,7 @@ end RealT, uEltype } - uEltype + return uEltype end # Only one-dimensional `Array`s are `resize!`able in Julia. 
@@ -51,31 +63,41 @@ function Base.resize!(elements::P4estElementContainer, capacity) n_dims = ndims(elements) n_nodes = size(elements.node_coordinates, 2) n_variables = size(elements.surface_flux_values, 1) + ArrayType = storage_type(elements) resize!(_node_coordinates, n_dims * n_nodes^n_dims * capacity) - elements.node_coordinates = unsafe_wrap(Array, pointer(_node_coordinates), - (n_dims, ntuple(_ -> n_nodes, n_dims)..., - capacity)) + elements.node_coordinates = unsafe_wrap_or_alloc(ArrayType, + _node_coordinates, + (n_dims, + ntuple(_ -> n_nodes, n_dims)..., + capacity)) resize!(_jacobian_matrix, n_dims^2 * n_nodes^n_dims * capacity) - elements.jacobian_matrix = unsafe_wrap(Array, pointer(_jacobian_matrix), - (n_dims, n_dims, - ntuple(_ -> n_nodes, n_dims)..., capacity)) + elements.jacobian_matrix = unsafe_wrap_or_alloc(ArrayType, + _jacobian_matrix, + (n_dims, n_dims, + ntuple(_ -> n_nodes, n_dims)..., + capacity)) resize!(_contravariant_vectors, length(_jacobian_matrix)) - elements.contravariant_vectors = unsafe_wrap(Array, pointer(_contravariant_vectors), - size(elements.jacobian_matrix)) + elements.contravariant_vectors = unsafe_wrap_or_alloc(ArrayType, + _contravariant_vectors, + size(elements.jacobian_matrix)) resize!(_inverse_jacobian, n_nodes^n_dims * capacity) - elements.inverse_jacobian = unsafe_wrap(Array, pointer(_inverse_jacobian), - (ntuple(_ -> n_nodes, n_dims)..., capacity)) + elements.inverse_jacobian = unsafe_wrap_or_alloc(ArrayType, + _inverse_jacobian, + (ntuple(_ -> n_nodes, n_dims)..., + capacity)) resize!(_surface_flux_values, n_variables * n_nodes^(n_dims - 1) * (n_dims * 2) * capacity) - elements.surface_flux_values = unsafe_wrap(Array, pointer(_surface_flux_values), - (n_variables, - ntuple(_ -> n_nodes, n_dims - 1)..., - n_dims * 2, capacity)) + elements.surface_flux_values = unsafe_wrap_or_alloc(ArrayType, + _surface_flux_values, + (n_variables, + ntuple(_ -> n_nodes, + n_dims - 1)..., + n_dims * 2, capacity)) return nothing end @@ 
-116,34 +138,109 @@ function init_elements(mesh::Union{P4estMesh{NDIMS, NDIMS, RealT}, ntuple(_ -> nnodes(basis), NDIMS - 1)..., NDIMS * 2, nelements)) - elements = P4estElementContainer{NDIMS, RealT, uEltype, NDIMS + 1, NDIMS + 2, - NDIMS + 3}(node_coordinates, jacobian_matrix, - contravariant_vectors, - inverse_jacobian, surface_flux_values, - _node_coordinates, _jacobian_matrix, - _contravariant_vectors, - _inverse_jacobian, _surface_flux_values) + elements = P4estElementContainer{NDIMS, RealT, uEltype, + NDIMS + 1, NDIMS + 2, NDIMS + 3, + Array{RealT, NDIMS + 1}, + Array{RealT, NDIMS + 2}, + Array{RealT, NDIMS + 3}, + Vector{RealT}, + Array{uEltype, NDIMS + 2}, Vector{uEltype}}(node_coordinates, + jacobian_matrix, + contravariant_vectors, + inverse_jacobian, + surface_flux_values, + _node_coordinates, + _jacobian_matrix, + _contravariant_vectors, + _inverse_jacobian, + _surface_flux_values) init_elements!(elements, mesh, basis) return elements end -mutable struct P4estInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2} <: +function Adapt.parent_type(::Type{<:P4estElementContainer{<:Any, <:Any, <:Any, <:Any, + <:Any, <:Any, ArrayT}}) where {ArrayT} + ArrayT +end + +# Manual adapt_structure since we have aliasing memory +function Adapt.adapt_structure(to, + elements::P4estElementContainer{NDIMS}) where {NDIMS} + # Adapt underlying storage + _node_coordinates = adapt(to, elements._node_coordinates) + _jacobian_matrix = adapt(to, elements._jacobian_matrix) + _contravariant_vectors = adapt(to, elements._contravariant_vectors) + _inverse_jacobian = adapt(to, elements._inverse_jacobian) + _surface_flux_values = adapt(to, elements._surface_flux_values) + + RealT = eltype(_inverse_jacobian) + uEltype = eltype(_surface_flux_values) + + # Wrap arrays again + node_coordinates = unsafe_wrap_or_alloc(to, _node_coordinates, + size(elements.node_coordinates)) + jacobian_matrix = unsafe_wrap_or_alloc(to, _jacobian_matrix, + size(elements.jacobian_matrix)) + 
contravariant_vectors = unsafe_wrap_or_alloc(to, _contravariant_vectors, + size(jacobian_matrix)) + inverse_jacobian = unsafe_wrap_or_alloc(to, _inverse_jacobian, + size(elements.inverse_jacobian)) + surface_flux_values = unsafe_wrap_or_alloc(to, _surface_flux_values, + size(elements.surface_flux_values)) + + new_type_params = (NDIMS, + RealT, + uEltype, + NDIMS + 1, + NDIMS + 2, + NDIMS + 3, + typeof(inverse_jacobian), # ArrayRealTNDIMSP1 + typeof(node_coordinates), # ArrayRealTNDIMSP2 + typeof(jacobian_matrix), # ArrayRealTNDIMSP3 + typeof(_node_coordinates), # VectorRealT + typeof(surface_flux_values), # ArrayuEltypeNDIMSP2 + typeof(_surface_flux_values)) # VectoruEltype + return P4estElementContainer{new_type_params...}(node_coordinates, + jacobian_matrix, + contravariant_vectors, + inverse_jacobian, + surface_flux_values, + _node_coordinates, + _jacobian_matrix, + _contravariant_vectors, + _inverse_jacobian, + _surface_flux_values) +end + +mutable struct P4estInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2, + uArray <: DenseArray{uEltype, NDIMSP2}, + IdsMatrix <: DenseMatrix{Int}, + IndicesMatrix <: + DenseMatrix{NTuple{NDIMS, Symbol}}, + uVector <: DenseVector{uEltype}, + IdsVector <: DenseVector{Int}, + IndicesVector <: + DenseVector{NTuple{NDIMS, Symbol}}} <: AbstractContainer - u::Array{uEltype, NDIMSP2} # [primary/secondary, variable, i, j, interface] - neighbor_ids::Matrix{Int} # [primary/secondary, interface] - node_indices::Matrix{NTuple{NDIMS, Symbol}} # [primary/secondary, interface] + u::uArray # [primary/secondary, variable, i, j, interface] + neighbor_ids::IdsMatrix # [primary/secondary, interface] + node_indices::IndicesMatrix # [primary/secondary, interface] # internal `resize!`able storage - _u::Vector{uEltype} - _neighbor_ids::Vector{Int} - _node_indices::Vector{NTuple{NDIMS, Symbol}} + _u::uVector + _neighbor_ids::IdsVector + _node_indices::IndicesVector end @inline function ninterfaces(interfaces::P4estInterfaceContainer) 
size(interfaces.neighbor_ids, 2) end @inline Base.ndims(::P4estInterfaceContainer{NDIMS}) where {NDIMS} = NDIMS +@inline function Base.eltype(::P4estInterfaceContainer{NDIMS, uEltype}) where {NDIMS, + uEltype} + return uEltype +end # See explanation of Base.resize! for the element container function Base.resize!(interfaces::P4estInterfaceContainer, capacity) @@ -152,17 +249,20 @@ function Base.resize!(interfaces::P4estInterfaceContainer, capacity) n_dims = ndims(interfaces) n_nodes = size(interfaces.u, 3) n_variables = size(interfaces.u, 2) + ArrayType = storage_type(interfaces) resize!(_u, 2 * n_variables * n_nodes^(n_dims - 1) * capacity) - interfaces.u = unsafe_wrap(Array, pointer(_u), + interfaces.u = unsafe_wrap(ArrayType, pointer(_u), (2, n_variables, ntuple(_ -> n_nodes, n_dims - 1)..., capacity)) resize!(_neighbor_ids, 2 * capacity) - interfaces.neighbor_ids = unsafe_wrap(Array, pointer(_neighbor_ids), (2, capacity)) + interfaces.neighbor_ids = unsafe_wrap(ArrayType, pointer(_neighbor_ids), + (2, capacity)) resize!(_node_indices, 2 * capacity) - interfaces.node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, capacity)) + interfaces.node_indices = unsafe_wrap(ArrayType, pointer(_node_indices), + (2, capacity)) return nothing end @@ -189,10 +289,15 @@ function init_interfaces(mesh::Union{P4estMesh, P4estMeshView, T8codeMesh}, equa _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_interfaces) node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_interfaces)) - interfaces = P4estInterfaceContainer{NDIMS, uEltype, NDIMS + 2}(u, neighbor_ids, - node_indices, - _u, _neighbor_ids, - _node_indices) + interfaces = P4estInterfaceContainer{NDIMS, uEltype, NDIMS + 2, + typeof(u), typeof(neighbor_ids), + typeof(node_indices), typeof(_u), + typeof(_neighbor_ids), typeof(_node_indices)}(u, + neighbor_ids, + node_indices, + _u, + _neighbor_ids, + _node_indices) init_interfaces!(interfaces, mesh) @@ -205,21 +310,58 @@ function 
init_interfaces!(interfaces, mesh::Union{P4estMesh, P4estMeshView}) return interfaces end -mutable struct P4estBoundaryContainer{NDIMS, uEltype <: Real, NDIMSP1} <: +function Adapt.parent_type(::Type{<:P4estInterfaceContainer{<:Any, <:Any, <:Any, + ArrayT}}) where {ArrayT} + ArrayT +end + +# Manual adapt_structure since we have aliasing memory +function Adapt.adapt_structure(to, interfaces::P4estInterfaceContainer) + # Adapt underlying storage + _u = adapt(to, interfaces._u) + _neighbor_ids = adapt(to, interfaces._neighbor_ids) + _node_indices = adapt(to, interfaces._node_indices) + # Wrap arrays again + u = unsafe_wrap_or_alloc(to, _u, size(interfaces.u)) + neighbor_ids = unsafe_wrap_or_alloc(to, _neighbor_ids, + size(interfaces.neighbor_ids)) + node_indices = unsafe_wrap_or_alloc(to, _node_indices, + size(interfaces.node_indices)) + + NDIMS = ndims(interfaces) + new_type_params = (NDIMS, + eltype(_u), + NDIMS + 2, + typeof(u), typeof(neighbor_ids), typeof(node_indices), + typeof(_u), typeof(_neighbor_ids), typeof(_node_indices)) + return P4estInterfaceContainer{new_type_params...}(u, neighbor_ids, node_indices, + _u, _neighbor_ids, _node_indices) +end + +mutable struct P4estBoundaryContainer{NDIMS, uEltype <: Real, NDIMSP1, + uArray <: DenseArray{uEltype, NDIMSP1}, + IdsVector <: DenseVector{Int}, + IndicesVector <: + DenseVector{NTuple{NDIMS, Symbol}}, + uVector <: DenseVector{uEltype}} <: AbstractContainer - u::Array{uEltype, NDIMSP1} # [variables, i, j, boundary] - neighbor_ids::Vector{Int} # [boundary] - node_indices::Vector{NTuple{NDIMS, Symbol}} # [boundary] - name::Vector{Symbol} # [boundary] + u::uArray # [variables, i, j, boundary] + neighbor_ids::IdsVector # [boundary] + node_indices::IndicesVector # [boundary] + name::Vector{Symbol} # [boundary] # internal `resize!`able storage - _u::Vector{uEltype} + _u::uVector end @inline function nboundaries(boundaries::P4estBoundaryContainer) length(boundaries.neighbor_ids) end @inline 
Base.ndims(::P4estBoundaryContainer{NDIMS}) where {NDIMS} = NDIMS +@inline function Base.eltype(::P4estBoundaryContainer{NDIMS, uEltype}) where {NDIMS, + uEltype} + uEltype +end # See explanation of Base.resize! for the element container function Base.resize!(boundaries::P4estBoundaryContainer, capacity) @@ -228,9 +370,10 @@ function Base.resize!(boundaries::P4estBoundaryContainer, capacity) n_dims = ndims(boundaries) n_nodes = size(boundaries.u, 2) n_variables = size(boundaries.u, 1) + ArrayType = storage_type(boundaries) resize!(_u, n_variables * n_nodes^(n_dims - 1) * capacity) - boundaries.u = unsafe_wrap(Array, pointer(_u), + boundaries.u = unsafe_wrap(ArrayType, pointer(_u), (n_variables, ntuple(_ -> n_nodes, n_dims - 1)..., capacity)) @@ -263,9 +406,11 @@ function init_boundaries(mesh::Union{P4estMesh, P4estMeshView, T8codeMesh}, equa node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, n_boundaries) names = Vector{Symbol}(undef, n_boundaries) - boundaries = P4estBoundaryContainer{NDIMS, uEltype, NDIMS + 1}(u, neighbor_ids, - node_indices, names, - _u) + boundaries = P4estBoundaryContainer{NDIMS, uEltype, NDIMS + 1, typeof(u), + typeof(neighbor_ids), typeof(node_indices), + typeof(_u)}(u, neighbor_ids, + node_indices, names, + _u) if n_boundaries > 0 init_boundaries!(boundaries, mesh) @@ -312,6 +457,25 @@ function init_boundaries_iter_face_inner(info_pw, boundaries, boundary_id, mesh) return nothing end +function Adapt.parent_type(::Type{<:P4estBoundaryContainer{<:Any, <:Any, <:Any, ArrayT}}) where {ArrayT} + ArrayT +end + +# Manual adapt_structure since we have aliasing memory +function Adapt.adapt_structure(to, boundaries::P4estBoundaryContainer) + _u = adapt(to, boundaries._u) + u = unsafe_wrap_or_alloc(to, _u, size(boundaries.u)) + neighbor_ids = adapt(to, boundaries.neighbor_ids) + node_indices = adapt(to, boundaries.node_indices) + name = boundaries.name + + NDIMS = ndims(boundaries) + return P4estBoundaryContainer{NDIMS, eltype(_u), NDIMS + 1, 
typeof(u), + typeof(neighbor_ids), typeof(node_indices), + typeof(_u)}(u, neighbor_ids, node_indices, + name, _u) +end + # Container data structure (structure-of-arrays style) for DG L2 mortars # # The positions used in `neighbor_ids` are 1:3 (in 2D) or 1:5 (in 3D), where 1:2 (in 2D) @@ -337,20 +501,32 @@ end # │ └─────────────┴─────────────┘ └───────────────────────────┘ # │ # ⋅────> ξ -mutable struct P4estMortarContainer{NDIMS, uEltype <: Real, NDIMSP1, NDIMSP3} <: +mutable struct P4estMortarContainer{NDIMS, uEltype <: Real, NDIMSP1, NDIMSP3, + uArray <: DenseArray{uEltype, NDIMSP3}, + IdsMatrix <: DenseMatrix{Int}, + IndicesMatrix <: + DenseMatrix{NTuple{NDIMS, Symbol}}, + uVector <: DenseVector{uEltype}, + IdsVector <: DenseVector{Int}, + IndicesVector <: + DenseVector{NTuple{NDIMS, Symbol}}} <: AbstractContainer - u::Array{uEltype, NDIMSP3} # [small/large side, variable, position, i, j, mortar] - neighbor_ids::Matrix{Int} # [position, mortar] - node_indices::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar] + u::uArray # [small/large side, variable, position, i, j, mortar] + neighbor_ids::IdsMatrix # [position, mortar] + node_indices::IndicesMatrix # [small/large, mortar] # internal `resize!`able storage - _u::Vector{uEltype} - _neighbor_ids::Vector{Int} - _node_indices::Vector{NTuple{NDIMS, Symbol}} + _u::uVector + _neighbor_ids::IdsVector + _node_indices::IndicesVector end @inline nmortars(mortars::P4estMortarContainer) = size(mortars.neighbor_ids, 2) @inline Base.ndims(::P4estMortarContainer{NDIMS}) where {NDIMS} = NDIMS +@inline function Base.eltype(::P4estMortarContainer{NDIMS, uEltype}) where {NDIMS, + uEltype} + uEltype +end # See explanation of Base.resize! 
for the element container function Base.resize!(mortars::P4estMortarContainer, capacity) @@ -359,18 +535,19 @@ function Base.resize!(mortars::P4estMortarContainer, capacity) n_dims = ndims(mortars) n_nodes = size(mortars.u, 4) n_variables = size(mortars.u, 2) + ArrayType = storage_type(mortars) resize!(_u, 2 * n_variables * 2^(n_dims - 1) * n_nodes^(n_dims - 1) * capacity) - mortars.u = unsafe_wrap(Array, pointer(_u), + mortars.u = unsafe_wrap(ArrayType, pointer(_u), (2, n_variables, 2^(n_dims - 1), ntuple(_ -> n_nodes, n_dims - 1)..., capacity)) resize!(_neighbor_ids, (2^(n_dims - 1) + 1) * capacity) - mortars.neighbor_ids = unsafe_wrap(Array, pointer(_neighbor_ids), + mortars.neighbor_ids = unsafe_wrap(ArrayType, pointer(_neighbor_ids), (2^(n_dims - 1) + 1, capacity)) resize!(_node_indices, 2 * capacity) - mortars.node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, capacity)) + mortars.node_indices = unsafe_wrap(ArrayType, pointer(_node_indices), (2, capacity)) return nothing end @@ -398,12 +575,15 @@ function init_mortars(mesh::Union{P4estMesh, P4estMeshView, T8codeMesh}, equatio _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_mortars) node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_mortars)) - mortars = P4estMortarContainer{NDIMS, uEltype, NDIMS + 1, NDIMS + 3}(u, - neighbor_ids, - node_indices, - _u, - _neighbor_ids, - _node_indices) + mortars = P4estMortarContainer{NDIMS, uEltype, NDIMS + 1, NDIMS + 3, typeof(u), + typeof(neighbor_ids), typeof(node_indices), + typeof(_u), typeof(_neighbor_ids), + typeof(_node_indices)}(u, + neighbor_ids, + node_indices, + _u, + _neighbor_ids, + _node_indices) if n_mortars > 0 init_mortars!(mortars, mesh) @@ -418,6 +598,34 @@ function init_mortars!(mortars, mesh::Union{P4estMesh, P4estMeshView}) return mortars end +function Adapt.parent_type(::Type{<:P4estMortarContainer{<:Any, <:Any, <:Any, <:Any, + ArrayT}}) where {ArrayT} + ArrayT +end + +# Manual adapt_structure since we have aliasing 
memory +function Adapt.adapt_structure(to, mortars::P4estMortarContainer) + # Adapt underlying storage + _u = adapt(to, mortars._u) + _neighbor_ids = adapt(to, mortars._neighbor_ids) + _node_indices = adapt(to, mortars._node_indices) + + # Wrap arrays again + u = unsafe_wrap_or_alloc(to, _u, size(mortars.u)) + neighbor_ids = unsafe_wrap_or_alloc(to, _neighbor_ids, size(mortars.neighbor_ids)) + node_indices = unsafe_wrap_or_alloc(to, _node_indices, size(mortars.node_indices)) + + NDIMS = ndims(mortars) + new_type_params = (NDIMS, + eltype(_u), + NDIMS + 1, + NDIMS + 3, + typeof(u), typeof(neighbor_ids), typeof(node_indices), + typeof(_u), typeof(_neighbor_ids), typeof(_node_indices)) + return P4estMortarContainer{new_type_params...}(u, neighbor_ids, node_indices, + _u, _neighbor_ids, _node_indices) +end + function reinitialize_containers!(mesh::P4estMesh, equations, dg::DGSEM, cache) # Re-initialize elements container @unpack elements = cache diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index 676b37efff3..4b6fc813703 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -5,15 +5,19 @@ @muladd begin #! 
format: noindent -mutable struct P4estMPIInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2} <: +mutable struct P4estMPIInterfaceContainer{NDIMS, uEltype <: Real, NDIMSP2, + uArray <: DenseArray{uEltype, NDIMSP2}, + VecInt <: DenseVector{Int}, + IndicesVector <: + DenseVector{NTuple{NDIMS, Symbol}}, + uVector <: DenseVector{uEltype}} <: AbstractContainer - u::Array{uEltype, NDIMSP2} # [primary/secondary, variable, i, j, interface] - local_neighbor_ids::Vector{Int} # [interface] - node_indices::Vector{NTuple{NDIMS, Symbol}} # [interface] - local_sides::Vector{Int} # [interface] - + u::uArray # [primary/secondary, variable, i, j, interface] + local_neighbor_ids::VecInt # [interface] + node_indices::IndicesVector # [interface] + local_sides::VecInt # [interface] # internal `resize!`able storage - _u::Vector{uEltype} + _u::uVector end @inline function nmpiinterfaces(interfaces::P4estMPIInterfaceContainer) @@ -27,9 +31,10 @@ function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity) n_dims = ndims(mpi_interfaces) n_nodes = size(mpi_interfaces.u, 3) n_variables = size(mpi_interfaces.u, 2) + ArrayType = storage_type(mpi_interfaces) resize!(_u, 2 * n_variables * n_nodes^(n_dims - 1) * capacity) - mpi_interfaces.u = unsafe_wrap(Array, pointer(_u), + mpi_interfaces.u = unsafe_wrap(ArrayType, pointer(_u), (2, n_variables, ntuple(_ -> n_nodes, n_dims - 1)..., capacity)) @@ -64,11 +69,13 @@ function init_mpi_interfaces(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, local_sides = Vector{Int}(undef, n_mpi_interfaces) - mpi_interfaces = P4estMPIInterfaceContainer{NDIMS, uEltype, NDIMS + 2}(u, - local_neighbor_ids, - node_indices, - local_sides, - _u) + mpi_interfaces = P4estMPIInterfaceContainer{NDIMS, uEltype, NDIMS + 2, + typeof(u), typeof(local_neighbor_ids), + typeof(node_indices), typeof(_u)}(u, + local_neighbor_ids, + node_indices, + local_sides, + _u) init_mpi_interfaces!(mpi_interfaces, mesh) @@ -81,6 +88,32 @@ function 
init_mpi_interfaces!(mpi_interfaces, mesh::ParallelP4estMesh) return mpi_interfaces end +function Adapt.parent_type(::Type{<:Trixi.P4estMPIInterfaceContainer{<:Any, <:Any, + <:Any, A}}) where {A} + return A +end + +# Manual adapt_structure since we have aliasing memory +function Adapt.adapt_structure(to, mpi_interfaces::P4estMPIInterfaceContainer) + # Adapt Vectors and underlying storage + _u = adapt(to, mpi_interfaces._u) + local_neighbor_ids = adapt(to, mpi_interfaces.local_neighbor_ids) + node_indices = adapt(to, mpi_interfaces.node_indices) + local_sides = adapt(to, mpi_interfaces.local_sides) + + # Wrap array again + u = unsafe_wrap_or_alloc(to, _u, size(mpi_interfaces.u)) + + NDIMS = ndims(mpi_interfaces) + return P4estMPIInterfaceContainer{NDIMS, eltype(u), + NDIMS + 2, + typeof(u), typeof(local_neighbor_ids), + typeof(node_indices), typeof(_u)}(u, + local_neighbor_ids, + node_indices, + local_sides, _u) +end + # Container data structure (structure-of-arrays style) for DG L2 mortars # # Similar to `P4estMortarContainer`. The field `neighbor_ids` has been split up into @@ -88,14 +121,17 @@ end # available elements belonging to a particular MPI mortar. Furthermore, `normal_directions` holds # the normal vectors on the surface of the small elements for each mortar. 
mutable struct P4estMPIMortarContainer{NDIMS, uEltype <: Real, RealT <: Real, NDIMSP1, - NDIMSP2, NDIMSP3} <: AbstractContainer - u::Array{uEltype, NDIMSP3} # [small/large side, variable, position, i, j, mortar] - local_neighbor_ids::Vector{Vector{Int}} # [mortar][ids] - local_neighbor_positions::Vector{Vector{Int}} # [mortar][positions] - node_indices::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar] - normal_directions::Array{RealT, NDIMSP2} # [dimension, i, j, position, mortar] + NDIMSP2, NDIMSP3, + uArray <: DenseArray{uEltype, NDIMSP3}, + uVector <: DenseVector{uEltype}} <: + AbstractContainer + u::uArray # [small/large side, variable, position, i, j, mortar] + local_neighbor_ids::Vector{Vector{Int}} # [mortar][ids] + local_neighbor_positions::Vector{Vector{Int}} # [mortar][positions] + node_indices::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar] + normal_directions::Array{RealT, NDIMSP2} # [dimension, i, j, position, mortar] # internal `resize!`able storage - _u::Vector{uEltype} + _u::uVector _node_indices::Vector{NTuple{NDIMS, Symbol}} _normal_directions::Vector{RealT} end @@ -164,11 +200,12 @@ function init_mpi_mortars(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, eq 2^(NDIMS - 1), n_mpi_mortars)) mpi_mortars = P4estMPIMortarContainer{NDIMS, uEltype, RealT, NDIMS + 1, NDIMS + 2, - NDIMS + 3}(u, local_neighbor_ids, - local_neighbor_positions, - node_indices, normal_directions, - _u, _node_indices, - _normal_directions) + NDIMS + 3, typeof(u), + typeof(_u)}(u, local_neighbor_ids, + local_neighbor_positions, + node_indices, normal_directions, + _u, _node_indices, + _normal_directions) if n_mpi_mortars > 0 init_mpi_mortars!(mpi_mortars, mesh, basis, elements) @@ -184,6 +221,35 @@ function init_mpi_mortars!(mpi_mortars, mesh::ParallelP4estMesh, basis, elements return mpi_mortars end +function Adapt.adapt_structure(to, mpi_mortars::P4estMPIMortarContainer) + # TODO: GPU + # Only parts of this container are adapted, since we currently don't + # 
use `local_neighbor_ids`, `local_neighbor_positions`, `normal_directions` + # on the GPU. If we do need them we need to redesign this to use the VecOfArrays + # approach. + + _u = adapt(to, mpi_mortars._u) + _node_indices = mpi_mortars._node_indices + _normal_directions = mpi_mortars._normal_directions + + u = unsafe_wrap_or_alloc(to, _u, size(mpi_mortars.u)) + local_neighbor_ids = mpi_mortars.local_neighbor_ids + local_neighbor_positions = mpi_mortars.local_neighbor_positions + node_indices = mpi_mortars.node_indices + normal_directions = mpi_mortars.normal_directions + + NDIMS = ndims(mpi_mortars) + return P4estMPIMortarContainer{NDIMS, eltype(_u), + eltype(_normal_directions), + NDIMS + 1, NDIMS + 2, NDIMS + 3, + typeof(u), typeof(_u)}(u, local_neighbor_ids, + local_neighbor_positions, + node_indices, + normal_directions, _u, + _node_indices, + _normal_directions) +end + # Overload init! function for regular interfaces, regular mortars and boundaries since they must # call the appropriate init_surfaces! 
function for parallel p4est meshes function init_interfaces!(interfaces, mesh::ParallelP4estMesh) diff --git a/src/solvers/dgsem_p4est/dg_2d.jl b/src/solvers/dgsem_p4est/dg_2d.jl index 066cd363739..95464c8c12c 100644 --- a/src/solvers/dgsem_p4est/dg_2d.jl +++ b/src/solvers/dgsem_p4est/dg_2d.jl @@ -20,9 +20,11 @@ function create_cache(mesh::Union{P4estMesh{2}, P4estMeshView{2}, T8codeMesh{2}} fstar_secondary_lower_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] u_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] - (; fstar_primary_upper_threaded, fstar_primary_lower_threaded, - fstar_secondary_upper_threaded, fstar_secondary_lower_threaded, - u_threaded) + cache = (; fstar_primary_upper_threaded, fstar_primary_lower_threaded, + fstar_secondary_upper_threaded, fstar_secondary_lower_threaded, + u_threaded) + + return cache end # index_to_start_step_2d(index::Symbol, index_range) @@ -208,6 +210,8 @@ end surface_flux_values[v, primary_node_index, primary_direction_index, primary_element_index] = flux_[v] surface_flux_values[v, secondary_node_index, secondary_direction_index, secondary_element_index] = -flux_[v] end + + return nothing end # Inlined version of the interface flux computation for equations with conservative and nonconservative terms @@ -244,6 +248,8 @@ end 0.5f0 * noncons_secondary[v]) end + + return nothing end function prolong2boundaries!(cache, u, @@ -308,6 +314,8 @@ function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing j_node += j_node_step end end + + return nothing end # inlined version of the boundary flux calculation along a physical interface @@ -339,6 +347,8 @@ end for v in eachvariable(equations) surface_flux_values[v, node_index, direction_index, element_index] = flux_[v] end + + return nothing end # inlined version of the boundary flux with nonconservative terms calculation along a physical interface @@ -377,6 +387,8 @@ end 0.5f0 * noncons_flux[v] end + + return nothing end function 
prolong2mortars!(cache, u, @@ -568,6 +580,8 @@ end dg, node_index) set_node_vars!(fstar_secondary[position_index], flux_plus_noncons_secondary, equations, dg, node_index) + + return nothing end @inline function mortar_fluxes_to_elements!(surface_flux_values, diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index 7ccdf4ea7a5..34e20f0fd68 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -190,10 +190,10 @@ function calc_gradient!(gradients, u_transformed, t, # now that the reference coordinate gradients are computed, transform them node-by-node to physical gradients # using the contravariant vectors for j in eachnode(dg), i in eachnode(dg) - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, - element) - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, - element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, + i, j, element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, + i, j, element) gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, i, j, element) @@ -467,6 +467,8 @@ end surface_flux_values[v, primary_node_index, primary_direction_index, primary_element_index] = flux_[v] surface_flux_values[v, secondary_node_index, secondary_direction_index, secondary_element_index] = flux_[v] end + + return nothing end # This is the version used when calculating the divergence of the viscous fluxes @@ -825,6 +827,8 @@ end # Copy flux to buffer set_node_vars!(fstar_primary[position_index], flux_, equations, dg, node_index) set_node_vars!(fstar_secondary[position_index], flux_, equations, dg, node_index) + + return nothing end # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` @@ -926,7 +930,7 @@ end function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{}, operator_type, mesh::P4estMesh, 
equations, surface_integral, dg::DG) - nothing + return nothing end function calc_boundary_flux!(cache, t, @@ -988,6 +992,8 @@ function calc_boundary_flux!(cache, t, j_node += j_node_step end end + + return nothing end function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl index 1491cd50c6c..000ca415828 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl @@ -131,6 +131,8 @@ end for v in eachvariable(equations) surface_flux_values[v, surface_node_index, local_direction_index, local_element_index] = flux_[v] end + + return nothing end # Inlined version of the interface flux computation for non-conservative equations @@ -164,6 +166,8 @@ end local_direction_index, local_element_index] = flux_[v] + 0.5f0 * noncons_flux_[v] end + + return nothing end function prolong2mpimortars!(cache, u, @@ -308,6 +312,8 @@ end # Copy flux to buffer set_node_vars!(fstar_primary[position_index], flux, equations, dg, node_index) set_node_vars!(fstar_secondary[position_index], flux, equations, dg, node_index) + + return nothing end # Inlined version of the mortar flux computation on small elements for non-conservative equations @@ -336,6 +342,8 @@ end 0.5f0 * noncons_flux_secondary[v] end + + return nothing end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, diff --git a/src/solvers/dgsem_p4est/dg_3d.jl b/src/solvers/dgsem_p4est/dg_3d.jl index e59f502c86c..63cf78ddd94 100644 --- a/src/solvers/dgsem_p4est/dg_3d.jl +++ b/src/solvers/dgsem_p4est/dg_3d.jl @@ -13,18 +13,18 @@ function create_cache(mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, fstar_primary_threaded = [Array{uEltype, 4}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2), 4) - for _ in 1:Threads.nthreads()] + for _ in 1:Threads.nthreads()] |> VecOfArrays fstar_secondary_threaded = [Array{uEltype, 4}(undef, nvariables(equations), 
nnodes(mortar_l2), nnodes(mortar_l2), 4) - for _ in 1:Threads.nthreads()] + for _ in 1:Threads.nthreads()] |> VecOfArrays fstar_tmp_threaded = [Array{uEltype, 3}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2)) - for _ in 1:Threads.nthreads()] + for _ in 1:Threads.nthreads()] |> VecOfArrays u_threaded = [Array{uEltype, 3}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2)) - for _ in 1:Threads.nthreads()] + for _ in 1:Threads.nthreads()] |> VecOfArrays (; fstar_primary_threaded, fstar_secondary_threaded, fstar_tmp_threaded, u_threaded) end @@ -274,6 +274,8 @@ end surface_flux_values[v, secondary_i_node_index, secondary_j_node_index, secondary_direction_index, secondary_element_index] = -flux_[v] end + + return nothing end # Inlined function for interface flux computation for flux + nonconservative terms @@ -312,6 +314,8 @@ end 0.5f0 * noncons_secondary[v]) end + + return nothing end function prolong2boundaries!(cache, u, @@ -398,6 +402,8 @@ function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing k_node += k_node_step_j end end + + return nothing end # inlined version of the boundary flux calculation along a physical interface @@ -432,6 +438,8 @@ end surface_flux_values[v, i_node_index, j_node_index, direction_index, element_index] = flux_[v] end + + return nothing end # inlined version of the boundary flux calculation along a physical interface @@ -472,6 +480,8 @@ end direction_index, element_index] = flux[v] + 0.5f0 * noncons_flux[v] end + + return nothing end function prolong2mortars!(cache, u, @@ -674,6 +684,8 @@ end i_node_index, j_node_index, position_index) set_node_vars!(fstar_secondary, flux, equations, dg, i_node_index, j_node_index, position_index) + + return nothing end # Inlined version of the mortar flux computation on small elements for conservation fluxes @@ -710,6 +722,8 @@ end i_node_index, j_node_index, position_index) + + return nothing end @inline function 
mortar_fluxes_to_elements!(surface_flux_values, diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 1fe22e42fe7..4f545c8861f 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -377,6 +377,8 @@ end surface_flux_values[v, primary_i_node_index, primary_j_node_index, primary_direction_index, primary_element_index] = flux_[v] surface_flux_values[v, secondary_i_node_index, secondary_j_node_index, secondary_direction_index, secondary_element_index] = flux_[v] end + + return nothing end # This is the version used when calculating the divergence of the viscous fluxes @@ -400,9 +402,8 @@ function calc_volume_integral!(du, flux_viscous, # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, - k, - element) + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, + i, j, k, element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 + Ja13 * flux3 for ii in eachnode(dg) multiply_add_to_node_vars!(du, derivative_dhat[ii, i], @@ -412,9 +413,8 @@ function calc_volume_integral!(du, flux_viscous, # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, - k, - element) + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, + i, j, k, element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 + Ja23 * flux3 for jj in eachnode(dg) multiply_add_to_node_vars!(du, derivative_dhat[jj, j], @@ -424,9 +424,8 @@ function calc_volume_integral!(du, flux_viscous, # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, - k, - 
element) + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, + i, j, k, element) contravariant_flux3 = Ja31 * flux1 + Ja32 * flux2 + Ja33 * flux3 for kk in eachnode(dg) multiply_add_to_node_vars!(du, derivative_dhat[kk, k], @@ -865,6 +864,8 @@ end position_index) set_node_vars!(fstar_secondary, flux_, equations, dg, i_node_index, j_node_index, position_index) + + return nothing end # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` @@ -997,6 +998,8 @@ function calc_boundary_flux!(cache, t, k_node += k_node_step_j end end + + return nothing end function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index aad54f40afb..520bc1c0599 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -265,6 +265,8 @@ end surface_flux_values[v, surface_i_node_index, surface_j_node_index, local_direction_index, local_element_index] = flux_[v] end + + return nothing end # Inlined version of the interface flux computation for non-conservative equations @@ -299,6 +301,8 @@ end local_direction_index, local_element_index] = flux_[v] + 0.5f0 * noncons_flux_[v] end + + return nothing end function prolong2mpimortars!(cache, u, @@ -497,6 +501,8 @@ end i_node_index, j_node_index, position_index) set_node_vars!(fstar_secondary, flux, equations, dg, i_node_index, j_node_index, position_index) + + return nothing end # Inlined version of the mortar flux computation on small elements for non-conservative equations @@ -526,6 +532,8 @@ end 0.5f0 * noncons_flux_secondary[v] end + + return nothing end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 2cc201dd1f0..7acddf07b4b 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ 
b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -5,12 +5,13 @@ @muladd begin #! format: noindent -mutable struct P4estMPICache{uEltype} +mutable struct P4estMPICache{BufferType <: DenseVector, + VecInt <: DenseVector{<:Integer}} mpi_neighbor_ranks::Vector{Int} - mpi_neighbor_interfaces::Vector{Vector{Int}} - mpi_neighbor_mortars::Vector{Vector{Int}} - mpi_send_buffers::Vector{Vector{uEltype}} - mpi_recv_buffers::Vector{Vector{uEltype}} + mpi_neighbor_interfaces::VecOfArrays{VecInt} + mpi_neighbor_mortars::VecOfArrays{VecInt} + mpi_send_buffers::VecOfArrays{BufferType} + mpi_recv_buffers::VecOfArrays{BufferType} mpi_send_requests::Vector{MPI.Request} mpi_recv_requests::Vector{MPI.Request} n_elements_by_rank::OffsetArray{Int, 1, Array{Int, 1}} @@ -25,25 +26,29 @@ function P4estMPICache(uEltype) end mpi_neighbor_ranks = Vector{Int}(undef, 0) - mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, 0) - mpi_neighbor_mortars = Vector{Vector{Int}}(undef, 0) - mpi_send_buffers = Vector{Vector{uEltype}}(undef, 0) - mpi_recv_buffers = Vector{Vector{uEltype}}(undef, 0) + mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, 0) |> VecOfArrays + mpi_neighbor_mortars = Vector{Vector{Int}}(undef, 0) |> VecOfArrays + mpi_send_buffers = Vector{Vector{uEltype}}(undef, 0) |> VecOfArrays + mpi_recv_buffers = Vector{Vector{uEltype}}(undef, 0) |> VecOfArrays mpi_send_requests = Vector{MPI.Request}(undef, 0) mpi_recv_requests = Vector{MPI.Request}(undef, 0) n_elements_by_rank = OffsetArray(Vector{Int}(undef, 0), 0:-1) n_elements_global = 0 first_element_global_id = 0 - P4estMPICache{uEltype}(mpi_neighbor_ranks, mpi_neighbor_interfaces, - mpi_neighbor_mortars, - mpi_send_buffers, mpi_recv_buffers, - mpi_send_requests, mpi_recv_requests, - n_elements_by_rank, n_elements_global, - first_element_global_id) + P4estMPICache{Vector{uEltype}, Vector{Int}}(mpi_neighbor_ranks, + mpi_neighbor_interfaces, + mpi_neighbor_mortars, + mpi_send_buffers, mpi_recv_buffers, + mpi_send_requests, mpi_recv_requests, 
+ n_elements_by_rank, n_elements_global, + first_element_global_id) end -@inline Base.eltype(::P4estMPICache{uEltype}) where {uEltype} = uEltype +@inline Base.eltype(::P4estMPICache{BufferType}) where {BufferType} = eltype(BufferType) + +# @eval due to @muladd +@eval Adapt.@adapt_structure(P4estMPICache) ## # Note that the code in `start_mpi_send`/`finish_mpi_receive!` is sensitive to inference on (at least) Julia 1.10. @@ -265,16 +270,16 @@ end function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelP4estMesh, mpi_interfaces, mpi_mortars, nvars, n_nodes, uEltype) - mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_interfaces, - mpi_mortars, - mesh) + mpi_neighbor_ranks, _mpi_neighbor_interfaces, _mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_interfaces, + mpi_mortars, + mesh) - mpi_send_buffers, mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(mpi_neighbor_interfaces, - mpi_neighbor_mortars, - ndims(mesh), - nvars, - n_nodes, - uEltype) + _mpi_send_buffers, _mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(_mpi_neighbor_interfaces, + _mpi_neighbor_mortars, + ndims(mesh), + nvars, + n_nodes, + uEltype) # Determine local and total number of elements n_elements_global = Int(mesh.p4est.global_num_quadrants[]) @@ -286,6 +291,11 @@ function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelP4estMesh, first_element_global_id = Int(mesh.p4est.global_first_quadrant[mpi_rank() + 1]) + 1 @assert n_elements_global==sum(n_elements_by_rank) "error in total number of elements" + mpi_neighbor_interfaces = VecOfArrays(_mpi_neighbor_interfaces) + mpi_neighbor_mortars = VecOfArrays(_mpi_neighbor_mortars) + mpi_send_buffers = VecOfArrays(_mpi_send_buffers) + mpi_recv_buffers = VecOfArrays(_mpi_recv_buffers) + # TODO reuse existing structures @pack! 
mpi_cache = mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars, diff --git a/src/solvers/dgsem_p4est/subcell_limiters_2d.jl b/src/solvers/dgsem_p4est/subcell_limiters_2d.jl index 06fbb30f03d..d42b63789b1 100644 --- a/src/solvers/dgsem_p4est/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_p4est/subcell_limiters_2d.jl @@ -5,9 +5,9 @@ @muladd begin #! format: noindent -function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, - mesh::P4estMesh{2}) - _, equations, dg, cache = mesh_equations_solver_cache(semi) +function calc_bounds_twosided_interface!(var_min, var_max, variable, + u, t, semi, mesh::P4estMesh{2}, equations) + _, _, dg, cache = mesh_equations_solver_cache(semi) (; boundary_conditions) = semi (; neighbor_ids, node_indices) = cache.interfaces diff --git a/src/solvers/dgsem_structured/containers.jl b/src/solvers/dgsem_structured/containers.jl index 7b0d275c5b5..c75ef6ded1f 100644 --- a/src/solvers/dgsem_structured/containers.jl +++ b/src/solvers/dgsem_structured/containers.jl @@ -5,19 +5,23 @@ @muladd begin #! format: noindent -struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2, - NDIMSP3} +struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, + NDIMSP1, NDIMSP2, NDIMSP3} # Physical coordinates at each node - node_coordinates::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] + node_coordinates::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] # ID of neighbor element in negative direction in orientation - left_neighbors::Array{Int, 2} # [orientation, elements] + left_neighbors::Array{Int, 2} # [orientation, elements] + # Jacobian matrix of the transformation # [jacobian_i, jacobian_j, node_i, node_j, node_k, element] where jacobian_i is the first index of the Jacobian matrix,... 
jacobian_matrix::Array{RealT, NDIMSP3} + # Contravariant vectors, scaled by J, in Kopriva's blue book called Ja^i_n (i index, n dimension) - contravariant_vectors::Array{RealT, NDIMSP3} # [dimension, index, node_i, node_j, node_k, element] + contravariant_vectors::Array{RealT, NDIMSP3} # [dimension, index, node_i, node_j, node_k, element] + # 1/J where J is the Jacobian determinant (determinant of Jacobian matrix) - inverse_jacobian::Array{RealT, NDIMSP1} # [node_i, node_j, node_k, element] + inverse_jacobian::Array{RealT, NDIMSP1} # [node_i, node_j, node_k, element] + # Buffer for calculated surface flux surface_flux_values::Array{uEltype, NDIMSP2} # [variable, i, j, direction, element] end @@ -61,7 +65,7 @@ end function Base.eltype(::ElementContainer{NDIMS, RealT, uEltype}) where {NDIMS, RealT, uEltype} - uEltype + return uEltype end include("containers_1d.jl") diff --git a/src/solvers/dgsem_structured/containers_1d.jl b/src/solvers/dgsem_structured/containers_1d.jl index 1a1bb183cb3..803ed9fd055 100644 --- a/src/solvers/dgsem_structured/containers_1d.jl +++ b/src/solvers/dgsem_structured/containers_1d.jl @@ -44,6 +44,8 @@ function calc_node_coordinates!(node_coordinates, cell_x, mapping, # node_coordinates are the mapped reference node_coordinates node_coordinates[1, i, cell_x] = mapping(cell_x_offset + dx / 2 * nodes[i])[1] end + + return nothing end # Calculate Jacobian matrix of the mapping from the reference element to the element in the physical domain diff --git a/src/solvers/dgsem_structured/containers_2d.jl b/src/solvers/dgsem_structured/containers_2d.jl index 8a0722fc5d5..c0e1a7ba9ab 100644 --- a/src/solvers/dgsem_structured/containers_2d.jl +++ b/src/solvers/dgsem_structured/containers_2d.jl @@ -53,6 +53,8 @@ function calc_node_coordinates!(node_coordinates, element, node_coordinates[:, i, j, element] .= mapping(cell_x_offset + dx / 2 * nodes[i], cell_y_offset + dy / 2 * nodes[j]) end + + return nothing end # Calculate Jacobian matrix of the mapping 
from the reference element to the element in the physical domain diff --git a/src/solvers/dgsem_structured/containers_3d.jl b/src/solvers/dgsem_structured/containers_3d.jl index 75cc98bf2b7..73a2765aecc 100644 --- a/src/solvers/dgsem_structured/containers_3d.jl +++ b/src/solvers/dgsem_structured/containers_3d.jl @@ -59,6 +59,8 @@ function calc_node_coordinates!(node_coordinates, element, cell_z_offset + dz / 2 * nodes[k]) end + + return nothing end # Calculate Jacobian matrix of the mapping from the reference element to the element in the physical domain diff --git a/src/solvers/dgsem_structured/dg.jl b/src/solvers/dgsem_structured/dg.jl index 1e9bfcc4a2e..b54d4092f0c 100644 --- a/src/solvers/dgsem_structured/dg.jl +++ b/src/solvers/dgsem_structured/dg.jl @@ -39,6 +39,7 @@ end direction, node_indices, surface_node_indices, element) @assert isperiodic(mesh, orientation) + return nothing end @inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, @@ -52,6 +53,7 @@ end direction, node_indices, surface_node_indices, element) @assert isperiodic(mesh, orientation) + return nothing end @inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, @@ -89,6 +91,8 @@ end for v in eachvariable(equations) surface_flux_values[v, surface_node_indices..., direction, element] = flux[v] end + + return nothing end @inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, @@ -129,6 +133,8 @@ end 0.5f0 * noncons_flux[v]) end + + return nothing end @inline function get_inverse_jacobian(inverse_jacobian, diff --git a/src/solvers/dgsem_structured/dg_1d.jl b/src/solvers/dgsem_structured/dg_1d.jl index 4827063559b..ee2832e66a8 100644 --- a/src/solvers/dgsem_structured/dg_1d.jl +++ b/src/solvers/dgsem_structured/dg_1d.jl @@ -75,6 +75,7 @@ function calc_boundary_flux!(cache, u, t, boundary_condition::BoundaryConditionP mesh::StructuredMesh{1}, equations, surface_integral, dg::DG) @assert isperiodic(mesh) + return nothing end function 
calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, @@ -112,5 +113,7 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, for v in eachvariable(equations) surface_flux_values[v, direction, nelements(dg, cache)] = flux[v] end + + return nothing end end # @muladd diff --git a/src/solvers/dgsem_structured/dg_2d.jl b/src/solvers/dgsem_structured/dg_2d.jl index 54349c6bf83..a02a44bf4dd 100644 --- a/src/solvers/dgsem_structured/dg_2d.jl +++ b/src/solvers/dgsem_structured/dg_2d.jl @@ -142,8 +142,8 @@ end for jj in (j + 1):nnodes(dg) u_node_jj = get_node_vars(u, equations, dg, i, jj, element) # pull the contravariant vectors and compute the average - Ja2_node_jj = get_contravariant_vector(2, contravariant_vectors, i, jj, - element) + Ja2_node_jj = get_contravariant_vector(2, contravariant_vectors, + i, jj, element) Ja2_avg = 0.5f0 * (Ja2_node + Ja2_node_jj) # compute the contravariant sharp flux in the direction of the # averaged contravariant vector @@ -154,6 +154,8 @@ end equations, dg, i, jj, element) end end + + return nothing end @inline function flux_differencing_kernel!(du, u, @@ -220,6 +222,8 @@ end multiply_add_to_node_vars!(du, alpha * 0.5f0, integral_contribution, equations, dg, i, j, element) end + + return nothing end # Computing the normal vector for the FV method on curvilinear subcells. 
@@ -244,8 +248,8 @@ end fstar1_R[:, nnodes(dg) + 1, :] .= zero(eltype(fstar1_R)) for j in eachnode(dg) - normal_direction = get_contravariant_vector(1, contravariant_vectors, 1, j, - element) + normal_direction = get_contravariant_vector(1, contravariant_vectors, + 1, j, element) for i in 2:nnodes(dg) u_ll = get_node_vars(u, equations, dg, i - 1, j, element) @@ -271,8 +275,8 @@ end fstar2_R[:, :, nnodes(dg) + 1] .= zero(eltype(fstar2_R)) for i in eachnode(dg) - normal_direction = get_contravariant_vector(2, contravariant_vectors, i, 1, - element) + normal_direction = get_contravariant_vector(2, contravariant_vectors, + i, 1, element) for j in 2:nnodes(dg) u_ll = get_node_vars(u, equations, dg, i, j - 1, element) @@ -555,6 +559,7 @@ function calc_boundary_flux!(cache, u, t, boundary_condition::BoundaryConditionP mesh::Union{StructuredMesh{2}, StructuredMeshView{2}}, equations, surface_integral, dg::DG) @assert isperiodic(mesh) + return nothing end function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, @@ -623,6 +628,8 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, direction, (i, nnodes(dg)), (i,), element) end end + + return nothing end function apply_jacobian!(du, diff --git a/src/solvers/dgsem_structured/dg_3d.jl b/src/solvers/dgsem_structured/dg_3d.jl index 983a62d4144..aba79f3a5a5 100644 --- a/src/solvers/dgsem_structured/dg_3d.jl +++ b/src/solvers/dgsem_structured/dg_3d.jl @@ -187,6 +187,8 @@ end equations, dg, i, j, kk, element) end end + + return nothing end @inline function flux_differencing_kernel!(du, u, @@ -268,6 +270,8 @@ end multiply_add_to_node_vars!(du, alpha * 0.5f0, integral_contribution, equations, dg, i, j, k, element) end + + return nothing end # Computing the normal vector for the FV method on curvilinear subcells. 
@@ -691,6 +695,7 @@ function calc_boundary_flux!(cache, u, t, boundary_condition::BoundaryConditionP mesh::StructuredMesh{3}, equations, surface_integral, dg::DG) @assert isperiodic(mesh) + return nothing end function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, @@ -791,6 +796,8 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, element) end end + + return nothing end function apply_jacobian!(du, diff --git a/src/solvers/dgsem_structured/indicators_1d.jl b/src/solvers/dgsem_structured/indicators_1d.jl index a6d518699dd..999a08b7a1e 100644 --- a/src/solvers/dgsem_structured/indicators_1d.jl +++ b/src/solvers/dgsem_structured/indicators_1d.jl @@ -23,5 +23,7 @@ function apply_smoothing!(mesh::StructuredMesh{1}, alpha, alpha_tmp, dg, cache) alpha[element] = max(alpha_tmp[element], 0.5f0 * alpha_tmp[left], alpha[element]) end + + return nothing end end # @muladd diff --git a/src/solvers/dgsem_structured/indicators_2d.jl b/src/solvers/dgsem_structured/indicators_2d.jl index 52d6ac2a955..33a7ca8dbc9 100644 --- a/src/solvers/dgsem_structured/indicators_2d.jl +++ b/src/solvers/dgsem_structured/indicators_2d.jl @@ -28,5 +28,7 @@ function apply_smoothing!(mesh::StructuredMesh{2}, alpha, alpha_tmp, dg, cache) alpha[element] = max(alpha_tmp[element], 0.5f0 * alpha_tmp[lower], alpha[element]) end + + return nothing end end # @muladd diff --git a/src/solvers/dgsem_structured/indicators_3d.jl b/src/solvers/dgsem_structured/indicators_3d.jl index 8b477da2e8f..ff6f955bd1d 100644 --- a/src/solvers/dgsem_structured/indicators_3d.jl +++ b/src/solvers/dgsem_structured/indicators_3d.jl @@ -33,5 +33,7 @@ function apply_smoothing!(mesh::StructuredMesh{3}, alpha, alpha_tmp, dg, cache) alpha[element] = max(alpha_tmp[element], 0.5f0 * alpha_tmp[front], alpha[element]) end + + return nothing end end # @muladd diff --git a/src/solvers/dgsem_structured/subcell_limiters_2d.jl b/src/solvers/dgsem_structured/subcell_limiters_2d.jl index 
2e59244b142..4be1993bdbe 100644 --- a/src/solvers/dgsem_structured/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_structured/subcell_limiters_2d.jl @@ -5,9 +5,9 @@ @muladd begin #! format: noindent -function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, - mesh::StructuredMesh{2}) - _, equations, dg, cache = mesh_equations_solver_cache(semi) +function calc_bounds_twosided_interface!(var_min, var_max, variable, + u, t, semi, mesh::StructuredMesh{2}, equations) + _, _, dg, cache = mesh_equations_solver_cache(semi) (; boundary_conditions) = semi (; contravariant_vectors) = cache.elements @@ -57,7 +57,8 @@ function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, for cell_y in axes(mesh, 2) element = linear_indices[begin, cell_y] for j in eachnode(dg) - Ja1 = get_contravariant_vector(1, contravariant_vectors, 1, j, element) + Ja1 = get_contravariant_vector(1, contravariant_vectors, + 1, j, element) u_inner = get_node_vars(u, equations, dg, 1, j, element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[1], Ja1, 1, @@ -73,8 +74,8 @@ function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, for cell_y in axes(mesh, 2) element = linear_indices[end, cell_y] for j in eachnode(dg) - Ja1 = get_contravariant_vector(1, contravariant_vectors, nnodes(dg), j, - element) + Ja1 = get_contravariant_vector(1, contravariant_vectors, + nnodes(dg), j, element) u_inner = get_node_vars(u, equations, dg, nnodes(dg), j, element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[2], Ja1, 2, @@ -94,7 +95,8 @@ function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, for cell_x in axes(mesh, 1) element = linear_indices[cell_x, begin] for i in eachnode(dg) - Ja2 = get_contravariant_vector(2, contravariant_vectors, i, 1, element) + Ja2 = get_contravariant_vector(2, contravariant_vectors, + i, 1, element) u_inner = get_node_vars(u, equations, dg, i, 1, element) u_outer = 
get_boundary_outer_state(u_inner, t, boundary_conditions[3], Ja2, 3, @@ -110,8 +112,8 @@ function calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, for cell_x in axes(mesh, 1) element = linear_indices[cell_x, end] for i in eachnode(dg) - Ja2 = get_contravariant_vector(2, contravariant_vectors, i, nnodes(dg), - element) + Ja2 = get_contravariant_vector(2, contravariant_vectors, + i, nnodes(dg), element) u_inner = get_node_vars(u, equations, dg, i, nnodes(dg), element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[4], Ja2, 4, @@ -179,7 +181,8 @@ function calc_bounds_onesided_interface!(var_minmax, minmax, variable, u, t, sem for cell_y in axes(mesh, 2) element = linear_indices[begin, cell_y] for j in eachnode(dg) - Ja1 = get_contravariant_vector(1, contravariant_vectors, 1, j, element) + Ja1 = get_contravariant_vector(1, contravariant_vectors, + 1, j, element) u_inner = get_node_vars(u, equations, dg, 1, j, element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[1], Ja1, 1, @@ -194,8 +197,8 @@ function calc_bounds_onesided_interface!(var_minmax, minmax, variable, u, t, sem for cell_y in axes(mesh, 2) element = linear_indices[end, cell_y] for j in eachnode(dg) - Ja1 = get_contravariant_vector(1, contravariant_vectors, nnodes(dg), j, - element) + Ja1 = get_contravariant_vector(1, contravariant_vectors, + nnodes(dg), j, element) u_inner = get_node_vars(u, equations, dg, nnodes(dg), j, element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[2], Ja1, 2, @@ -214,7 +217,8 @@ function calc_bounds_onesided_interface!(var_minmax, minmax, variable, u, t, sem for cell_x in axes(mesh, 1) element = linear_indices[cell_x, begin] for i in eachnode(dg) - Ja2 = get_contravariant_vector(2, contravariant_vectors, i, 1, element) + Ja2 = get_contravariant_vector(2, contravariant_vectors, + i, 1, element) u_inner = get_node_vars(u, equations, dg, i, 1, element) u_outer = get_boundary_outer_state(u_inner, t, 
boundary_conditions[3], Ja2, 3, @@ -229,8 +233,8 @@ function calc_bounds_onesided_interface!(var_minmax, minmax, variable, u, t, sem for cell_x in axes(mesh, 1) element = linear_indices[cell_x, end] for i in eachnode(dg) - Ja2 = get_contravariant_vector(2, contravariant_vectors, i, nnodes(dg), - element) + Ja2 = get_contravariant_vector(2, contravariant_vectors, + i, nnodes(dg), element) u_inner = get_node_vars(u, equations, dg, i, nnodes(dg), element) u_outer = get_boundary_outer_state(u_inner, t, boundary_conditions[4], Ja2, 4, diff --git a/src/solvers/dgsem_tree/containers.jl b/src/solvers/dgsem_tree/containers.jl index 3f05daf81d8..68d2805ae45 100644 --- a/src/solvers/dgsem_tree/containers.jl +++ b/src/solvers/dgsem_tree/containers.jl @@ -50,6 +50,8 @@ function reinitialize_containers!(mesh::TreeMesh, equations, dg::DGSEM, cache) init_mpi_cache!(mpi_cache, mesh, elements, mpi_interfaces, mpi_mortars, nvariables(equations), nnodes(dg), eltype(elements)) end + + return nothing end # Dimension-specific implementations diff --git a/src/solvers/dgsem_tree/dg.jl b/src/solvers/dgsem_tree/dg.jl index fd9728168d8..fb7f7f9dbf1 100644 --- a/src/solvers/dgsem_tree/dg.jl +++ b/src/solvers/dgsem_tree/dg.jl @@ -12,7 +12,7 @@ function reset_du!(du, dg, cache) du[.., element] .= zero(eltype(du)) end - return du + return nothing end function volume_jacobian(element, mesh::TreeMesh, cache) diff --git a/src/solvers/dgsem_tree/dg_1d.jl b/src/solvers/dgsem_tree/dg_1d.jl index fc8cd1055f5..659a3babdcc 100644 --- a/src/solvers/dgsem_tree/dg_1d.jl +++ b/src/solvers/dgsem_tree/dg_1d.jl @@ -174,6 +174,8 @@ function calc_volume_integral!(du, u, equations, volume_integral.volume_flux, dg, cache) end + + return nothing end @inline function flux_differencing_kernel!(du, u, @@ -424,6 +426,8 @@ function calc_interface_flux!(surface_flux_values, surface_flux_values[v, right_direction, right_id] = flux[v] end end + + return nothing end function calc_interface_flux!(surface_flux_values, @@ -496,6 
+500,8 @@ end function calc_boundary_flux!(cache, t, boundary_condition::BoundaryConditionPeriodic, mesh::TreeMesh{1}, equations, surface_integral, dg::DG) @assert isempty(eachboundary(dg, cache)) + + return nothing end function calc_boundary_flux!(cache, t, boundary_conditions::NamedTuple, @@ -516,6 +522,8 @@ function calc_boundary_flux!(cache, t, boundary_conditions::NamedTuple, have_nonconservative_terms(equations), equations, surface_integral, dg, cache, 2, firsts[2], lasts[2]) + + return nothing end function calc_boundary_flux_by_direction!(surface_flux_values::AbstractArray{<:Any, 3}, diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl index d0e873cde1e..06a6a4488ec 100644 --- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl @@ -119,6 +119,8 @@ function transform_variables!(u_transformed, u, mesh::TreeMesh{1}, i, element) end end + + return nothing end # This is the version used when calculating the divergence of the viscous fluxes @@ -249,6 +251,8 @@ function calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh::Tree i, element) end end + + return nothing end function calc_boundary_flux_gradients!(cache, t, @@ -290,6 +294,8 @@ function calc_boundary_flux_gradients!(cache, t, equations_parabolic, surface_integral, dg, cache, 2, firsts[2], lasts[2]) + + return nothing end function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, @@ -357,7 +363,10 @@ function calc_boundary_flux_divergence!(cache, t, equations_parabolic, surface_integral, dg, cache, 2, firsts[2], lasts[2]) + + return nothing end + function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, 3}, t, @@ -433,6 +442,8 @@ function calc_gradient_interface_flux!(surface_flux_values, surface_flux_values[v, right_direction, right_id] = flux[v] end end + + return nothing end # Calculate the gradient of the transformed variables diff 
--git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl index acd1df6c8e2..8b30219d29b 100644 --- a/src/solvers/dgsem_tree/dg_2d.jl +++ b/src/solvers/dgsem_tree/dg_2d.jl @@ -89,11 +89,11 @@ end # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMesh2D, - P4estMesh{2}, P4estMeshView{2}, T8codeMesh{2}}, - equations, mortar_l2::LobattoLegendreMortarL2, uEltype) +function create_cache(mesh::TreeMesh{2}, equations, + mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal performance using different types - MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)}, uEltype, 2, + MA2d = MArray{Tuple{nvariables(equations), nnodes(mortar_l2)}, + uEltype, 2, nvariables(equations) * nnodes(mortar_l2)} fstar_primary_upper_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] fstar_primary_lower_threaded = MA2d[MA2d(undef) for _ in 1:Threads.nthreads()] @@ -104,8 +104,10 @@ function create_cache(mesh::Union{TreeMesh{2}, StructuredMesh{2}, UnstructuredMe # fstar_upper_threaded = [A2d(undef, nvariables(equations), nnodes(mortar_l2)) for _ in 1:Threads.nthreads()] # fstar_lower_threaded = [A2d(undef, nvariables(equations), nnodes(mortar_l2)) for _ in 1:Threads.nthreads()] - (; fstar_primary_upper_threaded, fstar_primary_lower_threaded, - fstar_secondary_upper_threaded, fstar_secondary_lower_threaded) + cache = (; fstar_primary_upper_threaded, fstar_primary_lower_threaded, + fstar_secondary_upper_threaded, fstar_secondary_lower_threaded) + + return cache end # TODO: Taal discuss/refactor timer, allowing users to pass a custom timer? 
@@ -718,6 +720,8 @@ function calc_boundary_flux!(cache, t, boundary_conditions::NamedTuple, have_nonconservative_terms(equations), equations, surface_integral, dg, cache, 4, firsts[4], lasts[4]) + + return nothing end function calc_boundary_flux_by_direction!(surface_flux_values::AbstractArray{<:Any, 4}, diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index a6477bbb886..232e13de88b 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -132,6 +132,8 @@ function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{2}, P4estMe i, j, element) end end + + return nothing end # This is the version used when calculating the divergence of the viscous fluxes @@ -325,6 +327,8 @@ function calc_viscous_fluxes!(flux_viscous, i, j, element) end end + + return nothing end # TODO: parabolic; decide if we should keep this, and if so, extend to 3D. @@ -388,7 +392,10 @@ function calc_boundary_flux_gradients!(cache, t, equations_parabolic, surface_integral, dg, cache, 4, firsts[4], lasts[4]) + + return nothing end + function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, 4}, t, @@ -466,6 +473,8 @@ function calc_boundary_flux_divergence!(cache, t, equations_parabolic, surface_integral, dg, cache, 4, firsts[4], lasts[4]) + + return nothing end function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, 4}, @@ -782,6 +791,8 @@ function calc_gradient_interface_flux!(surface_flux_values, end end end + + return nothing end # Calculate the gradient of the transformed variables diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl index 83c403bceae..03d0cfd33a1 100644 --- a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl +++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl @@ -73,6 +73,8 @@ function calc_volume_integral!(du, u, volume_integral, limiter, 
dg, cache) end + + return nothing end @inline function subcell_limiting_kernel!(du, u, element, diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl index 56c67192008..7c8f5e0749c 100644 --- a/src/solvers/dgsem_tree/dg_3d.jl +++ b/src/solvers/dgsem_tree/dg_3d.jl @@ -110,9 +110,8 @@ end # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, - T8codeMesh{3}}, - equations, mortar_l2::LobattoLegendreMortarL2, uEltype) +function create_cache(mesh::TreeMesh{3}, equations, + mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal compare performance of different types A3d = Array{uEltype, 3} fstar_primary_upper_left_threaded = A3d[A3d(undef, nvariables(equations), @@ -149,11 +148,13 @@ function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, nnodes(mortar_l2)) for _ in 1:Threads.nthreads()] - (; fstar_primary_upper_left_threaded, fstar_primary_upper_right_threaded, - fstar_primary_lower_left_threaded, fstar_primary_lower_right_threaded, - fstar_secondary_upper_left_threaded, fstar_secondary_upper_right_threaded, - fstar_secondary_lower_left_threaded, fstar_secondary_lower_right_threaded, - fstar_tmp1_threaded) + cache = (; fstar_primary_upper_left_threaded, fstar_primary_upper_right_threaded, + fstar_primary_lower_left_threaded, fstar_primary_lower_right_threaded, + fstar_secondary_upper_left_threaded, fstar_secondary_upper_right_threaded, + fstar_secondary_lower_left_threaded, fstar_secondary_lower_right_threaded, + fstar_tmp1_threaded) + + return cache end # TODO: Taal discuss/refactor timer, allowing users to pass a custom timer? 
@@ -292,6 +293,8 @@ function calc_volume_integral!(du, u, nonconservative_terms, equations, volume_integral.volume_flux, dg, cache) end + + return nothing end @inline function flux_differencing_kernel!(du, u, @@ -341,6 +344,8 @@ end equations, dg, i, j, kk, element) end end + + return nothing end @inline function flux_differencing_kernel!(du, u, @@ -392,6 +397,8 @@ end multiply_add_to_node_vars!(du, alpha * 0.5f0, integral_contribution, equations, dg, i, j, k, element) end + + return nothing end # TODO: Taal dimension agnostic @@ -688,6 +695,8 @@ function calc_interface_flux!(surface_flux_values, end end end + + return nothing end function calc_interface_flux!(surface_flux_values, @@ -793,6 +802,7 @@ end function calc_boundary_flux!(cache, t, boundary_condition::BoundaryConditionPeriodic, mesh::TreeMesh{3}, equations, surface_integral, dg::DG) @assert isempty(eachboundary(dg, cache)) + return nothing end function calc_boundary_flux!(cache, t, boundary_conditions::NamedTuple, @@ -823,6 +833,8 @@ function calc_boundary_flux!(cache, t, boundary_conditions::NamedTuple, calc_boundary_flux_by_direction!(surface_flux_values, t, boundary_conditions[6], equations, surface_integral, dg, cache, 6, firsts[6], lasts[6]) + + return nothing end function calc_boundary_flux_by_direction!(surface_flux_values::AbstractArray{<:Any, 5}, diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index db61500a613..a39d704199d 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -22,6 +22,8 @@ function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{3}, P4estMe i, j, k, element) end end + + return nothing end # This is the version used when calculating the divergence of the viscous fluxes @@ -273,6 +275,8 @@ function calc_viscous_fluxes!(flux_viscous, i, j, k, element) end end + + return nothing end # TODO: parabolic; decide if we should keep this. 
@@ -348,6 +352,8 @@ function calc_boundary_flux_gradients!(cache, t, equations_parabolic, surface_integral, dg, cache, 6, firsts[6], lasts[6]) + + return nothing end function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, @@ -439,7 +445,10 @@ function calc_boundary_flux_divergence!(cache, t, equations_parabolic, surface_integral, dg, cache, 6, firsts[6], lasts[6]) + + return nothing end + function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, 5}, t, @@ -914,6 +923,8 @@ function calc_gradient_interface_flux!(surface_flux_values, end end end + + return nothing end # Calculate the gradient of the transformed variables diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index bdc0c265220..8dedca2b112 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -106,6 +106,8 @@ function apply_smoothing!(mesh::TreeMesh{1}, alpha, alpha_tmp, dg, cache) alpha[left] = max(alpha_tmp[left], 0.5f0 * alpha_tmp[right], alpha[left]) alpha[right] = max(alpha_tmp[right], 0.5f0 * alpha_tmp[left], alpha[right]) end + + return nothing end # this method is used when the indicator is constructed as for shock-capturing volume integrals diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index 5fb3098c050..deaeff557c2 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -129,7 +129,7 @@ function apply_smoothing!(mesh::Union{TreeMesh{2}, P4estMesh{2}, T8codeMesh{2}}, alpha[large] = max(alpha_tmp[large], 0.5f0 * alpha_tmp[upper], alpha[large]) end - return alpha + return nothing end # this method is used when the indicator is constructed as for shock-capturing volume integrals diff --git a/src/solvers/dgsem_tree/indicators_3d.jl b/src/solvers/dgsem_tree/indicators_3d.jl index 8fa8b9e3c03..3fd228f9883 100644 --- 
a/src/solvers/dgsem_tree/indicators_3d.jl +++ b/src/solvers/dgsem_tree/indicators_3d.jl @@ -148,6 +148,8 @@ function apply_smoothing!(mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, alpha[large] = max(alpha_tmp[large], 0.5f0 * alpha_tmp[upper_right], alpha[large]) end + + return nothing end # this method is used when the indicator is constructed as for shock-capturing volume integrals diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 5a2c3ed9f8f..539405f0a1e 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -30,8 +30,9 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat idp_bounds_delta_global) end -function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t, - dt; +function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, + semi, equations, dg::DGSEM, + t, dt; kwargs...) @unpack alpha = limiter.cache.subcell_limiter_coefficients # TODO: Do not abuse `reset_du!` but maybe implement a generic `set_zero!` @@ -70,8 +71,9 @@ end ############################################################################### # Calculation of local bounds using low-order FV solution -@inline function calc_bounds_twosided!(var_min, var_max, variable, u, t, semi) - mesh, equations, dg, cache = mesh_equations_solver_cache(semi) +@inline function calc_bounds_twosided!(var_min, var_max, variable, + u, t, semi, equations) + mesh, _, dg, cache = mesh_equations_solver_cache(semi) # Calc bounds inside elements @threaded for element in eachelement(dg, cache) var_min[:, :, element] .= typemax(eltype(var_min)) @@ -102,12 +104,15 @@ end end # Values at element boundary - calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, mesh) + calc_bounds_twosided_interface!(var_min, var_max, variable, + u, t, semi, mesh, equations) + return nothing end -@inline function 
calc_bounds_twosided_interface!(var_min, var_max, variable, u, t, semi, - mesh::TreeMesh2D) - _, equations, dg, cache = mesh_equations_solver_cache(semi) +@inline function calc_bounds_twosided_interface!(var_min, var_max, variable, + u, t, semi, mesh::TreeMesh2D, + equations) + _, _, dg, cache = mesh_equations_solver_cache(semi) (; boundary_conditions) = semi # Calc bounds at interfaces and periodic boundaries for interface in eachinterface(dg, cache) @@ -210,6 +215,8 @@ end # Values at element boundary calc_bounds_onesided_interface!(var_minmax, min_or_max, variable, u, t, semi, mesh) + + return nothing end @inline function calc_bounds_onesided_interface!(var_minmax, min_or_max, variable, u, t, @@ -289,7 +296,7 @@ end end @inline function idp_local_twosided!(alpha, limiter, u, t, dt, semi, variable) - mesh, _, dg, cache = mesh_equations_solver_cache(semi) + mesh, equations, dg, cache = mesh_equations_solver_cache(semi) (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes (; inverse_weights) = dg.basis @@ -297,7 +304,7 @@ end variable_string = string(variable) var_min = variable_bounds[Symbol(variable_string, "_min")] var_max = variable_bounds[Symbol(variable_string, "_max")] - calc_bounds_twosided!(var_min, var_max, variable, u, t, semi) + calc_bounds_twosided!(var_min, var_max, variable, u, t, semi, equations) @threaded for element in eachelement(dg, semi.cache) for j in eachnode(dg), i in eachnode(dg) diff --git a/src/solvers/dgsem_unstructured/containers_2d.jl b/src/solvers/dgsem_unstructured/containers_2d.jl index f51dd09801b..eec5df5f9de 100644 --- a/src/solvers/dgsem_unstructured/containers_2d.jl +++ b/src/solvers/dgsem_unstructured/containers_2d.jl @@ -7,11 +7,11 @@ # Container data structure (structure-of-arrays style) for DG elements on curved unstructured mesh struct UnstructuredElementContainer2D{RealT <: Real, uEltype <: Real} - node_coordinates::Array{RealT, 4} # [ndims, nnodes, 
nnodes, nelement] - jacobian_matrix::Array{RealT, 5} # [ndims, ndims, nnodes, nnodes, nelement] - inverse_jacobian::Array{RealT, 3} # [nnodes, nnodes, nelement] - contravariant_vectors::Array{RealT, 5} # [ndims, ndims, nnodes, nnodes, nelement] - normal_directions::Array{RealT, 4} # [ndims, nnodes, local sides, nelement] + node_coordinates::Array{RealT, 4} # [ndims, nnodes, nnodes, nelement] + jacobian_matrix::Array{RealT, 5} # [ndims, ndims, nnodes, nnodes, nelement] + inverse_jacobian::Array{RealT, 3} # [nnodes, nnodes, nelement] + contravariant_vectors::Array{RealT, 5} # [ndims, ndims, nnodes, nnodes, nelement] + normal_directions::Array{RealT, 4} # [ndims, nnodes, local sides, nelement] surface_flux_values::Array{uEltype, 4} # [variables, nnodes, local sides, elements] end diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 208848eec62..4f90ba11a46 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -384,6 +384,8 @@ function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing node, side, element, boundary) end end + + return nothing end # inlined version of the boundary flux calculation along a physical interface where the @@ -414,6 +416,8 @@ end for v in eachvariable(equations) surface_flux_values[v, node_index, side_index, element_index] = flux[v] end + + return nothing end # inlined version of the boundary flux and nonconseravtive terms calculation along a @@ -454,6 +458,8 @@ end 0.5f0 * noncons_flux[v] end + + return nothing end # Note! 
The local side numbering for the unstructured quadrilateral element implementation differs diff --git a/src/solvers/dgsem_unstructured/indicators_2d.jl b/src/solvers/dgsem_unstructured/indicators_2d.jl index e331cb5ee71..07a9a809c2e 100644 --- a/src/solvers/dgsem_unstructured/indicators_2d.jl +++ b/src/solvers/dgsem_unstructured/indicators_2d.jl @@ -20,5 +20,7 @@ function apply_smoothing!(mesh::UnstructuredMesh2D, alpha, alpha_tmp, dg, cache) alpha[left] = max(alpha_tmp[left], 0.5f0 * alpha_tmp[right], alpha[left]) alpha[right] = max(alpha_tmp[right], 0.5f0 * alpha_tmp[left], alpha[right]) end + + return nothing end end # @muladd diff --git a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl index 0cb3bd7f409..d6cf6e1ce6d 100644 --- a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl +++ b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl @@ -13,9 +13,10 @@ It stores a set of global indices for each boundary condition type and name to e during the call to `calc_boundary_flux!`. The original dictionary form of the boundary conditions set by the user in the elixir file is also stored for printing. """ -mutable struct UnstructuredSortedBoundaryTypes{N, BCs <: NTuple{N, Any}} +mutable struct UnstructuredSortedBoundaryTypes{N, BCs <: NTuple{N, Any}, + Vec <: AbstractVector{<:Integer}} boundary_condition_types::BCs # specific boundary condition type(s), e.g. 
BoundaryConditionDirichlet - boundary_indices::NTuple{N, Vector{Int}} # integer vectors containing global boundary indices + boundary_indices::NTuple{N, Vec} # integer vectors containing global boundary indices boundary_dictionary::Dict{Symbol, Any} # boundary conditions as set by the user in the elixir file boundary_symbol_indices::Dict{Symbol, Vector{Int}} # integer vectors containing global boundary indices per boundary identifier end @@ -33,10 +34,11 @@ function UnstructuredSortedBoundaryTypes(boundary_conditions::Dict, cache) boundary_symbol_indices = Dict{Symbol, Vector{Int}}() container = UnstructuredSortedBoundaryTypes{n_boundary_types, - typeof(boundary_condition_types)}(boundary_condition_types, - boundary_indices, - boundary_conditions, - boundary_symbol_indices) + typeof(boundary_condition_types), + Vector{Int}}(boundary_condition_types, + boundary_indices, + boundary_conditions, + boundary_symbol_indices) initialize!(container, cache) end @@ -119,4 +121,7 @@ function initialize!(boundary_types_container::UnstructuredSortedBoundaryTypes{N return boundary_types_container end + +# @eval due to @muladd +@eval Adapt.@adapt_structure(UnstructuredSortedBoundaryTypes) end # @muladd diff --git a/src/time_integration/methods_2N.jl b/src/time_integration/methods_2N.jl index 40b6e65cca6..d115aa366d1 100644 --- a/src/time_integration/methods_2N.jl +++ b/src/time_integration/methods_2N.jl @@ -6,7 +6,7 @@ #! 
format: noindent # Abstract base type for time integration schemes of storage class `2N` -abstract type SimpleAlgorithm2N end +abstract type SimpleAlgorithm2N <: AbstractTimeIntegrationAlgorithm end """ CarpenterKennedy2N54() @@ -75,7 +75,7 @@ struct CarpenterKennedy2N43 <: SimpleAlgorithm2N end # This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L1 -mutable struct SimpleIntegrator2NOptions{Callback} +mutable struct SimpleIntegratorOptions{Callback} callback::Callback # callbacks; used in Trixi.jl adaptive::Bool # whether the algorithm is adaptive; ignored dtmax::Float64 # ignored @@ -83,17 +83,18 @@ mutable struct SimpleIntegrator2NOptions{Callback} tstops::Vector{Float64} # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored end -function SimpleIntegrator2NOptions(callback, tspan; maxiters = typemax(Int), kwargs...) - SimpleIntegrator2NOptions{typeof(callback)}(callback, false, Inf, maxiters, - [last(tspan)]) +function SimpleIntegratorOptions(callback, tspan; maxiters = typemax(Int), kwargs...) + SimpleIntegratorOptions{typeof(callback)}(callback, false, Inf, maxiters, + [last(tspan)]) end # This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77 # This implements the interface components described at # https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 # which are used in Trixi.jl. 
-mutable struct SimpleIntegrator2N{RealT <: Real, uType, Params, Sol, F, Alg, - SimpleIntegrator2NOptions} <: AbstractTimeIntegrator +mutable struct SimpleIntegrator2N{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, Alg, + SimpleIntegratorOptions} <: AbstractTimeIntegrator u::uType du::uType u_tmp::uType @@ -105,19 +106,10 @@ mutable struct SimpleIntegrator2N{RealT <: Real, uType, Params, Sol, F, Alg, sol::Sol # faked f::F # `rhs!` of the semidiscretization alg::Alg # SimpleAlgorithm2N - opts::SimpleIntegrator2NOptions + opts::SimpleIntegratorOptions finalstep::Bool # added for convenience end -# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) -function Base.getproperty(integrator::SimpleIntegrator2N, field::Symbol) - if field === :stats - return (naccept = getfield(integrator, :iter),) - end - # general fallback - return getfield(integrator, field) -end - function init(ode::ODEProblem, alg::SimpleAlgorithm2N; dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) u = copy(ode.u0) @@ -127,47 +119,14 @@ function init(ode::ODEProblem, alg::SimpleAlgorithm2N; iter = 0 integrator = SimpleIntegrator2N(u, du, u_tmp, t, dt, zero(dt), iter, ode.p, (prob = ode,), ode.f, alg, - SimpleIntegrator2NOptions(callback, ode.tspan; - kwargs...), false) + SimpleIntegratorOptions(callback, ode.tspan; + kwargs...), false) - # initialize callbacks - if callback isa CallbackSet - foreach(callback.continuous_callbacks) do cb - throw(ArgumentError("Continuous callbacks are unsupported with the 2N storage time integration methods.")) - end - foreach(callback.discrete_callbacks) do cb - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + initialize_callbacks!(callback, integrator) return integrator end -# Fakes `solve`: https://diffeq.sciml.ai/v6.8/basics/overview/#Solving-the-Problems-1 -function solve(ode::ODEProblem, alg::SimpleAlgorithm2N; - dt, callback = nothing, kwargs...) 
- integrator = init(ode, alg, dt = dt, callback = callback; kwargs...) - - # Start actual solve - solve!(integrator) -end - -function solve!(integrator::SimpleIntegrator2N) - @unpack prob = integrator.sol - - integrator.finalstep = false - - @trixi_timeit timer() "main loop" while !integrator.finalstep - step!(integrator) - end # "main loop" timer - - finalize_callbacks(integrator) - - return TimeIntegratorSolution((first(prob.tspan), integrator.t), - (prob.u0, integrator.u), - integrator.sol.prob) -end - function step!(integrator::SimpleIntegrator2N) @unpack prob = integrator.sol @unpack alg = integrator @@ -179,12 +138,7 @@ function step!(integrator::SimpleIntegrator2N) error("time step size `dt` is NaN") end - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end + limit_dt!(integrator, t_end) # one time step integrator.u_tmp .= 0 @@ -205,46 +159,25 @@ function step!(integrator::SimpleIntegrator2N) integrator.iter += 1 integrator.t += integrator.dt - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - return nothing - end - end - end + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." 
- terminate!(integrator) - end + check_max_iter!(integrator) + + return nothing end # get a cache where the RHS can be stored -get_du(integrator::SimpleIntegrator2N) = integrator.du get_tmp_cache(integrator::SimpleIntegrator2N) = (integrator.u_tmp,) # some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u u_modified!(integrator::SimpleIntegrator2N, ::Bool) = false -# used by adaptive timestepping algorithms in DiffEq -function set_proposed_dt!(integrator::SimpleIntegrator2N, dt) - integrator.dt = dt -end - -# Required e.g. for `glm_speed_callback` -function get_proposed_dt(integrator::SimpleIntegrator2N) - return integrator.dt -end - # stop the time integration function terminate!(integrator::SimpleIntegrator2N) integrator.finalstep = true empty!(integrator.opts.tstops) + + return nothing end # used for AMR @@ -252,5 +185,7 @@ function Base.resize!(integrator::SimpleIntegrator2N, new_size) resize!(integrator.u, new_size) resize!(integrator.du, new_size) resize!(integrator.u_tmp, new_size) + + return nothing end end # @muladd diff --git a/src/time_integration/methods_3Sstar.jl b/src/time_integration/methods_3Sstar.jl index cee944727c4..31d5e622f1e 100644 --- a/src/time_integration/methods_3Sstar.jl +++ b/src/time_integration/methods_3Sstar.jl @@ -6,7 +6,7 @@ #! 
format: noindent # Abstract base type for time integration schemes of storage class `3S*` -abstract type SimpleAlgorithm3Sstar end +abstract type SimpleAlgorithm3Sstar <: AbstractTimeIntegrationAlgorithm end """ HypDiffN3Erk3Sstar52() @@ -130,23 +130,9 @@ struct ParsaniKetchesonDeconinck3Sstar32 <: SimpleAlgorithm3Sstar end end -mutable struct SimpleIntegrator3SstarOptions{Callback} - callback::Callback # callbacks; used in Trixi.jl - adaptive::Bool # whether the algorithm is adaptive; ignored - dtmax::Float64 # ignored - maxiters::Int # maximal number of time steps - tstops::Vector{Float64} # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored -end - -function SimpleIntegrator3SstarOptions(callback, tspan; maxiters = typemax(Int), - kwargs...) - SimpleIntegrator3SstarOptions{typeof(callback)}(callback, false, Inf, maxiters, - [last(tspan)]) -end - -mutable struct SimpleIntegrator3Sstar{RealT <: Real, uType, Params, Sol, F, Alg, - SimpleIntegrator3SstarOptions} <: - AbstractTimeIntegrator +mutable struct SimpleIntegrator3Sstar{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, Alg, + SimpleIntegratorOptions} <: AbstractTimeIntegrator u::uType du::uType u_tmp1::uType @@ -159,19 +145,10 @@ mutable struct SimpleIntegrator3Sstar{RealT <: Real, uType, Params, Sol, F, Alg, sol::Sol # faked f::F # `rhs!` of the semidiscretization alg::Alg # SimpleAlgorithm3Sstar - opts::SimpleIntegrator3SstarOptions + opts::SimpleIntegratorOptions finalstep::Bool # added for convenience end -# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) -function Base.getproperty(integrator::SimpleIntegrator3Sstar, field::Symbol) - if field === :stats - return (naccept = getfield(integrator, :iter),) - end - # general fallback - return getfield(integrator, field) -end - function init(ode::ODEProblem, alg::SimpleAlgorithm3Sstar; dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) 
u = copy(ode.u0) @@ -183,48 +160,15 @@ function init(ode::ODEProblem, alg::SimpleAlgorithm3Sstar; integrator = SimpleIntegrator3Sstar(u, du, u_tmp1, u_tmp2, t, dt, zero(dt), iter, ode.p, (prob = ode,), ode.f, alg, - SimpleIntegrator3SstarOptions(callback, - ode.tspan; - kwargs...), false) + SimpleIntegratorOptions(callback, + ode.tspan; + kwargs...), false) - # initialize callbacks - if callback isa CallbackSet - foreach(callback.continuous_callbacks) do cb - throw(ArgumentError("Continuous callbacks are unsupported with the 3 star time integration methods.")) - end - foreach(callback.discrete_callbacks) do cb - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + initialize_callbacks!(callback, integrator) return integrator end -# Fakes `solve`: https://diffeq.sciml.ai/v6.8/basics/overview/#Solving-the-Problems-1 -function solve(ode::ODEProblem, alg::SimpleAlgorithm3Sstar; - dt, callback = nothing, kwargs...) - integrator = init(ode, alg, dt = dt, callback = callback; kwargs...) 
- - # Start actual solve - solve!(integrator) -end - -function solve!(integrator::SimpleIntegrator3Sstar) - @unpack prob = integrator.sol - - integrator.finalstep = false - - @trixi_timeit timer() "main loop" while !integrator.finalstep - step!(integrator) - end # "main loop" timer - - finalize_callbacks(integrator) - - return TimeIntegratorSolution((first(prob.tspan), integrator.t), - (prob.u0, integrator.u), - integrator.sol.prob) -end - function step!(integrator::SimpleIntegrator3Sstar) @unpack prob = integrator.sol @unpack alg = integrator @@ -236,12 +180,7 @@ function step!(integrator::SimpleIntegrator3Sstar) error("time step size `dt` is NaN") end - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end + limit_dt!(integrator, t_end) # one time step integrator.u_tmp1 .= zero(eltype(integrator.u_tmp1)) @@ -268,48 +207,27 @@ function step!(integrator::SimpleIntegrator3Sstar) integrator.iter += 1 integrator.t += integrator.dt - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - return nothing - end - end - end + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." 
- terminate!(integrator) - end + check_max_iter!(integrator) + + return nothing end # get a cache where the RHS can be stored -get_du(integrator::SimpleIntegrator3Sstar) = integrator.du function get_tmp_cache(integrator::SimpleIntegrator3Sstar) - (integrator.u_tmp1, integrator.u_tmp2) + return (integrator.u_tmp1, integrator.u_tmp2) end # some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u u_modified!(integrator::SimpleIntegrator3Sstar, ::Bool) = false -# used by adaptive timestepping algorithms in DiffEq -function set_proposed_dt!(integrator::SimpleIntegrator3Sstar, dt) - integrator.dt = dt -end - -# Required e.g. for `glm_speed_callback` -function get_proposed_dt(integrator::SimpleIntegrator3Sstar) - return integrator.dt -end - # stop the time integration function terminate!(integrator::SimpleIntegrator3Sstar) integrator.finalstep = true empty!(integrator.opts.tstops) + + return nothing end # used for AMR @@ -318,5 +236,7 @@ function Base.resize!(integrator::SimpleIntegrator3Sstar, new_size) resize!(integrator.du, new_size) resize!(integrator.u_tmp1, new_size) resize!(integrator.u_tmp2, new_size) + + return nothing end end # @muladd diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl index de5e97ebd20..5603009e121 100644 --- a/src/time_integration/methods_SSP.jl +++ b/src/time_integration/methods_SSP.jl @@ -7,7 +7,7 @@ # Abstract base type for time integration schemes of explicit strong stability-preserving (SSP) # Runge-Kutta (RK) methods. They are high-order time discretizations that guarantee the TVD property. -abstract type SimpleAlgorithmSSP end +abstract type SimpleAlgorithmSSP <: AbstractTimeIntegrationAlgorithm end """ SimpleSSPRK33(; stage_callbacks=()) @@ -77,11 +77,12 @@ end # This implements the interface components described at # https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 # which are used in Trixi. 
-mutable struct SimpleIntegratorSSP{RealT <: Real, uType, Params, Sol, F, Alg, +mutable struct SimpleIntegratorSSP{RealT <: Real, uType, + Params, Sol, F, Alg, SimpleIntegratorSSPOptions} <: AbstractTimeIntegrator u::uType du::uType - r0::uType + u_tmp::uType t::RealT tdir::RealT # DIRection of time integration, i.e., if one marches forward or backward in time dt::RealT # current time step @@ -116,122 +117,49 @@ end has_tstop(integrator::SimpleIntegratorSSP) = !isempty(integrator.opts.tstops) first_tstop(integrator::SimpleIntegratorSSP) = first(integrator.opts.tstops) -# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) -function Base.getproperty(integrator::SimpleIntegratorSSP, field::Symbol) - if field === :stats - return (naccept = getfield(integrator, :iter),) - end - # general fallback - return getfield(integrator, field) -end - -""" - solve(ode, alg; dt, callbacks, kwargs...) - -The following structures and methods provide the infrastructure for SSP Runge-Kutta methods -of type `SimpleAlgorithmSSP`. -""" -function solve(ode::ODEProblem, alg = SimpleSSPRK33()::SimpleAlgorithmSSP; - dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) +function init(ode::ODEProblem, alg::SimpleAlgorithmSSP; + dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) 
u = copy(ode.u0) du = similar(u) - r0 = similar(u) + u_tmp = similar(u) t = first(ode.tspan) tdir = sign(ode.tspan[end] - ode.tspan[1]) iter = 0 - integrator = SimpleIntegratorSSP(u, du, r0, t, tdir, dt, dt, iter, ode.p, + integrator = SimpleIntegratorSSP(u, du, u_tmp, t, tdir, dt, dt, iter, ode.p, (prob = ode,), ode.f, alg, SimpleIntegratorSSPOptions(callback, ode.tspan; kwargs...), false, true, false) # resize container - resize!(integrator.p, nelements(integrator.p.solver, integrator.p.cache)) + resize!(integrator.p, integrator.p.solver.volume_integral, + nelements(integrator.p.solver, integrator.p.cache)) - # initialize callbacks - if callback isa CallbackSet - foreach(callback.continuous_callbacks) do cb - throw(ArgumentError("Continuous callbacks are unsupported with the SSP time integration methods.")) - end - foreach(callback.discrete_callbacks) do cb - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + # Standard callbacks + initialize_callbacks!(callback, integrator) + # Addition for `SimpleAlgorithmSSP` which may have stage callbacks for stage_callback in alg.stage_callbacks init_callback(stage_callback, integrator.p) end - solve!(integrator) + return integrator end function solve!(integrator::SimpleIntegratorSSP) @unpack prob = integrator.sol - @unpack alg = integrator - t_end = last(prob.tspan) - callbacks = integrator.opts.callback integrator.finalstep = false - @trixi_timeit timer() "main loop" while !integrator.finalstep - if isnan(integrator.dt) - error("time step size `dt` is NaN") - end - - modify_dt_for_tstops!(integrator) - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end - - @. 
integrator.r0 = integrator.u - for stage in eachindex(alg.c) - t_stage = integrator.t + integrator.dt * alg.c[stage] - # compute du - integrator.f(integrator.du, integrator.u, integrator.p, t_stage) - - # perform forward Euler step - @. integrator.u = integrator.u + integrator.dt * integrator.du - - for stage_callback in alg.stage_callbacks - stage_callback(integrator.u, integrator, stage) - end - - # perform convex combination - @. integrator.u = (alg.numerator_a[stage] * integrator.r0 + - alg.numerator_b[stage] * integrator.u) / - alg.denominator[stage] - end - - integrator.iter += 1 - integrator.t += integrator.dt - - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - return nothing - end - end - end - - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." - terminate!(integrator) - end + @trixi_timeit timer() "main loop" while !integrator.finalstep + step!(integrator) end # Empty the tstops array. # This cannot be done in terminate!(integrator::SimpleIntegratorSSP) because DiffEqCallbacks.PeriodicCallbackAffect would return at error. 
extract_all!(integrator.opts.tstops) - for stage_callback in alg.stage_callbacks + for stage_callback in integrator.alg.stage_callbacks finalize_callback(stage_callback, integrator.p) end @@ -241,26 +169,60 @@ function solve!(integrator::SimpleIntegratorSSP) (prob.u0, integrator.u), prob) end -# get a cache where the RHS can be stored -get_du(integrator::SimpleIntegratorSSP) = integrator.du -get_tmp_cache(integrator::SimpleIntegratorSSP) = (integrator.r0,) +function step!(integrator::SimpleIntegratorSSP) + @unpack prob = integrator.sol + @unpack alg = integrator + t_end = last(prob.tspan) + callbacks = integrator.opts.callback -# some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u -u_modified!(integrator::SimpleIntegratorSSP, ::Bool) = false + @assert !integrator.finalstep + if isnan(integrator.dt) + error("time step size `dt` is NaN") + end -# used by adaptive timestepping algorithms in DiffEq -function set_proposed_dt!(integrator::SimpleIntegratorSSP, dt) - (integrator.dt = dt; integrator.dtcache = dt) -end + modify_dt_for_tstops!(integrator) + + limit_dt!(integrator, t_end) + + @. integrator.u_tmp = integrator.u + for stage in eachindex(alg.c) + t_stage = integrator.t + integrator.dt * alg.c[stage] + # compute du + integrator.f(integrator.du, integrator.u, integrator.p, t_stage) + + # perform forward Euler step + @. integrator.u = integrator.u + integrator.dt * integrator.du + + for stage_callback in alg.stage_callbacks + stage_callback(integrator.u, integrator, stage) + end + + # perform convex combination + @. 
integrator.u = (alg.numerator_a[stage] * integrator.u_tmp + + alg.numerator_b[stage] * integrator.u) / + alg.denominator[stage] + end + integrator.iter += 1 + integrator.t += integrator.dt -# used by adaptive timestepping algorithms in DiffEq -function get_proposed_dt(integrator::SimpleIntegratorSSP) - return ifelse(integrator.opts.adaptive, integrator.dt, integrator.dtcache) + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) + + check_max_iter!(integrator) + + return nothing end +# get a cache where the RHS can be stored +get_tmp_cache(integrator::SimpleIntegratorSSP) = (integrator.u_tmp,) + +# some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u +u_modified!(integrator::SimpleIntegratorSSP, ::Bool) = false + # stop the time integration function terminate!(integrator::SimpleIntegratorSSP) integrator.finalstep = true + + return nothing end """ @@ -285,32 +247,21 @@ function modify_dt_for_tstops!(integrator::SimpleIntegratorSSP) min(abs(integrator.dtcache), abs(tdir_tstop - tdir_t)) # step! 
to the end end end + + return nothing end # used for AMR function Base.resize!(integrator::SimpleIntegratorSSP, new_size) resize!(integrator.u, new_size) resize!(integrator.du, new_size) - resize!(integrator.r0, new_size) + resize!(integrator.u_tmp, new_size) # Resize container # new_size = n_variables * n_nodes^n_dims * n_elements n_elements = nelements(integrator.p.solver, integrator.p.cache) - resize!(integrator.p, n_elements) -end - -function Base.resize!(semi::AbstractSemidiscretization, new_size) - resize!(semi, semi.solver.volume_integral, new_size) -end - -Base.resize!(semi, volume_integral::AbstractVolumeIntegral, new_size) = nothing - -function Base.resize!(semi, volume_integral::VolumeIntegralSubcellLimiting, new_size) - # Resize container antidiffusive_fluxes - resize!(semi.cache.antidiffusive_fluxes, new_size) + resize!(integrator.p, integrator.p.solver.volume_integral, n_elements) - # Resize container subcell_limiter_coefficients - @unpack limiter = volume_integral - resize!(limiter.cache.subcell_limiter_coefficients, new_size) + return nothing end end # @muladd diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl index 3f9460dd433..ad443a7ad47 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl @@ -190,7 +190,8 @@ end # This implements the interface components described at # https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 # which are used in Trixi. 
-mutable struct PairedExplicitRK2Integrator{RealT <: Real, uType, Params, Sol, F, +mutable struct PairedExplicitRK2Integrator{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, PairedExplicitRKOptions} <: AbstractPairedExplicitRKSingleIntegrator u::uType @@ -234,15 +235,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK2; false, true, false, k1) - # initialize callbacks - if callback isa CallbackSet - for cb in callback.continuous_callbacks - throw(ArgumentError("Continuous callbacks are unsupported with paired explicit Runge-Kutta methods.")) - end - for cb in callback.discrete_callbacks - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + initialize_callbacks!(callback, integrator) return integrator end @@ -260,12 +253,7 @@ function step!(integrator::PairedExplicitRK2Integrator) modify_dt_for_tstops!(integrator) - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end + limit_dt!(integrator, t_end) @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin # First and second stage are identical across all single/standalone PERK methods @@ -287,22 +275,10 @@ function step!(integrator::PairedExplicitRK2Integrator) integrator.iter += 1 integrator.t += integrator.dt - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - return nothing - end - end - end + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." 
- terminate!(integrator) - end + check_max_iter!(integrator) + + return nothing end end # @muladd diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl index 4f7344678d9..81169287996 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl @@ -183,7 +183,8 @@ end # This implements the interface components described at # https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 # which are used in Trixi.jl. -mutable struct PairedExplicitRK3Integrator{RealT <: Real, uType, Params, Sol, F, +mutable struct PairedExplicitRK3Integrator{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, PairedExplicitRKOptions} <: AbstractPairedExplicitRKSingleIntegrator u::uType @@ -230,15 +231,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK3; false, true, false, k1, kS1) - # initialize callbacks - if callback isa CallbackSet - for cb in callback.continuous_callbacks - throw(ArgumentError("Continuous callbacks are unsupported with paired explicit Runge-Kutta methods.")) - end - for cb in callback.discrete_callbacks - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + initialize_callbacks!(callback, integrator) return integrator end @@ -256,12 +249,7 @@ function step!(integrator::PairedExplicitRK3Integrator) modify_dt_for_tstops!(integrator) - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end + limit_dt!(integrator, t_end) @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin # First and second stage are identical across all single/standalone PERK methods @@ -292,23 +280,11 @@ function step!(integrator::PairedExplicitRK3Integrator) 
integrator.iter += 1 integrator.t += integrator.dt - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - return nothing - end - end - end + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." - terminate!(integrator) - end + check_max_iter!(integrator) + + return nothing end function Base.resize!(integrator::PairedExplicitRK3Integrator, new_size) @@ -318,5 +294,7 @@ function Base.resize!(integrator::PairedExplicitRK3Integrator, new_size) resize!(integrator.k1, new_size) resize!(integrator.kS1, new_size) + + return nothing end end # @muladd diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK4.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK4.jl index d0571b9b0ae..d7951651640 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK4.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK4.jl @@ -184,7 +184,8 @@ end # This implements the interface components described at # https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 # which are used in Trixi.jl. 
-mutable struct PairedExplicitRK4Integrator{RealT <: Real, uType, Params, Sol, F, +mutable struct PairedExplicitRK4Integrator{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, PairedExplicitRKOptions} <: AbstractPairedExplicitRKSingleIntegrator u::uType @@ -228,15 +229,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK4; false, true, false, k1) - # initialize callbacks - if callback isa CallbackSet - for cb in callback.continuous_callbacks - throw(ArgumentError("Continuous callbacks are unsupported with paired explicit Runge-Kutta methods.")) - end - for cb in callback.discrete_callbacks - cb.initialize(cb, integrator.u, integrator.t, integrator) - end - end + initialize_callbacks!(callback, integrator) return integrator end @@ -280,6 +273,8 @@ end integrator.u[i] += 0.5 * integrator.dt * (integrator.k1[i] + integrator.du[i]) end + + return nothing end function step!(integrator::PairedExplicitRK4Integrator) @@ -295,12 +290,7 @@ function step!(integrator::PairedExplicitRK4Integrator) modify_dt_for_tstops!(integrator) - # if the next iteration would push the simulation beyond the end time, set dt accordingly - if integrator.t + integrator.dt > t_end || - isapprox(integrator.t + integrator.dt, t_end) - integrator.dt = t_end - integrator.t - terminate!(integrator) - end + limit_dt!(integrator, t_end) @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin PERK_k1!(integrator, prob.p) @@ -317,21 +307,10 @@ function step!(integrator::PairedExplicitRK4Integrator) integrator.iter += 1 integrator.t += integrator.dt - @trixi_timeit timer() "Step-Callbacks" begin - # handle callbacks - if callbacks isa CallbackSet - for cb in callbacks.discrete_callbacks - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) - end - end - end - end + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) - # respect maximum number of iterations - if integrator.iter >= integrator.opts.maxiters && 
!integrator.finalstep - @warn "Interrupted. Larger maxiters is needed." - terminate!(integrator) - end + check_max_iter!(integrator) + + return nothing end end # @muladd diff --git a/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl index dfa5ed60d51..333ebc14983 100644 --- a/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl +++ b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl @@ -10,14 +10,14 @@ include("polynomial_optimizer.jl") # Abstract base type for both single/standalone and multi-level # PERK (Paired Explicit Runge-Kutta) time integration schemes -abstract type AbstractPairedExplicitRK end +abstract type AbstractPairedExplicitRK <: AbstractTimeIntegrationAlgorithm end # Abstract base type for single/standalone PERK time integration schemes abstract type AbstractPairedExplicitRKSingle <: AbstractPairedExplicitRK end # This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L1 mutable struct PairedExplicitRKOptions{Callback, TStops} callback::Callback # callbacks; used in Trixi - adaptive::Bool # whether the algorithm is adaptive + adaptive::Bool # whether the algorithm is adaptive (false) dtmax::Float64 # ignored maxiters::Int # maximal number of time steps tstops::TStops # tstops from https://diffeq.sciml.ai/v6.8/basics/common_solver_opts/#Output-Control-1; ignored @@ -64,15 +64,6 @@ function calculate_cfl(ode_algorithm::AbstractPairedExplicitRK, ode) return cfl_number end -# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) -function Base.getproperty(integrator::AbstractPairedExplicitRKIntegrator, field::Symbol) - if field === :stats - return (naccept = getfield(integrator, :iter),) - end - # general fallback - return getfield(integrator, field) -end - """ 
add_tstop!(integrator::AbstractPairedExplicitRKIntegrator, t) Add a time stop during the time integration process. @@ -87,39 +78,18 @@ function add_tstop!(integrator::AbstractPairedExplicitRKIntegrator, t) pop!(integrator.opts.tstops) end push!(integrator.opts.tstops, integrator.tdir * t) + + return nothing end has_tstop(integrator::AbstractPairedExplicitRKIntegrator) = !isempty(integrator.opts.tstops) first_tstop(integrator::AbstractPairedExplicitRKIntegrator) = first(integrator.opts.tstops) -# Fakes `solve`: https://diffeq.sciml.ai/v6.8/basics/overview/#Solving-the-Problems-1 -function solve(ode::ODEProblem, alg::AbstractPairedExplicitRK; - dt, callback = nothing, kwargs...) - integrator = init(ode, alg, dt = dt, callback = callback; kwargs...) - - # Start actual solve - solve!(integrator) -end - -function solve!(integrator::AbstractPairedExplicitRKIntegrator) - @unpack prob = integrator.sol - - integrator.finalstep = false - - @trixi_timeit timer() "main loop" while !integrator.finalstep - step!(integrator) - end - - finalize_callbacks(integrator) - - return TimeIntegratorSolution((first(prob.tspan), integrator.t), - (prob.u0, integrator.u), - integrator.sol.prob) -end - # Function that computes the first stage of a general PERK method @inline function PERK_k1!(integrator::AbstractPairedExplicitRKIntegrator, p) integrator.f(integrator.k1, integrator.u, p, integrator.t) + + return nothing end @inline function PERK_k2!(integrator::AbstractPairedExplicitRKSingleIntegrator, p, alg) @@ -130,6 +100,8 @@ end integrator.f(integrator.du, integrator.u_tmp, p, integrator.t + alg.c[2] * integrator.dt) + + return nothing end @inline function PERK_ki!(integrator::AbstractPairedExplicitRKSingleIntegrator, p, alg, @@ -144,6 +116,8 @@ end integrator.f(integrator.du, integrator.u_tmp, p, integrator.t + alg.c[stage] * integrator.dt) + + return nothing end # used for AMR (Adaptive Mesh Refinement) @@ -153,27 +127,21 @@ function 
Base.resize!(integrator::AbstractPairedExplicitRKIntegrator, new_size) resize!(integrator.u_tmp, new_size) resize!(integrator.k1, new_size) + + return nothing end # get a cache where the RHS can be stored -get_du(integrator::AbstractPairedExplicitRKIntegrator) = integrator.du get_tmp_cache(integrator::AbstractPairedExplicitRKIntegrator) = (integrator.u_tmp,) # some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u u_modified!(integrator::AbstractPairedExplicitRKIntegrator, ::Bool) = false -# used by adaptive timestepping algorithms in DiffEq -function set_proposed_dt!(integrator::AbstractPairedExplicitRKIntegrator, dt) - (integrator.dt = dt; integrator.dtcache = dt) -end - -function get_proposed_dt(integrator::AbstractPairedExplicitRKIntegrator) - return ifelse(integrator.opts.adaptive, integrator.dt, integrator.dtcache) -end - # stop the time integration function terminate!(integrator::AbstractPairedExplicitRKIntegrator) integrator.finalstep = true + + return nothing end """ @@ -199,6 +167,8 @@ function modify_dt_for_tstops!(integrator::AbstractPairedExplicitRKIntegrator) min(abs(integrator.dtcache), abs(tdir_tstop - tdir_t)) # step! to the end end end + + return nothing end # Add definitions of functions related to polynomial optimization by NLsolve here diff --git a/src/time_integration/relaxation_methods/entropy_relaxation.jl b/src/time_integration/relaxation_methods/entropy_relaxation.jl new file mode 100644 index 00000000000..914302f7afe --- /dev/null +++ b/src/time_integration/relaxation_methods/entropy_relaxation.jl @@ -0,0 +1,337 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! 
format: noindent + +@inline function integrate_w_dot_stage(stage, u_stage, + mesh::Union{TreeMesh{1}, StructuredMesh{1}}, + equations, dg::DG, cache) + @trixi_timeit timer() "Integrate w ⋅ k" begin + # Calculate ∫(∂S/∂u ⋅ k)dΩ = ∫(w ⋅ k)dΩ + integrate_via_indices(u_stage, mesh, equations, dg, cache, + stage) do u_stage, i, element, equations, dg, stage + w_node = cons2entropy(get_node_vars(u_stage, equations, dg, + i, element), + equations) + stage_node = get_node_vars(stage, equations, dg, i, element) + dot(w_node, stage_node) + end + end +end + +@inline function integrate_w_dot_stage(stage, u_stage, + mesh::Union{TreeMesh{2}, StructuredMesh{2}, + UnstructuredMesh2D, P4estMesh{2}, + T8codeMesh{2}}, + equations, dg::DG, cache) + @trixi_timeit timer() "Integrate w ⋅ k" begin + # Calculate ∫(∂S/∂u ⋅ k)dΩ = ∫(w ⋅ k)dΩ + integrate_via_indices(u_stage, mesh, equations, dg, cache, + stage) do u_stage, i, j, element, equations, dg, stage + w_node = cons2entropy(get_node_vars(u_stage, equations, dg, + i, j, element), + equations) + stage_node = get_node_vars(stage, equations, dg, i, j, element) + dot(w_node, stage_node) + end + end +end + +@inline function integrate_w_dot_stage(stage, u_stage, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}, T8codeMesh{3}}, + equations, dg::DG, cache) + @trixi_timeit timer() "Integrate w ⋅ k" begin + # Calculate ∫(∂S/∂u ⋅ k)dΩ = ∫(w ⋅ k)dΩ + integrate_via_indices(u_stage, mesh, equations, dg, cache, + stage) do u_stage, i, j, k, element, equations, dg, stage + w_node = cons2entropy(get_node_vars(u_stage, equations, dg, + i, j, k, element), + equations) + stage_node = get_node_vars(stage, equations, dg, i, j, k, element) + dot(w_node, stage_node) + end + end +end + +@inline function entropy_difference(gamma, S_old, dS, u_gamma_dir, mesh, + equations, dg::DG, cache) + return integrate(entropy, u_gamma_dir, mesh, equations, dg, cache) - + S_old - gamma * dS # `dS` is true entropy change computed from stages +end + +@inline 
function add_direction!(u_tmp_wrap, u_wrap, dir_wrap, gamma, + dg::DG, cache) + @threaded for element in eachelement(dg, cache) + @views @. u_tmp_wrap[.., element] = u_wrap[.., element] + + gamma * dir_wrap[.., element] + end +end + +""" + AbstractRelaxationSolver + +Abstract type for relaxation solvers used to compute the relaxation parameter `` \\gamma`` +in the entropy relaxation time integration methods +[`SubDiagonalRelaxationAlgorithm`](@ref) and [`vanderHouwenRelaxationAlgorithm`](@ref). +Implemented methods are [`RelaxationSolverBisection`](@ref) and [`RelaxationSolverNewton`](@ref). +""" +abstract type AbstractRelaxationSolver end + +@doc raw""" + RelaxationSolverBisection(; max_iterations = 25, + root_tol = 1e-15, gamma_tol = 1e-13, + gamma_min = 0.1, gamma_max = 1.2) + +Solve the relaxation equation +```math +H \big(\boldsymbol U_{n+1}(\gamma_n) \big) = +H \left( \boldsymbol U_n + \Delta t \gamma_n \sum_{i=1}^Sb_i \boldsymbol K_i \right) \overset{!}{=} +H(\boldsymbol U_n) + \gamma_n \Delta H (\boldsymbol U_n) +``` +with true entropy change +```math +\Delta H \coloneqq +\Delta t \sum_{i=1}^S b_i +\left \langle \frac{\partial H(\boldsymbol U_{n,i})}{\partial \boldsymbol U_{n,i}}, +\boldsymbol K_i +\right \rangle +``` +for the relaxation parameter ``\gamma_n`` using a bisection method. +Supposed to be supplied to a relaxation Runge-Kutta method such as [`SubDiagonalAlgorithm`](@ref) or [`vanderHouwenRelaxationAlgorithm`](@ref). + +# Arguments +- `max_iterations::Int`: Maximum number of bisection iterations. +- `root_tol::RealT`: Function-tolerance for the relaxation equation, i.e., + the absolute defect of the left and right-hand side of the equation above, i.e., + the solver stops if + ``\left|H_{n+1} - \left(H_n + \gamma_n \Delta H( \boldsymbol U_n) \right) \right| \leq \text{root\_tol}``. 
+- `gamma_tol::RealT`: Absolute tolerance for the bracketing interval length, i.e., the bisection stops if + ``|\gamma_{\text{max}} - \gamma_{\text{min}}| \leq \text{gamma\_tol}``. +- `gamma_min::RealT`: Lower bound of the initial bracketing interval. +- `gamma_max::RealT`: Upper bound of the initial bracketing interval. +""" +struct RelaxationSolverBisection{RealT <: Real} <: AbstractRelaxationSolver + # General parameters + max_iterations::Int # Maximum number of bisection iterations + root_tol::RealT # Function-tolerance for the relaxation equation + gamma_tol::RealT # Absolute tolerance for the bracketing interval length + # Method-specific parameters + gamma_min::RealT # Lower bound of the initial bracketing interval + gamma_max::RealT # Upper bound of the initial bracketing interval +end + +function RelaxationSolverBisection(; max_iterations = 25, + root_tol = 1e-15, gamma_tol = 1e-13, + gamma_min = 0.1, gamma_max = 1.2) + return RelaxationSolverBisection(max_iterations, root_tol, gamma_tol, + gamma_min, gamma_max) +end + +function Base.show(io::IO, relaxation_solver::RelaxationSolverBisection) + print(io, "RelaxationSolverBisection(max_iterations=", + relaxation_solver.max_iterations, + ", root_tol=", relaxation_solver.root_tol, + ", gamma_tol=", relaxation_solver.gamma_tol, + ", gamma_min=", relaxation_solver.gamma_min, + ", gamma_max=", relaxation_solver.gamma_max, ")") +end +function Base.show(io::IO, ::MIME"text/plain", + relaxation_solver::RelaxationSolverBisection) + if get(io, :compact, false) + show(io, relaxation_solver) + else + setup = [ + "max_iterations" => relaxation_solver.max_iterations, + "root_tol" => relaxation_solver.root_tol, + "gamma_tol" => relaxation_solver.gamma_tol, + "gamma_min" => relaxation_solver.gamma_min, + "gamma_max" => relaxation_solver.gamma_max + ] + summary_box(io, "RelaxationSolverBisection", setup) + end +end + +function relaxation_solver!(integrator, u_tmp_wrap, u_wrap, dir_wrap, dS, + mesh, equations, dg::DG, cache, 
+ relaxation_solver::RelaxationSolverBisection) + @unpack max_iterations, root_tol, gamma_tol, gamma_min, gamma_max = relaxation_solver + + add_direction!(u_tmp_wrap, u_wrap, dir_wrap, gamma_max, dg, cache) + @trixi_timeit timer() "ΔH" r_max=entropy_difference(gamma_max, integrator.S_old, dS, + u_tmp_wrap, mesh, + equations, dg, cache) + + add_direction!(u_tmp_wrap, u_wrap, dir_wrap, gamma_min, dg, cache) + @trixi_timeit timer() "ΔH" r_min=entropy_difference(gamma_min, integrator.S_old, dS, + u_tmp_wrap, mesh, + equations, dg, cache) + + entropy_residual = 0 + # Check if there exists a root for `r` in the interval [gamma_min, gamma_max] + if r_max > 0 && r_min < 0 + iterations = 0 + while gamma_max - gamma_min > gamma_tol && iterations < max_iterations + integrator.gamma = (gamma_max + gamma_min) / 2 + + add_direction!(u_tmp_wrap, u_wrap, dir_wrap, integrator.gamma, dg, cache) + @trixi_timeit timer() "ΔH" entropy_residual=entropy_difference(integrator.gamma, + integrator.S_old, + dS, + u_tmp_wrap, + mesh, + equations, + dg, cache) + if abs(entropy_residual) <= root_tol # Sufficiently close at root + break + end + + # Bisect interval + if entropy_residual < 0 + gamma_min = integrator.gamma + else + gamma_max = integrator.gamma + end + iterations += 1 + end + else # No proper bracketing interval found + integrator.gamma = 1 + add_direction!(u_tmp_wrap, u_wrap, dir_wrap, integrator.gamma, dg, cache) + @trixi_timeit timer() "ΔH" entropy_residual=entropy_difference(integrator.gamma, + integrator.S_old, + dS, u_tmp_wrap, + mesh, equations, + dg, cache) + end + # Update old entropy + integrator.S_old += integrator.gamma * dS + entropy_residual + + return nothing +end + +@doc raw""" + RelaxationSolverNewton(; max_iterations = 5, + root_tol = 1e-15, gamma_tol = 1e-13, + gamma_min = 1e-13, step_scaling = 1.0) + +Solve the relaxation equation +```math +H \big(\boldsymbol U_{n+1}(\gamma_n) \big) = +H \left( \boldsymbol U_n + \Delta t \gamma_n \sum_{i=1}^Sb_i \boldsymbol K_i 
\right) \overset{!}{=} +H(\boldsymbol U_n) + \gamma_n \Delta H (\boldsymbol U_n) +``` +with true entropy change +```math +\Delta H \coloneqq +\Delta t \sum_{i=1}^S b_i +\left \langle \frac{\partial H(\boldsymbol U_{n,i})}{\partial \boldsymbol U_{n,i}}, +\boldsymbol K_i +\right \rangle +``` +for the relaxation parameter ``\gamma_n`` using Newton's method. +The derivative of the relaxation function is known and can be directly computed. +Supposed to be supplied to a relaxation Runge-Kutta method such as [`SubDiagonalAlgorithm`](@ref) or [`vanderHouwenRelaxationAlgorithm`](@ref). + +# Arguments +- `max_iterations::Int`: Maximum number of Newton iterations. +- `root_tol::RealT`: Function-tolerance for the relaxation equation, i.e., + the absolute defect of the left and right-hand side of the equation above, i.e., + the solver stops if + ``|H_{n+1} - (H_n + \gamma_n \Delta H( \boldsymbol U_n))| \leq \text{root\_tol}``. +- `gamma_tol::RealT`: Absolute tolerance for the Newton update step size, i.e., the solver stops if + ``|\gamma_{\text{new}} - \gamma_{\text{old}}| \leq \text{gamma\_tol}``. +- `gamma_min::RealT`: Minimum relaxation parameter. If the Newton iteration results a value smaller than this, + the relaxation parameter is set to 1. +- `step_scaling::RealT`: Scaling factor for the Newton step. For `step_scaling > 1` the Newton procedure is accelerated, while for `step_scaling < 1` it is damped. +""" +struct RelaxationSolverNewton{RealT <: Real} <: AbstractRelaxationSolver + # General parameters + max_iterations::Int # Maximum number of Newton iterations + root_tol::RealT # Function-tolerance for the relaxation equation + gamma_tol::RealT # Absolute tolerance for the Newton update step size + # Method-specific parameters + # Minimum relaxation parameter. If the Newton iteration computes a value smaller than this, + # the relaxation parameter is set to 1. 
+ gamma_min::RealT + step_scaling::RealT # Scaling factor for the Newton step +end +function RelaxationSolverNewton(; max_iterations = 5, + root_tol = 1e-15, gamma_tol = 1e-13, + gamma_min = 1e-13, step_scaling = 1.0) + return RelaxationSolverNewton(max_iterations, root_tol, gamma_tol, + gamma_min, step_scaling) +end + +function Base.show(io::IO, + relaxation_solver::RelaxationSolverNewton) + print(io, "RelaxationSolverNewton(max_iterations=", + relaxation_solver.max_iterations, + ", root_tol=", relaxation_solver.root_tol, + ", gamma_tol=", relaxation_solver.gamma_tol, + ", gamma_min=", relaxation_solver.gamma_min, + ", step_scaling=", relaxation_solver.step_scaling, ")") +end + +function Base.show(io::IO, ::MIME"text/plain", + relaxation_solver::RelaxationSolverNewton) + if get(io, :compact, false) + show(io, relaxation_solver) + else + setup = [ + "max_iterations" => relaxation_solver.max_iterations, + "root_tol" => relaxation_solver.root_tol, + "gamma_tol" => relaxation_solver.gamma_tol, + "gamma_min" => relaxation_solver.gamma_min, + "step_scaling" => relaxation_solver.step_scaling + ] + summary_box(io, "RelaxationSolverNewton", setup) + end +end + +function relaxation_solver!(integrator, + u_tmp_wrap, u_wrap, dir_wrap, dS, + mesh, equations, dg::DG, cache, + relaxation_solver::RelaxationSolverNewton) + @unpack max_iterations, root_tol, gamma_tol, gamma_min, step_scaling = relaxation_solver + + iterations = 0 + entropy_residual = 0 + while iterations < max_iterations + add_direction!(u_tmp_wrap, u_wrap, dir_wrap, integrator.gamma, dg, cache) + @trixi_timeit timer() "ΔH" entropy_residual=entropy_difference(integrator.gamma, + integrator.S_old, + dS, u_tmp_wrap, + mesh, equations, + dg, cache) + + if abs(entropy_residual) <= root_tol # Sufficiently close at root + break + end + + # Derivative of object relaxation function `r` with respect to `gamma` + dr = integrate_w_dot_stage(dir_wrap, u_tmp_wrap, mesh, equations, dg, cache) - + dS + + step = step_scaling * 
entropy_residual / dr # Newton-Raphson update step + if abs(step) <= gamma_tol # Prevent unnecessary small steps + break + end + + integrator.gamma -= step # Perform Newton-Raphson update + iterations += 1 + end + + # Catch Newton failures + if integrator.gamma < gamma_min || isnan(integrator.gamma) || + isinf(integrator.gamma) + integrator.gamma = 1 + entropy_residual = 0 # May be very large, avoid using this in `S_old` + end + # Update old entropy + integrator.S_old += integrator.gamma * dS + entropy_residual + + return nothing +end +end # @muladd diff --git a/src/time_integration/relaxation_methods/methods_subdiagonal.jl b/src/time_integration/relaxation_methods/methods_subdiagonal.jl new file mode 100644 index 00000000000..009a8cc5d61 --- /dev/null +++ b/src/time_integration/relaxation_methods/methods_subdiagonal.jl @@ -0,0 +1,280 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +@doc raw""" + SubDiagonalAlgorithm + +Abstract type for sub-diagonal Runge-Kutta methods, i.e., +methods with a Butcher tableau of the form +```math +\begin{array} + {c|c|c c c c c} + i & \boldsymbol c & & & A & & \\ + \hline + 1 & 0 & & & & &\\ + 2 & c_2 & c_2 & & & & \\ + 3 & c_3 & 0 & c_3 & & & \\ + 4 & c_4 & 0 & 0 & c_4 & \\ + \vdots & & \vdots & \vdots & \ddots & \ddots & \\ + S & c_S & 0 & & \dots & 0 & c_S \\ + \hline + & & b_1 & b_2 & \dots & & b_S +\end{array} +``` + +Currently implemented are the third-order, three-stage method by Ralston [`Ralston3`](@ref) +and the canonical fourth-order, four-stage method by Kutta [`RK44`](@ref). 
+""" +abstract type SubDiagonalAlgorithm <: AbstractTimeIntegrationAlgorithm end + +""" + SubDiagonalRelaxationAlgorithm + +Abstract type for sub-diagonal Runge-Kutta algorithms (see [`SubDiagonalAlgorithm`](@ref)) +with relaxation to achieve entropy conservation/stability. +In addition to the standard Runge-Kutta method, these algorithms are equipped with a +relaxation solver [`AbstractRelaxationSolver`](@ref) which is used to compute the relaxation parameter ``\\gamma``. +This allows the relaxation methods to suppress entropy defects due to the time stepping. + +For details on the relaxation procedure, see +- Ketcheson (2019) + Relaxation Runge-Kutta Methods: Conservation and Stability for Inner-Product Norms + [DOI: 10.1137/19M1263662](https://doi.org/10.1137/19M1263662) +- Ranocha et al. (2020) + Relaxation Runge-Kutta Methods: Fully Discrete Explicit Entropy-Stable Schemes for the Compressible Euler and Navier-Stokes Equations + [DOI: 10.1137/19M1263480](https://doi.org/10.1137/19M1263480) + +Currently implemented are the third-order, three-stage method by Ralston [`Ralston3`](@ref) +and the canonical fourth-order, four-stage method by Kutta [`RK44`](@ref). +""" +abstract type SubDiagonalRelaxationAlgorithm <: + AbstractRelaxationTimeIntegrationAlgorithm end + +""" + Ralston3() + +Relaxation version of Ralston's third-order Runge-Kutta method, implemented as a [`SubDiagonalAlgorithm`](@ref). +The weight vector is given by ``\\boldsymbol b = [2/9, 1/3, 4/9]`` and the +abscissae/timesteps by ``\\boldsymbol c = [0.0, 0.5, 0.75]``. + +This method has minimum local error bound among the ``S=p=3`` methods. 
+- Ralston (1962) + Runge-Kutta Methods with Minimum Error Bounds + [DOI: 10.1090/S0025-5718-1962-0150954-0](https://doi.org/10.1090/S0025-5718-1962-0150954-0) +""" +struct Ralston3 <: SubDiagonalAlgorithm + b::SVector{3, Float64} + c::SVector{3, Float64} +end +function Ralston3() + b = SVector(2 / 9, 1 / 3, 4 / 9) + c = SVector(0.0, 0.5, 0.75) + + return Ralston3(b, c) +end + +""" + RelaxationRalston3(; relaxation_solver = RelaxationSolverNewton()) + +Relaxation version of Ralston's third-order Runge-Kutta method [`Ralston3()`](@ref), +implemented as a [`SubDiagonalRelaxationAlgorithm`](@ref). +The default relaxation solver [`AbstractRelaxationSolver`](@ref) is [`RelaxationSolverNewton`](@ref). +""" +struct RelaxationRalston3{AbstractRelaxationSolver} <: SubDiagonalRelaxationAlgorithm + sub_diagonal_alg::Ralston3 + relaxation_solver::AbstractRelaxationSolver +end +function RelaxationRalston3(; relaxation_solver = RelaxationSolverNewton()) + return RelaxationRalston3{typeof(relaxation_solver)}(Ralston3(), relaxation_solver) +end + +""" + RK44() + +The canonical fourth-order Runge-Kutta method, implemented as a [`SubDiagonalAlgorithm`](@ref). +The weight vector is given by ``\\boldsymbol b = [1/6, 1/3, 1/3, 1/6]`` and the +abscissae/timesteps by ``\\boldsymbol c = [0.0, 0.5, 0.5, 1.0]``. +""" +struct RK44 <: SubDiagonalAlgorithm + b::SVector{4, Float64} + c::SVector{4, Float64} +end +function RK44() + b = SVector(1 / 6, 1 / 3, 1 / 3, 1 / 6) + c = SVector(0.0, 0.5, 0.5, 1.0) + + return RK44(b, c) +end + +""" + RelaxationRK44(; relaxation_solver = RelaxationSolverNewton()) + +Relaxation version of the canonical fourth-order Runge-Kutta method [`RK44()`](@ref), +implemented as a [`SubDiagonalRelaxationAlgorithm`](@ref). +The default relaxation solver [`AbstractRelaxationSolver`](@ref) is [`RelaxationSolverNewton`](@ref). 
+""" +struct RelaxationRK44{AbstractRelaxationSolver} <: SubDiagonalRelaxationAlgorithm + sub_diagonal_alg::RK44 + relaxation_solver::AbstractRelaxationSolver +end +function RelaxationRK44(; relaxation_solver = RelaxationSolverNewton()) + return RelaxationRK44{typeof(relaxation_solver)}(RK44(), relaxation_solver) +end + +# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77 +# This implements the interface components described at +# https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 +# which are used in Trixi.jl. +mutable struct SubDiagonalRelaxationIntegrator{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, Alg, + SimpleIntegratorOptions, + AbstractRelaxationSolver} <: + RelaxationIntegrator + u::uType + du::uType + u_tmp::uType + t::RealT + dt::RealT # current time step + dtcache::RealT # ignored + iter::Int # current number of time steps (iteration) + p::Params # will be the semidiscretization from Trixi.jl + sol::Sol # faked + f::F # `rhs` of the semidiscretization + alg::Alg # `SubDiagonalRelaxationAlgorithm` + opts::SimpleIntegratorOptions + finalstep::Bool # added for convenience + # Addition for Relaxation methodology + direction::uType # RK update, i.e., sum of stages K_i times weights b_i + gamma::RealT # Relaxation parameter + S_old::RealT # Entropy of previous iterate + relaxation_solver::AbstractRelaxationSolver + # Note: Could add another register which would store the summed-up + # dot products ∑ₖ (wₖ ⋅ kₖ) and then integrate only once and not per stage k + # Could also add option `recompute_entropy` for entropy-conservative problems + # to save redundant computations. +end + +function init(ode::ODEProblem, alg::SubDiagonalRelaxationAlgorithm; + dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) 
+ u = copy(ode.u0) + du = zero(u) + u_tmp = zero(u) + + t = first(ode.tspan) + iter = 0 + + # For entropy relaxation + direction = zero(u) + gamma = one(eltype(u)) + semi = ode.p + u_wrap = wrap_array(u, semi) + S_old = integrate(entropy, u_wrap, semi.mesh, semi.equations, semi.solver, + semi.cache) + + integrator = SubDiagonalRelaxationIntegrator(u, du, u_tmp, t, dt, zero(dt), iter, + ode.p, (prob = ode,), ode.f, + alg.sub_diagonal_alg, + SimpleIntegratorOptions(callback, + ode.tspan; + kwargs...), + false, + direction, gamma, S_old, + alg.relaxation_solver) + + initialize_callbacks!(callback, integrator) + + return integrator +end + +function step!(integrator::SubDiagonalRelaxationIntegrator) + @unpack prob = integrator.sol + @unpack alg = integrator + t_end = last(prob.tspan) + callbacks = integrator.opts.callback + + @assert !integrator.finalstep + if isnan(integrator.dt) + error("time step size `dt` is NaN") + end + + limit_dt!(integrator, t_end) + + @trixi_timeit timer() "Relaxation sub-diagonal RK integration step" begin + mesh, equations, dg, cache = mesh_equations_solver_cache(prob.p) + + u_wrap = wrap_array(integrator.u, prob.p) + u_tmp_wrap = wrap_array(integrator.u_tmp, prob.p) + + # First stage + integrator.f(integrator.du, integrator.u, prob.p, integrator.t) + # Try to enable optimizations due to `muladd` by computing this factor only once, see + # https://github.com/trixi-framework/Trixi.jl/pull/2480#discussion_r2224529532 + b1_dt = alg.b[1] * integrator.dt + @threaded for i in eachindex(integrator.u) + integrator.direction[i] = b1_dt * integrator.du[i] + end + + du_wrap = wrap_array(integrator.du, prob.p) + # Entropy change due to first stage + dS = b1_dt * integrate_w_dot_stage(du_wrap, u_wrap, mesh, equations, dg, cache) + + # Second to last stage + for stage in 2:length(alg.c) + c_dt = alg.c[stage] * integrator.dt + @threaded for i in eachindex(integrator.u) + integrator.u_tmp[i] = integrator.u[i] + c_dt * integrator.du[i] + end + 
integrator.f(integrator.du, integrator.u_tmp, prob.p, + integrator.t + alg.c[stage] * integrator.dt) + b_dt = alg.b[stage] * integrator.dt + @threaded for i in eachindex(integrator.u) + integrator.direction[i] = integrator.direction[i] + + b_dt * integrator.du[i] + end + + # Entropy change due to current stage + dS += b_dt * + integrate_w_dot_stage(du_wrap, u_tmp_wrap, mesh, equations, dg, cache) + end + + direction_wrap = wrap_array(integrator.direction, prob.p) + + @trixi_timeit timer() "Relaxation solver" relaxation_solver!(integrator, + u_tmp_wrap, u_wrap, + direction_wrap, dS, + mesh, equations, + dg, cache, + integrator.relaxation_solver) + + integrator.iter += 1 + update_t_relaxation!(integrator) + + # Do relaxed update + @threaded for i in eachindex(integrator.u) + integrator.u[i] = integrator.u[i] + + integrator.gamma * integrator.direction[i] + end + end + + @trixi_timeit timer() "Step-Callbacks" handle_callbacks!(callbacks, integrator) + + check_max_iter!(integrator) + + return nothing +end + +# used for AMR +function Base.resize!(integrator::SubDiagonalRelaxationIntegrator, new_size) + resize!(integrator.u, new_size) + resize!(integrator.du, new_size) + resize!(integrator.u_tmp, new_size) + # Relaxation addition + resize!(integrator.direction, new_size) + + return nothing +end +end # @muladd diff --git a/src/time_integration/relaxation_methods/methods_vanderHouwen.jl b/src/time_integration/relaxation_methods/methods_vanderHouwen.jl new file mode 100644 index 00000000000..852ad27a5a2 --- /dev/null +++ b/src/time_integration/relaxation_methods/methods_vanderHouwen.jl @@ -0,0 +1,347 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! 
format: noindent + +@doc raw""" + vanderHouwenAlgorithm + +Abstract type for van-der-Houwen type Runge-Kutta methods, i.e., +methods with a Butcher tableau of the form +```math +\begin{array} + {c|c|c c c c c c} + i & \boldsymbol c & & & A & & & \\ + \hline + 1 & 0 & & & & & & \\ + 2 & c_2 & a_{21} & & & & & \\ + 3 & c_3 & b_1 & a_{32} & & & & \\ + 4 & c_4 & b_1 & b_2 & a_{43} & & & \\ + \vdots & \vdots & \vdots & \vdots & \ddots & \ddots & & \\ + S & c_S & b_1 & b_2 & \dots & b_{S-2} & a_{S, S-1} & \\ + \hline + & & b_1 & b_2 & \dots & b_{S-2} & b_{S-1} & b_S +\end{array} +``` + +Currently implemented methods are the Carpenter-Kennedy-Lewis 4-stage, 3rd-order method [`CKL43`](@ref) +and the Carpenter-Kennedy-Lewis 5-stage, 4th-order method [`CKL54`](@ref) which are optimized for the +compressible Navier-Stokes equations. +""" +abstract type vanderHouwenAlgorithm <: AbstractTimeIntegrationAlgorithm end + +""" + vanderHouwenRelaxationAlgorithm + +Abstract type for van-der-Houwen type Runge-Kutta algorithms (see [`vanderHouwenAlgorithm`](@ref)) +with relaxation to achieve entropy-conservation/stability. +In addition to the standard Runge-Kutta method, these algorithms are equipped with a +relaxation solver [`AbstractRelaxationSolver`](@ref) which is used to compute the relaxation parameter ``\\gamma``. +This allows the relaxation methods to suppress entropy defects due to the time stepping. + +For details on the relaxation procedure, see +- Ketcheson (2019) + Relaxation Runge-Kutta Methods: Conservation and Stability for Inner-Product Norms + [DOI: 10.1137/19M1263662](https://doi.org/10.1137/19M1263662) +- Ranocha et al. 
(2020) + Relaxation Runge-Kutta Methods: Fully Discrete Explicit Entropy-Stable Schemes for the Compressible Euler and Navier-Stokes Equations + [DOI: 10.1137/19M1263480](https://doi.org/10.1137/19M1263480) + +Currently implemented methods are the Carpenter-Kennedy-Lewis 4-stage, 3rd-order method [`RelaxationCKL43`](@ref) +and the Carpenter-Kennedy-Lewis 5-stage, 4th-order method [`RelaxationCKL54`](@ref) which are optimized for the +compressible Navier-Stokes equations. +""" +abstract type vanderHouwenRelaxationAlgorithm <: + AbstractRelaxationTimeIntegrationAlgorithm end + +""" + CKL43() + +Carpenter-Kennedy-Lewis 4-stage, 3rd-order low-storage Runge-Kutta method, +optimized for the compressible Navier-Stokes equations. +Implemented as a [`vanderHouwenAlgorithm`](@ref). +For the exact coefficients consult the original paper: + +- Kennedy, Carpenter, Lewis (2000) + Low-storage, explicit Runge-Kutta schemes for the compressible Navier-Stokes equations + [DOI: 10.1016/S0168-9274(99)00141-5](https://doi.org/10.1016/S0168-9274(99)00141-5) +""" +struct CKL43 <: vanderHouwenAlgorithm + a::SVector{4, Float64} + b::SVector{4, Float64} + c::SVector{4, Float64} +end +function CKL43() + a = SVector(0.0, + 11847461282814 / 36547543011857, + 3943225443063 / 7078155732230, + -346793006927 / 4029903576067) + + b = SVector(1017324711453 / 9774461848756, + 8237718856693 / 13685301971492, + 57731312506979 / 19404895981398, + -101169746363290 / 37734290219643) + c = SVector(0.0, + a[2], + b[1] + a[3], + b[1] + b[2] + a[4]) + + return CKL43(a, b, c) +end + +""" + RelaxationCKL43(; relaxation_solver = RelaxationSolverNewton()) + +Relaxation version of the 4-stage, 3rd-order low-storage Runge-Kutta method [`CKL43()`](@ref), +implemented as a [`vanderHouwenRelaxationAlgorithm`](@ref). +The default relaxation solver [`AbstractRelaxationSolver`](@ref) is [`RelaxationSolverNewton`](@ref). 
+""" +struct RelaxationCKL43{AbstractRelaxationSolver} <: vanderHouwenRelaxationAlgorithm + van_der_houwen_alg::CKL43 + relaxation_solver::AbstractRelaxationSolver +end +function RelaxationCKL43(; relaxation_solver = RelaxationSolverNewton()) + return RelaxationCKL43{typeof(relaxation_solver)}(CKL43(), relaxation_solver) +end + +""" + CKL54() + +Carpenter-Kennedy-Lewis 5-stage, 4th-order low-storage Runge-Kutta method, +optimized for the compressible Navier-Stokes equations. +Implemented as a [`vanderHouwenAlgorithm`](@ref). +For the exact coefficients consult the original paper: + +- Kennedy, Carpenter, Lewis (2000) + Low-storage, explicit Runge-Kutta schemes for the compressible Navier-Stokes equations + [DOI: 10.1016/S0168-9274(99)00141-5](https://doi.org/10.1016/S0168-9274(99)00141-5) +""" +struct CKL54 <: vanderHouwenAlgorithm + a::SVector{5, Float64} + b::SVector{5, Float64} + c::SVector{5, Float64} +end +function CKL54() + a = SVector(0.0, + 970286171893 / 4311952581923, + 6584761158862 / 12103376702013, + 2251764453980 / 15575788980749, + 26877169314380 / 34165994151039) + + b = SVector(1153189308089 / 22510343858157, + 1772645290293 / 4653164025191, + -1672844663538 / 4480602732383, + 2114624349019 / 3568978502595, + 5198255086312 / 14908931495163) + c = SVector(0.0, + a[2], + b[1] + a[3], + b[1] + b[2] + a[4], + b[1] + b[2] + b[3] + a[5]) + + return CKL54(a, b, c) +end + +""" + RelaxationCKL54(; relaxation_solver = RelaxationSolverNewton()) + +Relaxation version of the 4-stage, 3rd-order low-storage Runge-Kutta method [`CKL54()`](@ref), +implemented as a [`vanderHouwenRelaxationAlgorithm`](@ref). +The default relaxation solver [`AbstractRelaxationSolver`](@ref) is [`RelaxationSolverNewton`](@ref). 
+""" +struct RelaxationCKL54{AbstractRelaxationSolver} <: vanderHouwenRelaxationAlgorithm + van_der_houwen_alg::CKL54 + relaxation_solver::AbstractRelaxationSolver +end +function RelaxationCKL54(; relaxation_solver = RelaxationSolverNewton()) + return RelaxationCKL54{typeof(relaxation_solver)}(CKL54(), relaxation_solver) +end + +# This struct is needed to fake https://github.com/SciML/OrdinaryDiffEq.jl/blob/0c2048a502101647ac35faabd80da8a5645beac7/src/integrators/type.jl#L77 +# This implements the interface components described at +# https://diffeq.sciml.ai/v6.8/basics/integrator/#Handing-Integrators-1 +# which are used in Trixi.jl. +mutable struct vanderHouwenRelaxationIntegrator{RealT <: Real, uType <: AbstractVector, + Params, Sol, F, Alg, + SimpleIntegratorOptions, + AbstractRelaxationSolver} <: + RelaxationIntegrator + u::uType + du::uType + u_tmp::uType + t::RealT + dt::RealT # current time step + dtcache::RealT # ignored + iter::Int # current number of time steps (iteration) + p::Params # will be the semidiscretization from Trixi.jl + sol::Sol # faked + f::F # `rhs` of the semidiscretization + alg::Alg # `vanderHouwenRelaxationAlgorithm` + opts::SimpleIntegratorOptions + finalstep::Bool # added for convenience + # Addition for efficient implementation + k_prev::uType + # Addition for Relaxation methodology + direction::uType # RK update, i.e., sum of stages K_i times weights b_i + gamma::RealT # Relaxation parameter + S_old::RealT # Entropy of previous iterate + relaxation_solver::AbstractRelaxationSolver + # Note: Could add another register which would store the summed-up + # dot products ∑ₖ (wₖ ⋅ kₖ) and then integrate only once and not per stage k + # Could also add option `recompute_entropy` for entropy-conservative problems + # to save redundant computations. +end + +function init(ode::ODEProblem, alg::vanderHouwenRelaxationAlgorithm; + dt, callback::Union{CallbackSet, Nothing} = nothing, kwargs...) 
+ u = copy(ode.u0) + du = similar(u) + u_tmp = copy(u) + k_prev = similar(u) + + t = first(ode.tspan) + iter = 0 + + # For entropy relaxation + direction = zero(u) + gamma = one(eltype(u)) + semi = ode.p + u_wrap = wrap_array(u, semi) + S_old = integrate(entropy, u_wrap, semi.mesh, semi.equations, semi.solver, + semi.cache) + + integrator = vanderHouwenRelaxationIntegrator(u, du, u_tmp, t, dt, zero(dt), iter, + ode.p, (prob = ode,), ode.f, + alg.van_der_houwen_alg, + SimpleIntegratorOptions(callback, + ode.tspan; + kwargs...), + false, + k_prev, direction, gamma, S_old, + alg.relaxation_solver) + + initialize_callbacks!(callback, integrator) + + return integrator +end + +function step!(integrator::vanderHouwenRelaxationIntegrator) + @unpack prob = integrator.sol + @unpack alg = integrator + t_end = last(prob.tspan) + callbacks = integrator.opts.callback + + @assert !integrator.finalstep + if isnan(integrator.dt) + error("time step size `dt` is NaN") + end + + limit_dt!(integrator, t_end) + + @trixi_timeit timer() "Relaxation vdH RK integration step" begin + num_stages = length(alg.c) + + mesh, equations, dg, cache = mesh_equations_solver_cache(prob.p) + u_wrap = wrap_array(integrator.u, prob.p) + u_tmp_wrap = wrap_array(integrator.u_tmp, prob.p) + + # First stage + integrator.f(integrator.du, integrator.u, prob.p, integrator.t) + # Try to enable optimizations due to `muladd` by computing this factor only once, see + # https://github.com/trixi-framework/Trixi.jl/pull/2480#discussion_r2224529532 + b1dt = alg.b[1] * integrator.dt + @threaded for i in eachindex(integrator.u) + integrator.direction[i] = b1dt * integrator.du[i] + + integrator.k_prev[i] = integrator.du[i] # Faster than broadcasted version (with .=) + end + + du_wrap = wrap_array(integrator.du, prob.p) + # Entropy change due to first stage + dS = alg.b[1] * integrator.dt * + integrate_w_dot_stage(du_wrap, u_wrap, mesh, equations, dg, cache) + + a2_dt = alg.a[2] * integrator.dt + @threaded for i in 
eachindex(integrator.u) + integrator.u_tmp[i] = integrator.u[i] + a2_dt * integrator.du[i] + end + + # Second to last stage + for stage in 2:(num_stages - 1) + integrator.f(integrator.du, integrator.u_tmp, prob.p, + integrator.t + alg.c[stage] * integrator.dt) + + # Entropy change due to current stage + bs_dt = alg.b[stage] * integrator.dt + dS += bs_dt * + integrate_w_dot_stage(du_wrap, u_tmp_wrap, mesh, equations, dg, cache) + + bsminus1_minus_as = alg.b[stage - 1] - alg.a[stage] + @threaded for i in eachindex(integrator.u) + # Try to enable optimizations due to `muladd` by avoiding `+=` + # https://github.com/trixi-framework/Trixi.jl/pull/2480#discussion_r2224531702 + integrator.direction[i] = integrator.direction[i] + + bs_dt * integrator.du[i] + + # Subtract previous stage contribution from `u_tmp` and add most recent one + integrator.u_tmp[i] = integrator.u_tmp[i] + + integrator.dt * + (bsminus1_minus_as * integrator.k_prev[i] + + alg.a[stage + 1] * integrator.du[i]) + + integrator.k_prev[i] = integrator.du[i] # Faster than broadcasted version (with .=) + end + end + + # Last stage + integrator.f(integrator.du, integrator.u_tmp, prob.p, + integrator.t + alg.c[num_stages] * integrator.dt) + + bs_dt = alg.b[num_stages] * integrator.dt + dS += bs_dt * + integrate_w_dot_stage(du_wrap, u_tmp_wrap, mesh, equations, dg, cache) + + @threaded for i in eachindex(integrator.u) + integrator.direction[i] = integrator.direction[i] + bs_dt * integrator.du[i] + end + + direction_wrap = wrap_array(integrator.direction, prob.p) + + @trixi_timeit timer() "Relaxation solver" relaxation_solver!(integrator, + u_tmp_wrap, u_wrap, + direction_wrap, dS, + mesh, equations, + dg, cache, + integrator.relaxation_solver) + + integrator.iter += 1 + update_t_relaxation!(integrator) + + # Do relaxed update + @threaded for i in eachindex(integrator.u) + integrator.u[i] = integrator.u[i] + + integrator.gamma * integrator.direction[i] + end + end + + @trixi_timeit timer() "Step-Callbacks" 
handle_callbacks!(callbacks, integrator) + + check_max_iter!(integrator) + + return nothing +end + +# used for AMR +function Base.resize!(integrator::vanderHouwenRelaxationIntegrator, new_size) + resize!(integrator.u, new_size) + resize!(integrator.du, new_size) + resize!(integrator.u_tmp, new_size) + resize!(integrator.k_prev, new_size) + # Relaxation addition + resize!(integrator.direction, new_size) + + return nothing +end +end # @muladd diff --git a/src/time_integration/relaxation_methods/relaxation_methods.jl b/src/time_integration/relaxation_methods/relaxation_methods.jl new file mode 100644 index 00000000000..b0a793dfae4 --- /dev/null +++ b/src/time_integration/relaxation_methods/relaxation_methods.jl @@ -0,0 +1,30 @@ +abstract type AbstractRelaxationTimeIntegrationAlgorithm <: AbstractTimeIntegrationAlgorithm end + +abstract type RelaxationIntegrator <: AbstractTimeIntegrator end + +get_tmp_cache(integrator::RelaxationIntegrator) = (integrator.u_tmp,) + +# some algorithms from DiffEq like FSAL-ones need to be informed when a callback has modified u +u_modified!(integrator::RelaxationIntegrator, ::Bool) = false + +# stop the time integration +function terminate!(integrator::RelaxationIntegrator) + integrator.finalstep = true + empty!(integrator.opts.tstops) + + return nothing +end + +@inline function update_t_relaxation!(integrator::RelaxationIntegrator) + # Check if due to entropy relaxation the final time would not be reached + if integrator.finalstep == true && integrator.gamma != 1 + integrator.gamma = 1 + end + integrator.t += integrator.gamma * integrator.dt + + return nothing +end + +include("entropy_relaxation.jl") +include("methods_subdiagonal.jl") +include("methods_vanderHouwen.jl") diff --git a/src/time_integration/time_integration.jl b/src/time_integration/time_integration.jl index 54eca71d376..0c542e2f722 100644 --- a/src/time_integration/time_integration.jl +++ b/src/time_integration/time_integration.jl @@ -16,6 +16,113 @@ end # Abstract 
supertype of Trixi.jl's own time integrators for dispatch abstract type AbstractTimeIntegrator end +# Abstract supertype for the time integration algorithms of Trixi.jl +abstract type AbstractTimeIntegrationAlgorithm end + +# get a cache where the RHS can be stored +get_du(integrator::AbstractTimeIntegrator) = integrator.du + +# Forward integrator.stats.naccept to integrator.iter (see GitHub PR#771) +function Base.getproperty(integrator::AbstractTimeIntegrator, field::Symbol) + if field === :stats + return (naccept = getfield(integrator, :iter),) + end + # general fallback + return getfield(integrator, field) +end + +# used by adaptive timestepping algorithms in DiffEq +@inline function set_proposed_dt!(integrator::AbstractTimeIntegrator, dt) + (integrator.dt = dt; integrator.dtcache = dt) + + return nothing +end + +# Required e.g. for `glm_speed_callback` +@inline function get_proposed_dt(integrator::AbstractTimeIntegrator) + return integrator.dt +end + +@inline function limit_dt!(integrator::AbstractTimeIntegrator, t_end) + # if the next iteration would push the simulation beyond the end time, set dt accordingly + if integrator.t + integrator.dt > t_end || + isapprox(integrator.t + integrator.dt, t_end) + integrator.dt = t_end - integrator.t + terminate!(integrator) + end + + return nothing +end + +function initialize_callbacks!(callbacks::Union{CallbackSet, Nothing}, + integrator::AbstractTimeIntegrator) + # initialize callbacks + if callbacks isa CallbackSet + foreach(callbacks.continuous_callbacks) do cb + throw(ArgumentError("Continuous callbacks are unsupported.")) + end + foreach(callbacks.discrete_callbacks) do cb + cb.initialize(cb, integrator.u, integrator.t, integrator) + end + end + + return nothing +end + +function handle_callbacks!(callbacks::Union{CallbackSet, Nothing}, + integrator::AbstractTimeIntegrator) + # handle callbacks + if callbacks isa CallbackSet + foreach(callbacks.discrete_callbacks) do cb + if cb.condition(integrator.u, integrator.t, 
integrator) + cb.affect!(integrator) + end + return nothing + end + end + + return nothing +end + +@inline function check_max_iter!(integrator::AbstractTimeIntegrator) + # respect maximum number of iterations + if integrator.iter >= integrator.opts.maxiters && !integrator.finalstep + @warn "Interrupted. Larger maxiters is needed." + terminate!(integrator) + end + + return nothing +end + +""" + Trixi.solve(ode::ODEProblem, alg::AbstractTimeIntegrationAlgorithm; + dt, callbacks, kwargs...) + +Fakes `solve` from https://diffeq.sciml.ai/v6.8/basics/overview/#Solving-the-Problems-1 +""" +function solve(ode::ODEProblem, alg::AbstractTimeIntegrationAlgorithm; + dt, callback = nothing, kwargs...) + integrator = init(ode, alg, dt = dt, callback = callback; kwargs...) + + # Start actual solve + solve!(integrator) +end + +function solve!(integrator::AbstractTimeIntegrator) + @unpack prob = integrator.sol + + integrator.finalstep = false + + @trixi_timeit timer() "main loop" while !integrator.finalstep + step!(integrator) + end + + finalize_callbacks(integrator) + + return TimeIntegratorSolution((first(prob.tspan), integrator.t), + (prob.u0, integrator.u), prob) +end + # Interface required by DiffEqCallbacks.jl function DiffEqBase.get_tstops(integrator::AbstractTimeIntegrator) return integrator.opts.tstops @@ -38,10 +145,13 @@ function finalize_callbacks(integrator::AbstractTimeIntegrator) cb.finalize(cb, integrator.u, integrator.t, integrator) end end + + return nothing end include("methods_2N.jl") include("methods_3Sstar.jl") include("methods_SSP.jl") include("paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl") +include("relaxation_methods/relaxation_methods.jl") end # @muladd diff --git a/src/visualization/recipes_plots.jl b/src/visualization/recipes_plots.jl index 81b6a38fffd..14e985c6360 100644 --- a/src/visualization/recipes_plots.jl +++ b/src/visualization/recipes_plots.jl @@ -6,8 +6,6 @@ #! 
format: noindent # Visualize a single variable in a 2D plot (default: heatmap) -# -# Note: This is an experimental feature and may be changed in future releases without notice. RecipesBase.@recipe function f(pds::PlotDataSeries{<:AbstractPlotData{2}}) @unpack plot_data, variable_id = pds @unpack x, y, data, variable_names, orientation_x, orientation_y = plot_data @@ -32,8 +30,6 @@ RecipesBase.@recipe function f(pds::PlotDataSeries{<:AbstractPlotData{2}}) end # Visualize the mesh in a 2D plot -# -# Note: This is an experimental feature and may be changed in future releases without notice. RecipesBase.@recipe function f(pm::PlotMesh{<:AbstractPlotData{2}}) @unpack plot_data = pm @unpack x, y, mesh_vertices_x, mesh_vertices_y = plot_data @@ -55,8 +51,6 @@ RecipesBase.@recipe function f(pm::PlotMesh{<:AbstractPlotData{2}}) end # Visualize the mesh in a 2D plot -# -# Note: This is an experimental feature and may be changed in future releases without notice. RecipesBase.@recipe function f(pm::PlotMesh{<:PlotData2DCartesian{<:Any, <:AbstractVector{<:AbstractVector}}}) @unpack plot_data = pm @@ -79,8 +73,6 @@ RecipesBase.@recipe function f(pm::PlotMesh{<:PlotData2DCartesian{<:Any, end # Plot all available variables at once for convenience -# -# Note: This is an experimental feature and may be changed in future releases without notice. RecipesBase.@recipe function f(pd::AbstractPlotData) # Create layout that is as square as possible, when there are more than 3 subplots. # This is done with a preference for more columns than rows if not. @@ -152,8 +144,6 @@ end # Create a plot directly from a TrixiODESolution for convenience # The plot is created by a PlotData1D or PlotData2D object. -# -# Note: This is an experimental feature and may be changed in future releases without notice. 
RecipesBase.@recipe function f(sol::TrixiODESolution) # Redirect everything to the recipes below return sol.u[end], sol.prob.p diff --git a/src/visualization/types.jl b/src/visualization/types.jl index 97e67c4940d..f1f5421e105 100644 --- a/src/visualization/types.jl +++ b/src/visualization/types.jl @@ -3,7 +3,6 @@ # This is a union of a Trixi.jl-specific SciMLBase.ODESolution and of Trixi.jl's own # TimeIntegratorSolution. # -# Note: This is an experimental feature and may be changed in future releases without notice. #! format: off const TrixiODESolution = Union{ODESolution{T, N, uType, uType2, DType, tType, rateType, discType, P} where {T, N, uType, uType2, DType, tType, rateType, discType, P<:ODEProblem{uType_, tType_, isinplace, P_, F_} where @@ -42,9 +41,6 @@ end Base.getindex(pd::AbstractPlotData, variable_name) Extract a single variable `variable_name` from `pd` for plotting with `Plots.plot`. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ function Base.getindex(pd::AbstractPlotData, variable_name) variable_id = findfirst(isequal(variable_name), pd.variable_names) @@ -63,9 +59,6 @@ Base.eltype(pd::AbstractPlotData) = Pair{String, PlotDataSeries{typeof(pd)}} Holds all relevant data for creating 2D plots of multiple solution variables and to visualize the mesh. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ struct PlotData2DCartesian{Coordinates, Data, VariableNames, Vertices} <: AbstractPlotData{2} @@ -123,9 +116,6 @@ end Holds all relevant data for creating 1D plots of multiple solution variables and to visualize the mesh. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. 
""" struct PlotData1D{Coordinates, Data, VariableNames, Vertices} <: AbstractPlotData{1} x::Coordinates @@ -146,8 +136,6 @@ function Base.show(io::IO, pd::PlotData1D) end # Auxiliary data structure for visualizing a single variable -# -# Note: This is an experimental feature and may be changed in future releases without notice. struct PlotDataSeries{PD <: AbstractPlotData} plot_data::PD variable_id::Int @@ -177,9 +165,6 @@ end getmesh(pd::AbstractPlotData) Extract grid lines from `pd` for plotting with `Plots.plot`. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ getmesh(pd::AbstractPlotData) = PlotMesh(pd) @@ -195,6 +180,10 @@ Create a new `PlotData2D` object that can be used for visualizing 2D/3D DGSEM so from the solution are used for plotting. This can be changed by passing an appropriate conversion function to `solution_variables`. +For coupled semidiscretizations, i.e., `semi isa` [`SemidiscretizationCoupled`](@ref) a vector of +`PlotData2D` objects is returned, one for each semidiscretization which is part of the +coupled semidiscretization. + If `grid_lines` is `true`, also extract grid vertices for visualizing the mesh. The output resolution is indirectly set via `max_supported_level`: all data is interpolated to `2^max_supported_level` uniformly distributed points in each spatial direction, also setting the @@ -207,9 +196,6 @@ When visualizing data from a three-dimensional simulation, a 2D slice is extract The slice position is specified by a `point` that lies on it, which defaults to `(0.0, 0.0, 0.0)`. Both of these values are ignored when visualizing 2D data. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - # Examples ```julia julia> using Trixi, Plots @@ -233,6 +219,23 @@ function PlotData2D(u_ode, semi; kwargs...) kwargs...) end +function PlotData2D(u_ode, semi::SemidiscretizationCoupled; kwargs...) 
+ plot_data_array = [] + @unpack semis = semi + + foreach_enumerate(semis) do (i, semi_) + u_loc = get_system_u_ode(u_ode, i, semi) + u_loc_wrapped = wrap_array_native(u_loc, semi_) + + push!(plot_data_array, + PlotData2D(u_loc_wrapped, + mesh_equations_solver_cache(semi_)...; + kwargs...)) + end + + return plot_data_array +end + # Redirect `PlotDataTriangulated2D` constructor. function PlotData2DTriangulated(u_ode, semi; kwargs...) PlotData2DTriangulated(wrap_array_native(u_ode, semi), @@ -294,9 +297,6 @@ end Create a `PlotData2D` object from a solution object created by either `OrdinaryDiffEq.solve!` (which returns a `SciMLBase.ODESolution`) or Trixi.jl's own `solve!` (which returns a `TimeIntegratorSolution`). - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ function PlotData2D(sol::TrixiODESolution; kwargs...) PlotData2D(sol.u[end], sol.prob.p; kwargs...) @@ -548,9 +548,6 @@ This applies analogously to three-dimensional simulations, where `slice` may be Another way to visualize 2D/3D data is by creating a plot along a given curve. This is done with the keyword argument `curve`. It can be set to a list of 2D/3D points which define the curve. When using `curve` any other input from `slice` or `point` will be ignored. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ function PlotData1D(u_ode, semi; kwargs...) PlotData1D(wrap_array_native(u_ode, semi), @@ -761,9 +758,6 @@ end Create a `PlotData1D` object from a solution object created by either `OrdinaryDiffEq.solve!` (which returns a `SciMLBase.ODESolution`) or Trixi.jl's own `solve!` (which returns a `TimeIntegratorSolution`). - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. """ function PlotData1D(sol::TrixiODESolution; kwargs...) PlotData1D(sol.u[end], sol.prob.p; kwargs...) 
diff --git a/test/Project.toml b/test/Project.toml index 3559f8cb6e2..a86382a9d78 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,18 +1,22 @@ [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Convex = "f65535da-76fb-5f13-bab9-19810c17039a" +CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199" ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" -FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" +FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" +OrdinaryDiffEqBDF = "6ad6398a-0878-4a85-9266-38940aa047c8" OrdinaryDiffEqFeagin = "101fe9f7-ebb6-4678-b671-3a81e7194747" OrdinaryDiffEqHighOrderRK = "d28bc4f8-55e1-4f49-af69-84c1a99f0f58" OrdinaryDiffEqLowOrderRK = "1344f307-1e59-4825-a18e-ace9aa3fa4c6" @@ -24,25 +28,32 @@ Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Quadmath = "be4d8f0f-7fa4-5f49-b795-2f01399ab2dd" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TrixiTest = "0a316866-cbd0-4425-8bcb-08103b2c1f26" [compat] +Adapt = "4" ADTypes = "1.11" Aqua = "0.8" CairoMakie = "0.12, 0.13, 0.14, 0.15" Convex = "0.16" +CUDA = "5.8" 
DelimitedFiles = "1" DoubleFloats = "1.4.0" Downloads = "1" ECOS = "1.1.2" ExplicitImports = "1.0.1" -FFMPEG = "0.4" +FiniteDiff = "2.27.0" ForwardDiff = "0.10.36, 1" LinearAlgebra = "1" +LinearSolve = "2.36.1, 3" MPI = "0.20.6" NLsolve = "4.5.1" +OrdinaryDiffEqBDF = "1.1" OrdinaryDiffEqFeagin = "1" OrdinaryDiffEqHighOrderRK = "1.1" OrdinaryDiffEqLowOrderRK = "1.2" @@ -54,6 +65,9 @@ Plots = "1.26" Printf = "1" Quadmath = "0.5.10" Random = "1" +SparseArrays = "1" +SparseConnectivityTracer = "1.0.1" +SparseMatrixColorings = "0.4.21" StableRNGs = "1.0.2" Test = "1" -TrixiTest = "0.1" +TrixiTest = "0.1.4" \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index db2c2e9dd88..8f35e1fb58d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -109,4 +109,13 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3) @time if TRIXI_TEST == "all" || TRIXI_TEST == "paper_self_gravitating_gas_dynamics" include("test_paper_self_gravitating_gas_dynamics.jl") end + + @time if TRIXI_TEST == "all" || TRIXI_TEST == "CUDA" + import CUDA + if CUDA.functional() + include("test_cuda.jl") + else + @warn "Unable to run CUDA tests on this machine" + end + end end diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 9b3f2d67903..154088995ca 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -10,6 +10,7 @@ include("test_trixi.jl") @timed_testset "Aqua.jl" begin Aqua.test_all(Trixi, ambiguities = false, + unbound_args = false, # FIXME: UnstructuredSortedBoundaryTypes # exceptions necessary for adding a new method `StartUpDG.estimate_h` # in src/solvers/dgmulti/sbp.jl piracies = (treat_as_own = [Trixi.StartUpDG.RefElemData, diff --git a/test/test_cuda.jl b/test/test_cuda.jl new file mode 100644 index 00000000000..4380ab0e111 --- /dev/null +++ b/test/test_cuda.jl @@ -0,0 +1,79 @@ +module TestCUDA + +using Test +using Trixi + +include("test_trixi.jl") + +# Start with a clean environment: remove Trixi.jl output directory if it exists +outdir = "out" 
+isdir(outdir) && rm(outdir, recursive = true) + +EXAMPLES_DIR = joinpath(examples_dir(), "p4est_2d_dgsem") + +@trixi_testset "elixir_advection_basic_gpu.jl native" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic_gpu.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=8.311947673061856e-6, + linf=6.627000273229378e-5,) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + @test real(ode.p.solver) == Float64 + @test real(ode.p.solver.basis) == Float64 + @test real(ode.p.solver.mortar) == Float64 + # TODO: remake ignores the mesh itself as well + @test real(ode.p.mesh) == Float64 + + @test ode.u0 isa Array + @test ode.p.solver.basis.derivative_matrix isa Array + + @test Trixi.storage_type(ode.p.cache.elements) === Array + @test Trixi.storage_type(ode.p.cache.interfaces) === Array + @test Trixi.storage_type(ode.p.cache.boundaries) === Array + @test Trixi.storage_type(ode.p.cache.mortars) === Array +end + +@trixi_testset "elixir_advection_basic_gpu.jl Float32 / CUDA" begin + # Using CUDA inside the testset since otherwise the bindings are hiddend by the anonymous modules + using CUDA + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic_gpu.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=nothing, # TODO: GPU. [Float32(8.311947673061856e-6)], + linf=nothing, # TODO: GPU. [Float32(6.627000273229378e-5)], + RealT=Float32, + real_type=Float32, + storage_type=CuArray, + sol=nothing,) # TODO: GPU. 
Remove this once we can run the simulation on the GPU + # # Ensure that we do not have excessive memory allocations + # # (e.g., from type instabilities) + # let + # t = sol.t[end] + # u_ode = sol.u[end] + # du_ode = similar(u_ode) + # @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + # end + @test real(ode.p.solver) == Float32 + @test real(ode.p.solver.basis) == Float32 + @test real(ode.p.solver.mortar) == Float32 + # TODO: remake ignores the mesh itself as well + @test real(ode.p.mesh) == Float64 + + @test ode.u0 isa CuArray + @test ode.p.solver.basis.derivative_matrix isa CuArray + + @test Trixi.storage_type(ode.p.cache.elements) === CuArray + @test Trixi.storage_type(ode.p.cache.interfaces) === CuArray + @test Trixi.storage_type(ode.p.cache.boundaries) === CuArray + @test Trixi.storage_type(ode.p.cache.mortars) === CuArray +end + +# Clean up afterwards: delete Trixi.jl output directory +@test_nowarn isdir(outdir) && rm(outdir, recursive = true) + +end # module diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl index 2f34d623aa9..630462d5f73 100644 --- a/test/test_dgmulti_1d.jl +++ b/test/test_dgmulti_1d.jl @@ -94,6 +94,7 @@ end end @trixi_testset "elixir_euler_flux_diff.jl (convergence)" begin + using Trixi: convergence_test mean_convergence = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_euler_flux_diff.jl"), 3) @@ -135,6 +136,7 @@ end end @trixi_testset "elixir_euler_flux_diff.jl (FD SBP)" begin + using Trixi: SummationByPartsOperators, derivative_operator global D = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 4, @@ -166,8 +168,7 @@ end end @trixi_testset "elixir_euler_modified_sod.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_1d", - "elixir_euler_modified_sod.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_modified_sod.jl"), cells_per_dimension=(16,), l2=[0.26352391505659767, 0.4528974787813885, 0.9310255091126164], 
linf=[ @@ -208,6 +209,7 @@ end end @trixi_testset "DGMulti with periodic SBP unit test" begin + using Trixi: periodic_derivative_operator, DGMulti, Line, DGMultiMesh # see https://github.com/trixi-framework/Trixi.jl/pull/1013 global D = periodic_derivative_operator(derivative_order = 1, accuracy_order = 4, diff --git a/test/test_dgmulti_2d.jl b/test/test_dgmulti_2d.jl index b3883bd0fdb..c4fedd2f63a 100644 --- a/test/test_dgmulti_2d.jl +++ b/test/test_dgmulti_2d.jl @@ -316,6 +316,7 @@ end end @trixi_testset "elixir_euler_weakform.jl (convergence)" begin + using Trixi: convergence_test mean_convergence = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), 2) @@ -534,6 +535,7 @@ end end @trixi_testset "elixir_euler_weakform.jl (FD SBP)" begin + using Trixi: SummationByPartsOperators, derivative_operator global D = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 4, @@ -568,6 +570,7 @@ end end @trixi_testset "elixir_euler_weakform.jl (FD SBP, EC)" begin + using Trixi: SummationByPartsOperators, derivative_operator global D = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 4, @@ -650,6 +653,7 @@ end end @trixi_testset "elixir_euler_fdsbp_periodic.jl (arbitrary reference and physical domains)" begin + using Trixi: periodic_derivative_operator global D = periodic_derivative_operator(derivative_order = 1, accuracy_order = 4, xmin = -200.0, @@ -682,6 +686,7 @@ end end @trixi_testset "elixir_euler_fdsbp_periodic.jl (CGSEM)" begin + using Trixi: SummationByPartsOperators D_local = SummationByPartsOperators.legendre_derivative_operator(xmin = 0.0, xmax = 1.0, N = 4) diff --git a/test/test_dgmulti_3d.jl b/test/test_dgmulti_3d.jl index 04e27bdedcf..71acc738df4 100644 --- a/test/test_dgmulti_3d.jl +++ b/test/test_dgmulti_3d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", 
"dgmulti_3d") +EXAMPLES_DIR = joinpath(examples_dir(), "dgmulti_3d") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" @@ -282,6 +282,7 @@ end end @trixi_testset "elixir_euler_weakform_periodic.jl (FD SBP)" begin + using Trixi: SummationByPartsOperators, derivative_operator global D = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 2, @@ -316,6 +317,7 @@ end end @trixi_testset "elixir_euler_weakform_periodic.jl (FD SBP, EC)" begin + using Trixi: SummationByPartsOperators, derivative_operator global D = derivative_operator(SummationByPartsOperators.MattssonNordström2004(), derivative_order = 1, accuracy_order = 2, diff --git a/test/test_mpi.jl b/test/test_mpi.jl index cd3ea7eb6e1..887eb9e01f5 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -30,6 +30,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() end # MPI @trixi_testset "MPI supporting functionality" begin + using Trixi: Trixi, ode_norm, SVector t = 0.5 let u = 1.0 @test ode_norm(u, t) ≈ Trixi.DiffEqBase.ODE_DEFAULT_NORM(u, t) diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl index 7da834b224e..728659751f3 100644 --- a/test/test_mpi_p4est_2d.jl +++ b/test/test_mpi_p4est_2d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "p4est_2d_dgsem") @testset "P4estMesh MPI 2D" begin #! 
format: noindent @@ -20,8 +20,8 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") linf=[6.627000273229378e-5]) @testset "error-based step size control" begin - Trixi.mpi_isroot() && println("-"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("-"^100) + mpi_isroot() && println("elixir_advection_basic.jl with error-based step size control") # Use callbacks without stepsize_callback to test error-based step size control @@ -30,7 +30,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") ode_default_options()..., callback = callbacks) summary_callback() errors = analysis_callback(sol) - if Trixi.mpi_isroot() + if mpi_isroot() @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 end @@ -83,7 +83,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with TreeMesh! l2=[4.949660644033807e-5], - linf=[0.0004867846262313763],) + linf=[0.0004867846262313763]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -99,7 +99,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_unstructured_flag.jl"), l2=[0.0012808538770535593], - linf=[0.01752690016659812],) + linf=[0.01752690016659812]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -114,7 +114,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5],) + linf=[6.21489667023134e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -180,7 +180,8 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") # Test 
MPI-parallel handling of .inp meshes NOT generated by HOHQMesh @trixi_testset "elixir_euler_SD7003airfoil.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + using Trixi: SemidiscretizationHyperbolic, AnalysisCallback + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_SD7003airfoil.jl"), semi=SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index 41ec5056656..efa52b05447 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "p4est_3d_dgsem") @testset "P4estMesh MPI 3D" begin #! format: noindent @@ -20,8 +20,8 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") linf=[0.0014537194925779984]) @testset "error-based step size control" begin - Trixi.mpi_isroot() && println("-"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("-"^100) + mpi_isroot() && println("elixir_advection_basic.jl with error-based step size control") # Use callbacks without stepsize_callback to test error-based step size control @@ -31,7 +31,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") ode_default_options()..., callback = callbacks) summary_callback() errors = analysis_callback(sol) - if Trixi.mpi_isroot() + if mpi_isroot() @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 end @@ -51,7 +51,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as with TreeMesh! 
l2=[9.773852895157622e-6], - linf=[0.0005853874124926162],) + linf=[0.0005853874124926162]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -83,7 +83,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409],) + linf=[0.01840757696885409]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl index 974225f9698..d25b7cfef14 100644 --- a/test/test_mpi_t8code_2d.jl +++ b/test/test_mpi_t8code_2d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "t8code_2d_dgsem") @testset "T8codeMesh MPI 2D" begin #! format: noindent @@ -20,8 +20,8 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") linf=[6.627000273229378e-5]) @testset "error-based step size control" begin - Trixi.mpi_isroot() && println("-"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("-"^100) + mpi_isroot() && println("elixir_advection_basic.jl with error-based step size control") # Use callbacks without stepsize_callback to test error-based step size control @@ -30,7 +30,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") ode_default_options()..., callback = callbacks) summary_callback() errors = analysis_callback(sol) - if Trixi.mpi_isroot() + if mpi_isroot() @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 end @@ -83,7 +83,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with TreeMesh! 
l2=[4.949660644033807e-5], - linf=[0.0004867846262313763],) + linf=[0.0004867846262313763]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -115,7 +115,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5],) + linf=[6.21489667023134e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl index 3dbab4da7f2..561016add44 100644 --- a/test/test_mpi_t8code_3d.jl +++ b/test/test_mpi_t8code_3d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "t8code_3d_dgsem") @testset "T8codeMesh MPI 3D" begin #! format: noindent @@ -20,8 +20,8 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") linf=[0.0014537194925779984]) @testset "error-based step size control" begin - Trixi.mpi_isroot() && println("-"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("-"^100) + mpi_isroot() && println("elixir_advection_basic.jl with error-based step size control") # Use callbacks without stepsize_callback to test error-based step size control @@ -31,7 +31,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") ode_default_options()..., callback = callbacks) summary_callback() errors = analysis_callback(sol) - if Trixi.mpi_isroot() + if mpi_isroot() @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 end @@ -51,7 +51,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as with TreeMesh! 
l2=[1.1302812803902801e-5], - linf=[0.0007889950196294793],) + linf=[0.0007889950196294793]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -83,7 +83,7 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409],) + linf=[0.01840757696885409]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl index 8ac4bdd5055..7152438e7f2 100644 --- a/test/test_mpi_tree.jl +++ b/test/test_mpi_tree.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") # Needed to skip certain tests on Windows CI CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() @@ -25,8 +25,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() @trixi_testset "elixir_advection_restart.jl" begin using OrdinaryDiffEqLowStorageRK: RDPK3SpFSAL49 - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("═"^100) + mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_timeintegration_adaptive.jl")) trixi_include(@__MODULE__, @@ -35,8 +35,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() alg = RDPK3SpFSAL49(), tspan = (0.0, 10.0)) l2_expected, linf_expected = analysis_callback(sol) - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("═"^100) + mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl")) # Errors are exactly the same as in the elixir_advection_extended.jl trixi_include(@__MODULE__, @@ -45,8 +45,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == 
"true") && Sys.iswindows() base_elixir = "elixir_advection_timeintegration_adaptive.jl") l2_actual, linf_actual = analysis_callback(sol) - Trixi.mpi_isroot() && @test l2_actual == l2_expected - Trixi.mpi_isroot() && @test linf_actual == linf_expected + mpi_isroot() && @test l2_actual == l2_expected + mpi_isroot() && @test linf_actual == linf_expected end @trixi_testset "elixir_advection_mortar.jl" begin @@ -60,7 +60,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as in the serial test! l2=[4.913300828257469e-5], - linf=[0.00045263895394385967],) + linf=[0.00045263895394385967]) end @trixi_testset "elixir_advection_amr_nonperiodic.jl" begin @@ -68,7 +68,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() "elixir_advection_amr_nonperiodic.jl"), # Expected errors are exactly the same as in the serial test! l2=[3.2207388565869075e-5], - linf=[0.0007508059772436404],) + linf=[0.0007508059772436404]) end @trixi_testset "elixir_advection_restart_amr.jl" begin @@ -81,15 +81,16 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # Linear scalar advection with AMR # These example files are only for testing purposes and have no practical use @trixi_testset "elixir_advection_amr_refine_twice.jl" begin + using Trixi: Trixi # Here, we also test that SaveSolutionCallback prints multiple mesh files with AMR # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" - Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) + mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) Trixi.MPI.Barrier(Trixi.mpi_comm()) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_refine_twice.jl"), l2=[0.00020547512522578292], - linf=[0.007831753383083506],) + linf=[0.007831753383083506]) meshfiles = filter(file -> 
endswith(file, ".h5") && startswith(file, "mesh"), readdir(outdir)) @test length(meshfiles) > 1 @@ -99,7 +100,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_coarsen_twice.jl"), l2=[0.0014321062757891826], - linf=[0.0253454486893413],) + linf=[0.0253454486893413]) end # Hyperbolic diffusion @@ -204,7 +205,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() 0.0002973166773747593, 0.0002973166773760916, 0.001154106793870291 - ],) + ]) end end @@ -245,8 +246,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() ]) @testset "error-based step size control" begin - Trixi.mpi_isroot() && println("-"^100) - Trixi.mpi_isroot() && + mpi_isroot() && println("-"^100) + mpi_isroot() && println("elixir_euler_ec.jl with error-based step size control") # Use callbacks without stepsize_callback to test error-based step size control @@ -257,7 +258,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() ode_default_options()..., callback = callbacks) summary_callback() errors = analysis_callback(sol) - if Trixi.mpi_isroot() + if mpi_isroot() @test errors.l2≈[ 0.061653630426688116, 0.05006930431098764, @@ -323,7 +324,7 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() 0.03857193149447702, 0.031090457959835893, 0.12125130332971423 - ],) + ]) end if !CI_ON_WINDOWS # see comment on `CI_ON_WINDOWS` in `test/test_mpi.jl` diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index 8f903a849d2..b0852789b0a 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -27,6 +27,34 @@ isdir(outdir) && rm(outdir, recursive = true) du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + semi32 = Trixi.trixi_adapt(Array, Float32, semi) + @test real(semi32.solver) == Float32 + @test real(semi32.solver.basis) == Float32 + 
@test real(semi32.solver.mortar) == Float32 + # TODO: remake ignores the mesh itself as well + @test real(semi32.mesh) == Float64 +end + +@trixi_testset "elixir_advection_basic.jl (Float32)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic_gpu.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[Float32(8.311947673061856e-6)], + linf=[Float32(6.627000273229378e-5)], + RealT=Float32, + real_type=Float32) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test_broken (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + @test real(ode.p.solver) == Float32 + @test real(ode.p.solver.basis) == Float32 + @test real(ode.p.solver.mortar) == Float32 + # TODO: remake ignores the mesh itself as well + @test real(ode.p.mesh) == Float64 end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin @@ -63,7 +91,7 @@ end "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with StructuredMesh! 
l2=[4.949660644033807e-5], - linf=[0.0004867846262313763],) + linf=[0.0004867846262313763]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -78,7 +106,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_unstructured_flag.jl"), l2=[0.0012808538770535593], - linf=[0.01752690016659812],) + linf=[0.01752690016659812]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -92,7 +120,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5],) + linf=[6.21489667023134e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -106,7 +134,7 @@ end @trixi_testset "elixir_advection_restart_amr.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_amr.jl"), l2=[2.869137983727866e-6], - linf=[3.8353423270964804e-5],) + linf=[3.8353423270964804e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -335,6 +363,7 @@ end end @trixi_testset "elixir_euler_sedov_blast_wave_sc_subcell.jl" begin + using Trixi: Trixi, DGSEM, SemidiscretizationHyperbolic, semidiscretize, CallbackSet @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov_blast_wave_sc_subcell.jl"), l2=[ @@ -362,6 +391,31 @@ end # Corresponding issue: https://github.com/trixi-framework/Trixi.jl/issues/1877 @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 end + + # Test `resize!` + ode_alg = Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks) + integrator = Trixi.init(ode, ode_alg, dt = 42.0, callback = callbacks) + + resize!(integrator, 42) + @test length(integrator.u) == 42 + @test length(integrator.du) == 42 + @test length(integrator.u_tmp) == 42 + + # Test `resize!` for non `VolumeIntegralSubcellLimiting` + let + solver = 
DGSEM(basis, surface_flux) + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + + ode = semidiscretize(semi, tspan) + ode_alg = Trixi.SimpleSSPRK33(stage_callbacks = (;)) + callbacks = CallbackSet(summary_callback) + integrator = Trixi.init(ode, ode_alg, dt = 11.0, callback = callbacks) + + resize!(integrator, 4711) + @test length(integrator.u) == 4711 + @test length(integrator.du) == 4711 + @test length(integrator.u_tmp) == 4711 + end end @trixi_testset "elixir_euler_sedov.jl with HLLC Flux" begin @@ -841,6 +895,7 @@ end end @trixi_testset "elixir_euler_blast_wave_pure_fv.jl" begin + using Trixi: Trixi @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "tree_2d_dgsem"), "elixir_euler_blast_wave_pure_fv.jl"), l2=[ @@ -903,8 +958,8 @@ end end @trixi_testset "elixir_euler_SD7003airfoil.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", - "elixir_navierstokes_SD7003airfoil.jl"), + using Trixi: SemidiscretizationHyperbolic, AnalysisCallback + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_SD7003airfoil.jl"), semi=SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; boundary_conditions = boundary_conditions_hyp), @@ -936,8 +991,7 @@ end end @trixi_testset "elixir_euler_density_wave_tracers.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", - "elixir_euler_density_wave_tracers.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_density_wave_tracers.jl"), l2=[ 0.0012704690524147188, 0.00012704690527390463, @@ -963,6 +1017,32 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_euler_cylinder_bowshock_mach3.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_cylinder_bowshock_mach3.jl"), + tspan=(0.0, 1e-3), + l2=[ + 0.03787745781612722, + 0.03339276348608649, + 0.05301001151898993, + 0.2868802674001281 + ], + linf=[ + 2.5347156069842978, + 2.6657123832452414, + 
3.786891603220761, + 21.305497055838977 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index 729045d83f8..fbca9fc9091 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -62,7 +62,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as with TreeMesh! l2=[9.773852895157622e-6], - linf=[0.0005853874124926162],) + linf=[0.0005853874124926162]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -106,7 +106,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409],) + linf=[0.01840757696885409]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -551,6 +551,43 @@ end end end +@trixi_testset "elixir_mhd_alfven_wave_er.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_mhd_alfven_wave_er.jl"), + l2=[ + 0.0052864046546744065, + 0.009963357787771665, + 0.006635699953141596, + 0.01295540589311982, + 0.013939326496053958, + 0.010192741315114568, + 0.004631666336074305, + 0.012267586777052244, + 0.0018063823439272181 + ], + linf=[ + 0.021741826900806394, + 0.0470226920658848, + 0.025036937229995254, + 0.05043002191230382, + 0.06018360063552164, + 0.04338351710391075, + 0.023607975939848536, + 0.050740527490335, + 0.006909064342577296 + ]) + # Larger values for allowed allocations due to usage of custom + # integrator which are not *recorded* for the methods from + # OrdinaryDiffEq.jl + # Corresponding issue: 
https://github.com/trixi-framework/Trixi.jl/issues/1877 + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15_000 + end +end + @trixi_testset "elixir_mhd_alfven_wave_nonconforming.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave_nonconforming.jl"), @@ -718,18 +755,18 @@ end @trixi_testset "elixir_euler_weak_blast_wave_amr.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weak_blast_wave_amr.jl"), l2=[ - 0.012046270976464931, - 0.01894521652831441, - 0.01951983946363743, - 0.019748755875702628, - 0.15017285006198244 + 0.01374649869395016, + 0.01993458602992416, + 0.020403214655756098, + 0.020408263790073853, + 0.14975849029503904 ], linf=[ - 0.3156585581400839, - 0.6653806948576124, - 0.5451454769741236, - 0.558669830478818, - 3.6406796982784635 + 0.4411601724293266, + 0.668308654218055, + 0.7351134068050753, + 0.5955002383710662, + 3.1811162616598985 ], tspan=(0.0, 0.025),) # Ensure that we do not have excessive memory allocations @@ -751,9 +788,9 @@ end @test isapprox(state_integrals[5], initial_state_integrals[5], atol = 1e-13) end -@trixi_testset "elixir_euler_OMNERA_M6_wing.jl" begin +@trixi_testset "elixir_euler_ONERA_M6_wing.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_euler_OMNERA_M6_wing.jl"), + "elixir_euler_ONERA_M6_wing.jl"), l2=[ 1.3302852203314697e-7, 7.016342225152883e-8, diff --git a/test/test_p4est_3d_mhdmultiion.jl b/test/test_p4est_3d_mhdmultiion.jl index a2ab1f34f7e..633899a4a20 100644 --- a/test/test_p4est_3d_mhdmultiion.jl +++ b/test/test_p4est_3d_mhdmultiion.jl @@ -55,6 +55,13 @@ EXAMPLES_DIR = joinpath(examples_dir(), "p4est_3d_dgsem") end end +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. 
+# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. @trixi_testset "Provably entropy-stable LLF-type fluxes for multi-ion GLM-MHD" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_ec.jl"), l2=[ @@ -90,7 +97,7 @@ end 0.0014540668676888365 ], surface_flux=(FluxPlusDissipation(flux_ruedaramirez_etal, - DissipationLaxFriedrichsEntropyVariables()), + DissipationLaxFriedrichsEntropyVariables(max_abs_speed_naive)), flux_nonconservative_ruedaramirez_etal), tspan=(0.0, 0.05)) # Ensure that we do not have excessive memory allocations @@ -102,5 +109,49 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_mhdmultiion_convergence.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_convergence.jl"), + l2=[ + 2.7007694451840977e-5, + 2.252632596997783e-5, + 1.830892822103072e-5, + 1.7457386132678724e-5, + 3.965825276181703e-5, + 6.886878771068099e-5, + 3.216774733720572e-5, + 0.00013796601797391608, + 2.762642533644496e-5, + 7.877500410069398e-5, + 0.00012184040930856932, + 8.918795955887214e-5, + 0.0002122739932637704, + 1.0532691581216071e-6 + ], + linf=[ + 0.0005846835977684206, + 0.00031591380039502903, + 0.0002529555339790268, + 0.0003873459403432866, + 0.0007355557980894822, + 0.0012929706727252688, + 0.0002558003707378437, + 0.0028085112041740246, + 0.0006114366794293113, + 0.001257825301983151, + 0.0018924211424776738, + 0.0007347447431757664, + 0.004148291057411768, + 1.8948511576480304e-5 + ], tspan=(0.0, 0.05)) + # Ensure that we do not 
have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end end # module diff --git a/test/test_paper_self_gravitating_gas_dynamics.jl b/test/test_paper_self_gravitating_gas_dynamics.jl index b7437888ce6..be84defb5e8 100644 --- a/test/test_paper_self_gravitating_gas_dynamics.jl +++ b/test/test_paper_self_gravitating_gas_dynamics.jl @@ -9,7 +9,7 @@ include("test_trixi.jl") outdir = "out" isdir(outdir) && rm(outdir, recursive = true) -const EXAMPLES_DIR = pkgdir(Trixi, "examples", "paper_self_gravitating_gas_dynamics") +EXAMPLES_DIR = joinpath(examples_dir(), "paper_self_gravitating_gas_dynamics") # Numerical examples from the Euler-gravity paper @testset "paper_self_gravitating_gas_dynamics" begin diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl index 8969b06eb13..479a42ca9df 100644 --- a/test/test_parabolic_1d.jl +++ b/test/test_parabolic_1d.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") + # Start with a clean environment: remove Trixi output directory if it exists outdir = "out" isdir(outdir) && rm(outdir, recursive = true) @@ -13,8 +15,7 @@ isdir(outdir) && rm(outdir, recursive = true) #! 
format: noindent @trixi_testset "TreeMesh1D: elixir_advection_diffusion.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_advection_diffusion.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_diffusion.jl"), initial_refinement_level=4, tspan=(0.0, 0.4), polydeg=3, l2=[8.40483031802723e-6], linf=[2.8990878868540015e-5]) @@ -29,8 +30,7 @@ isdir(outdir) && rm(outdir, recursive = true) end @trixi_testset "TreeMesh1D: elixir_advection_diffusion_ldg.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_advection_diffusion_ldg.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_diffusion_ldg.jl"), initial_refinement_level=4, tspan=(0.0, 0.4), polydeg=3, l2=[9.234438322146518e-6], linf=[5.425491770139068e-5]) # Ensure that we do not have excessive memory allocations @@ -44,8 +44,7 @@ end end @trixi_testset "TreeMesh1D: elixir_diffusion_ldg.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_diffusion_ldg.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_diffusion_ldg.jl"), initial_refinement_level=4, tspan=(0.0, 0.4), polydeg=3, l2=[9.235894939144276e-6], linf=[5.402550135213957e-5]) # Ensure that we do not have excessive memory allocations @@ -58,9 +57,21 @@ end end end +@trixi_testset "TreeMesh1D: elixir_diffusion_ldg_newton_krylov.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_diffusion_ldg_newton_krylov.jl"), + l2=[4.2710445174631516e-6], linf=[2.28491835256861e-5]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "TreeMesh1D: elixir_advection_diffusion_restart.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_advection_diffusion_restart.jl"), + 
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_diffusion_restart.jl"), l2=[1.0679933947301556e-5], linf=[3.910500545667439e-5]) # Ensure that we do not have excessive memory allocations @@ -74,8 +85,7 @@ end end @trixi_testset "TreeMesh1D: elixir_advection_diffusion.jl (AMR)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_advection_diffusion.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_diffusion.jl"), tspan=(0.0, 0.0), initial_refinement_level=5) tspan = (0.0, 1.0) ode = semidiscretize(semi, tspan) @@ -107,7 +117,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_periodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_periodic.jl"), l2=[ 0.0001133835907077494, @@ -130,7 +140,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_periodic.jl: GradientVariablesEntropy" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_periodic.jl"), equations_parabolic=CompressibleNavierStokesDiffusion1D(equations, mu = mu(), @@ -157,7 +167,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_walls.jl"), l2=[ 0.0004702331100298379, @@ -181,7 +191,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls.jl: GradientVariablesEntropy" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_walls.jl"), equations_parabolic=CompressibleNavierStokesDiffusion1D(equations, mu = mu(), @@ -209,7 +219,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl" begin - 
@test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_walls_amr.jl"), equations_parabolic=CompressibleNavierStokesDiffusion1D(equations, mu = mu(), @@ -236,7 +246,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl: GradientVariablesEntropy" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_convergence_walls_amr.jl"), equations_parabolic=CompressibleNavierStokesDiffusion1D(equations, mu = mu(), @@ -264,8 +274,7 @@ end end @trixi_testset "TreeMesh1D: elixir_navierstokes_viscous_shock.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_navierstokes_viscous_shock.jl"), + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_navierstokes_viscous_shock.jl"), l2=[ 0.00025762354103445303, 0.0001433692781569829, @@ -285,6 +294,29 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "TreeMesh1D: elixir_navierstokes_viscous_shock_imex.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_navierstokes_viscous_shock_imex.jl"), + l2=[ + 0.0016637374421260447, + 0.0014571616754917322, + 0.0014844170557610763 + ], + linf=[ + 0.0054568179823693, + 0.003950567209489719, + 0.004092222605649232 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi output directory diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index d25af1bda9b..5e7dbc12865 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean environment: remove Trixi.jl output 
directory if it exists outdir = "out" isdir(outdir) && rm(outdir, recursive = true) @@ -13,6 +15,7 @@ isdir(outdir) && rm(outdir, recursive = true) #! format: noindent @trixi_testset "DGMulti 2D rhs_parabolic!" begin + using Trixi dg = DGMulti(polydeg = 2, element_type = Quad(), approximation_type = Polynomial(), surface_integral = SurfaceIntegralWeakForm(flux_central), volume_integral = VolumeIntegralWeakForm()) @@ -28,9 +31,9 @@ isdir(outdir) && rm(outdir, recursive = true) semi = SemidiscretizationHyperbolicParabolic(mesh, equations, equations_parabolic, initial_condition, dg) - @test_nowarn_mod show(stdout, semi) - @test_nowarn_mod show(stdout, MIME"text/plain"(), semi) - @test_nowarn_mod show(stdout, boundary_condition_do_nothing) + @trixi_test_nowarn show(stdout, semi) + @trixi_test_nowarn show(stdout, MIME"text/plain"(), semi) + @trixi_test_nowarn show(stdout, boundary_condition_do_nothing) @test nvariables(semi) == nvariables(equations) @test Base.ndims(semi) == Base.ndims(mesh) @@ -44,8 +47,8 @@ isdir(outdir) && rm(outdir, recursive = true) # test "do nothing" BC just returns first argument @test boundary_condition_do_nothing(u0, nothing) == u0 - @unpack cache, cache_parabolic, equations_parabolic = semi - @unpack gradients = cache_parabolic + (; cache, cache_parabolic, equations_parabolic) = semi + (; gradients) = cache_parabolic for dim in eachindex(gradients) fill!(gradients[dim], zero(eltype(gradients[dim]))) end @@ -58,7 +61,7 @@ isdir(outdir) && rm(outdir, recursive = true) Trixi.calc_gradient!(gradients, u0, t, mesh, equations_parabolic, boundary_condition_periodic, dg, parabolic_scheme, cache, cache_parabolic) - @unpack x, y, xq, yq = mesh.md + (; x, y, xq, yq) = mesh.md @test getindex.(gradients[1], 1) ≈ 2 * xq .* yq @test getindex.(gradients[2], 1) ≈ xq .^ 2 @@ -79,7 +82,7 @@ isdir(outdir) && rm(outdir, recursive = true) end @trixi_testset "DGMulti: elixir_advection_diffusion.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", 
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_advection_diffusion.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[0.2485803335154642], @@ -95,7 +98,7 @@ end end @trixi_testset "DGMulti: elixir_advection_diffusion_periodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_advection_diffusion_periodic.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[0.03180371984888462], @@ -111,7 +114,7 @@ end end @trixi_testset "DGMulti: elixir_advection_diffusion_nonperiodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_advection_diffusion_nonperiodic.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[0.002123168335604323], @@ -127,7 +130,7 @@ end end @trixi_testset "DGMulti: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_navierstokes_convergence.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[ @@ -153,7 +156,7 @@ end end @trixi_testset "DGMulti: elixir_navierstokes_convergence_curved.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_navierstokes_convergence_curved.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[ @@ -167,7 +170,7 @@ end 0.04103131887989486, 0.03990424032494211, 0.13094018584692968 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -179,7 +182,7 @@ end end @trixi_testset "DGMulti: elixir_navierstokes_lid_driven_cavity.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_navierstokes_lid_driven_cavity.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.5), l2=[ @@ -205,7 +208,7 @@ end end 
@trixi_testset "TreeMesh2D: elixir_advection_diffusion.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion.jl"), initial_refinement_level=2, tspan=(0.0, 0.4), polydeg=5, l2=[4.0915532997994255e-6], @@ -221,7 +224,7 @@ end end @trixi_testset "TreeMesh2D: elixir_advection_diffusion.jl (LDG)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion.jl"), solver_parabolic=ViscousFormulationLocalDG(), initial_refinement_level=2, tspan=(0.0, 0.4), polydeg=5, @@ -237,7 +240,7 @@ end end @trixi_testset "TreeMesh2D: elixir_advection_diffusion.jl (Refined mesh)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) @@ -271,7 +274,7 @@ end end @trixi_testset "TreeMesh2D: elixir_advection_diffusion_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion_amr.jl"), initial_refinement_level=2, base_level=2, @@ -290,7 +293,7 @@ end end @trixi_testset "TreeMesh2D: elixir_advection_diffusion_nonperiodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion_nonperiodic.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), l2=[0.007646800618485118], @@ -306,7 +309,7 @@ end end @trixi_testset "TreeMesh2D: elixir_advection_diffusion_nonperiodic.jl (LDG)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion_nonperiodic.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), 
solver_parabolic=ViscousFormulationLocalDG(), @@ -322,7 +325,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), analysis_callback=AnalysisCallback(semi, @@ -353,7 +356,8 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (isothermal walls)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + using Trixi: Trixi + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), heat_bc_top_bottom=Isothermal((x, t, equations) -> Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, @@ -383,7 +387,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (Entropy gradient variables)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), @@ -410,7 +414,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (Entropy gradient variables, isothermal walls)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), @@ -441,7 +445,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (flux differencing)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), 
volume_integral=VolumeIntegralFluxDifferencing(flux_central), @@ -468,7 +472,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_convergence.jl (Refined mesh)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0), initial_refinement_level=3) LLID = Trixi.local_leaf_cells(mesh.tree) @@ -509,7 +513,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_lid_driven_cavity.jl"), initial_refinement_level=2, tspan=(0.0, 0.5), l2=[ @@ -535,7 +539,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_shearlayer_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_shearlayer_amr.jl"), l2=[ 0.005155557460409018, @@ -553,7 +557,7 @@ end end @trixi_testset "TreeMesh2D: elixir_navierstokes_taylor_green_vortex_sutherland.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_taylor_green_vortex_sutherland.jl"), l2=[ 0.001452856280034929, @@ -571,7 +575,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_periodic.jl"), trees_per_dimension=(1, 1), initial_refinement_level=2, tspan=(0.0, 0.5), @@ -588,7 +592,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_periodic.jl"), 
trees_per_dimension=(1, 1), initial_refinement_level=2, tspan=(0.0, 0.5), @@ -605,7 +609,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic_curved.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_periodic_curved.jl"), trees_per_dimension=(1, 1), initial_refinement_level=2, tspan=(0.0, 0.5), @@ -622,7 +626,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_periodic_amr.jl"), tspan=(0.0, 0.01), l2=[0.014715887539773128], @@ -638,7 +642,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_nonperiodic_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_amr.jl"), tspan=(0.0, 0.01), l2=[0.007934195641974433], @@ -654,7 +658,7 @@ end end @trixi_testset "P4estMesh2D: elixir_advection_diffusion_nonperiodic_curved.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_curved.jl"), trees_per_dimension=(1, 1), initial_refinement_level=2, tspan=(0.0, 0.5), @@ -671,7 +675,7 @@ end end @trixi_testset "P4estMesh2D: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=1, tspan=(0.0, 0.2), l2=[ @@ -697,7 +701,7 @@ end end @trixi_testset "P4estMesh2D: elixir_navierstokes_convergence_nonperiodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + 
@test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_convergence_nonperiodic.jl"), initial_refinement_level=1, tspan=(0.0, 0.2), l2=[ @@ -723,7 +727,7 @@ end end @trixi_testset "P4estMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_lid_driven_cavity.jl"), initial_refinement_level=2, tspan=(0.0, 0.5), l2=[ @@ -749,7 +753,7 @@ end end @trixi_testset "P4estMesh2D: elixir_navierstokes_lid_driven_cavity_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_lid_driven_cavity_amr.jl"), tspan=(0.0, 1.0), l2=[ @@ -771,7 +775,7 @@ end end @trixi_testset "elixir_navierstokes_NACA0012airfoil_mach08.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_NACA0012airfoil_mach08.jl"), l2=[0.000186486564226516, 0.0005076712323400374, @@ -817,7 +821,7 @@ end end @trixi_testset "elixir_navierstokes_NACA0012airfoil_mach085_restart.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_NACA0012airfoil_mach085_restart.jl"), l2=[ 6.191672324705442e-6, @@ -842,7 +846,7 @@ end end @trixi_testset "P4estMesh2D: elixir_navierstokes_viscous_shock.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_viscous_shock.jl"), l2=[ 0.0002576236264053728, @@ -866,8 +870,34 @@ end end end +@trixi_testset "P4estMesh2D: elixir_navierstokes_viscous_shock_newton_krylov.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", + "elixir_navierstokes_viscous_shock_newton_krylov.jl"), + tspan=(0.0, 
0.1), + l2=[ + 3.468233560427797e-5, + 2.64864594855224e-5, + 7.879490760481979e-10, + 2.8748482665365446e-5 + ], + linf=[ + 0.00018754529350140103, + 0.00014045634087878067, + 9.043610782328732e-9, + 0.00014499382160382268 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_navierstokes_SD7003airfoil.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_SD7003airfoil.jl"), l2=[ 9.292899618740586e-5, @@ -893,7 +923,7 @@ end end @trixi_testset "elixir_navierstokes_SD7003airfoil.jl (CFL-Interval)" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_SD7003airfoil.jl"), l2=[ 9.292895651912815e-5, @@ -920,7 +950,7 @@ end end @trixi_testset "elixir_navierstokes_vortex_street.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_vortex_street.jl"), l2=[ 0.012420217727434794, @@ -946,7 +976,7 @@ end end @trixi_testset "elixir_navierstokes_poiseuille_flow.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_poiseuille_flow.jl"), l2=[ 0.028671228188785286, @@ -971,7 +1001,7 @@ end end @trixi_testset "elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_kelvin_helmholtz_instability_sc_subcell.jl"), l2=[ 0.1987691550257618, @@ -1001,7 +1031,7 @@ end end @trixi_testset 
"elixir_navierstokes_freestream_symmetry.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_freestream_symmetry.jl"), l2=[ 4.37868326434923e-15, @@ -1026,7 +1056,7 @@ end end @trixi_testset "elixir_navierstokes_couette_flow.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_couette_flow.jl"), l2=[ 0.009585252225488753, @@ -1051,7 +1081,7 @@ end end @trixi_testset "elixir_navierstokes_blast_reflective.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_navierstokes_blast_reflective.jl"), l2=[ 0.08271777454941344, diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index bc61a3ba365..341292c2e02 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" isdir(outdir) && rm(outdir, recursive = true) @@ -13,7 +15,7 @@ isdir(outdir) && rm(outdir, recursive = true) #! 
format: noindent @trixi_testset "DGMulti: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_3d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_3d", "elixir_navierstokes_convergence.jl"), cells_per_dimension=(4, 4, 4), tspan=(0.0, 0.1), l2=[ @@ -41,7 +43,7 @@ isdir(outdir) && rm(outdir, recursive = true) end @trixi_testset "DGMulti: elixir_navierstokes_convergence_curved.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_3d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_3d", "elixir_navierstokes_convergence_curved.jl"), cells_per_dimension=(4, 4, 4), tspan=(0.0, 0.1), l2=[ @@ -69,7 +71,7 @@ end end @trixi_testset "DGMulti: elixir_navierstokes_taylor_green_vortex.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_3d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_3d", "elixir_navierstokes_taylor_green_vortex.jl"), cells_per_dimension=(4, 4, 4), tspan=(0.0, 0.25), l2=[ @@ -97,7 +99,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), l2=[ @@ -125,7 +127,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (isothermal walls)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), heat_bc_top_bottom=Isothermal((x, t, equations) -> Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, @@ -157,7 +159,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (Entropy gradient variables)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", 
"elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), @@ -186,7 +188,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (Entropy gradient variables, isothermal walls)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), @@ -219,7 +221,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (flux differencing)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), volume_integral=VolumeIntegralFluxDifferencing(flux_central), @@ -248,7 +250,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_convergence.jl (Refined mesh)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) @@ -291,7 +293,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), initial_refinement_level=2, tspan=(0.0, 0.25), l2=[ @@ -319,7 +321,7 @@ end end @trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl (Refined mesh)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) @@ -367,7 +369,7 @@ end end @trixi_testset 
"P4estMesh3D: elixir_navierstokes_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), l2=[ @@ -395,7 +397,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), initial_refinement_level=2, tspan=(0.0, 0.25), surface_flux=FluxHLL(min_max_speed_naive), @@ -424,7 +426,7 @@ end end @trixi_testset "TreeMesh3D: elixir_advection_diffusion_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_advection_diffusion_amr.jl"), initial_refinement_level=2, base_level=2, @@ -443,7 +445,7 @@ end end @trixi_testset "TreeMesh3D: elixir_advection_diffusion_amr.jl (LDG)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_advection_diffusion_amr.jl"), solver_parabolic=ViscousFormulationLocalDG(), initial_refinement_level=2, @@ -463,7 +465,7 @@ end end @trixi_testset "TreeMesh3D: elixir_advection_diffusion_nonperiodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_advection_diffusion_nonperiodic.jl"), l2=[0.0009808996243280868], linf=[0.01732621559135459]) @@ -478,7 +480,7 @@ end end @trixi_testset "TreeMesh3D: elixir_advection_diffusion_nonperiodic.jl (LDG)" begin - @test_trixi_include(joinpath(examples_dir(), "tree_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_advection_diffusion_nonperiodic.jl"), solver_parabolic=ViscousFormulationLocalDG(), l2=[0.0009432415534931421], 
linf=[0.016955330290404563]) @@ -493,7 +495,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_taylor_green_vortex_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_taylor_green_vortex_amr.jl"), initial_refinement_level=0, max_level=2, @@ -523,7 +525,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_blast_wave_amr.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_blast_wave_amr.jl"), tspan=(0.0, 0.01), l2=[ @@ -546,7 +548,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_viscous_shock.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_viscous_shock.jl"), l2=[ 0.0002576235461250765, @@ -573,7 +575,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_viscous_shock_dirichlet_bc.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_viscous_shock_dirichlet_bc.jl"), l2=[ 0.0002576236289909761, @@ -600,7 +602,7 @@ end end @trixi_testset "P4estMesh3D: elixir_navierstokes_crm.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", "elixir_navierstokes_crm.jl"), l2=[ 2.2353998537135728e-10, diff --git a/test/test_performance_specializations_2d.jl b/test/test_performance_specializations_2d.jl index 4fd39c78f64..4b89d281455 100644 --- a/test/test_performance_specializations_2d.jl +++ b/test/test_performance_specializations_2d.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" isdir(outdir) && 
rm(outdir, recursive = true) @@ -14,7 +16,7 @@ isdir(outdir) && rm(outdir, recursive = true) @timed_testset "TreeMesh2D, flux_shima_etal_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_ec.jl"), initial_refinement_level = 0, tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_shima_etal_turbo, surface_flux = flux_shima_etal_turbo) @@ -54,7 +56,7 @@ end @timed_testset "TreeMesh2D, flux_ranocha_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_ec.jl"), initial_refinement_level = 0, tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_ranocha_turbo, surface_flux = flux_ranocha_turbo) u_ode = copy(sol.u[end]) @@ -93,7 +95,7 @@ end @timed_testset "StructuredMesh2D, flux_shima_etal_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_2d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_euler_ec.jl"), cells_per_dimension = (1, 1), tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_shima_etal_turbo, surface_flux = flux_shima_etal_turbo) @@ -133,7 +135,7 @@ end @timed_testset "StructuredMesh2D, flux_ranocha_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_2d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_euler_ec.jl"), cells_per_dimension = (1, 1), tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_ranocha_turbo, surface_flux = flux_ranocha_turbo) u_ode = copy(sol.u[end]) diff --git a/test/test_performance_specializations_3d.jl b/test/test_performance_specializations_3d.jl index 929fc7e3621..6b18f44c702 100644 --- a/test/test_performance_specializations_3d.jl +++ b/test/test_performance_specializations_3d.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean 
environment: remove Trixi.jl output directory if it exists outdir = "out" isdir(outdir) && rm(outdir, recursive = true) @@ -14,7 +16,7 @@ isdir(outdir) && rm(outdir, recursive = true) @timed_testset "TreeMesh3D, flux_shima_etal_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_3d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_euler_ec.jl"), initial_refinement_level = 0, tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_shima_etal_turbo, surface_flux = flux_shima_etal_turbo) @@ -54,7 +56,7 @@ end @timed_testset "TreeMesh3D, flux_ranocha_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_3d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "tree_3d_dgsem", "elixir_euler_ec.jl"), initial_refinement_level = 0, tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_ranocha_turbo, surface_flux = flux_ranocha_turbo) u_ode = copy(sol.u[end]) @@ -93,7 +95,7 @@ end @timed_testset "StructuredMesh3D, flux_shima_etal_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_3d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "structured_3d_dgsem", "elixir_euler_ec.jl"), cells_per_dimension = (1, 1, 1), tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_shima_etal_turbo, surface_flux = flux_shima_etal_turbo) @@ -133,7 +135,7 @@ end @timed_testset "StructuredMesh3D, flux_ranocha_turbo" begin trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_3d_dgsem", "elixir_euler_ec.jl"), + joinpath(EXAMPLES_DIR, "structured_3d_dgsem", "elixir_euler_ec.jl"), cells_per_dimension = (1, 1, 1), tspan = (0.0, 0.0), polydeg = 3, volume_flux = flux_ranocha_turbo, surface_flux = flux_ranocha_turbo) u_ode = copy(sol.u[end]) diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index 77dec71f8ca..9a52a7ec2ea 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -13,7 +13,7 @@ include("test_trixi.jl") outdir = "out" isdir(outdir) && 
rm(outdir, recursive = true) -const EXAMPLES_DIR = pkgdir(Trixi, "examples") +EXAMPLES_DIR = examples_dir() @testset "Special elixirs" begin #! format: noindent @@ -110,8 +110,54 @@ end @test A * x ≈ Ax end +@testset "Test Jacobian of DG (1D)" begin + @timed_testset "TreeMesh: Linear advection" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "tree_1d_fdsbp", + "elixir_advection_upwind.jl"), + tspan = (0.0, 0.0)) + + A, _ = linear_structure(semi) + + J = jacobian_ad_forward(semi) + @test Matrix(A) ≈ J + λ = eigvals(J) + @test maximum(real, λ) < 10 * sqrt(eps(real(semi))) + + J = jacobian_fd(semi) + @test Matrix(A) ≈ J + λ = eigvals(J) + @test maximum(real, λ) < 10 * sqrt(eps(real(semi))) + + # See https://github.com/trixi-framework/Trixi.jl/pull/2514 + @test count(real.(λ) .>= -10) > 5 + # See https://github.com/trixi-framework/Trixi.jl/pull/2522 + t0 = zero(real(semi)) + u0_ode = 1e9 * compute_coefficients(t0, semi) + J = jacobian_fd(semi; t0, u0_ode) + λ = eigvals(J) + @test count((-200 .<= real.(λ) .<= -10) .&& (-100 .<= imag.(λ) .<= 100)) == 0 + @test count(isapprox.(imag.(λ), 0.0, atol = 10 * sqrt(eps(real(semi))))) == 2 + end + + @timed_testset "StructuredMesh: Compressible Euler equations" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "structured_1d_dgsem", + "elixir_euler_source_terms.jl"), + tspan = (0.0, 0.0)) + + J = jacobian_ad_forward(semi) + λ = eigvals(J) + @test maximum(real, λ) < 1e-13 + + J = jacobian_fd(semi) + λ = eigvals(J) + @test maximum(real, λ) < 5e-8 + end +end + @testset "Test Jacobian of DG (2D)" begin - @timed_testset "Linear advection" begin + @timed_testset "TreeMesh: Linear advection" begin trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_extended.jl"), @@ -129,7 +175,7 @@ end @test maximum(real, λ) < 10 * sqrt(eps(real(semi))) end - @timed_testset "Linear advection-diffusion" begin + @timed_testset "TreeMesh: Linear advection-diffusion" begin trixi_include(@__MODULE__, 
joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion.jl"), @@ -138,9 +184,15 @@ end J = jacobian_ad_forward(semi) λ = eigvals(J) @test maximum(real, λ) < 10 * sqrt(eps(real(semi))) + + J_parabolic = jacobian_ad_forward_parabolic(semi) + λ_parabolic = eigvals(J_parabolic) + # Parabolic spectrum is real and negative + @test maximum(real, λ_parabolic) < 10^(-14) + @test maximum(imag, λ_parabolic) < 10^(-14) end - @timed_testset "Compressible Euler equations" begin + @timed_testset "TreeMesh: Compressible Euler equations" begin trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_density_wave.jl"), @@ -163,9 +215,8 @@ end jacobian_ad_forward(semi) end - @timed_testset "DGMulti (weak form)" begin - gamma = 1.4 - equations = CompressibleEulerEquations2D(gamma) + @timed_testset "DGMulti: Euler, weak form" begin + equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_density_wave solver = DGMulti(polydeg = 5, element_type = Quad(), @@ -186,9 +237,8 @@ end @test maximum(real, λ) < 7.0e-7 end - @timed_testset "DGMulti (SBP, flux differencing)" begin - gamma = 1.4 - equations = CompressibleEulerEquations2D(gamma) + @timed_testset "DGMulti: Euler, SBP & flux differencing" begin + equations = CompressibleEulerEquations2D(1.4) initial_condition = initial_condition_density_wave solver = DGMulti(polydeg = 5, element_type = Quad(), @@ -210,7 +260,7 @@ end end end - @timed_testset "Navier-Stokes" begin + @timed_testset "TreeMesh: Navier-Stokes" begin trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), @@ -219,9 +269,15 @@ end J = jacobian_ad_forward(semi) λ = eigvals(J) @test maximum(real, λ) < 0.2 + + J_parabolic = jacobian_ad_forward_parabolic(semi) + λ_parabolic = eigvals(J_parabolic) + # Parabolic spectrum is real and negative + @test maximum(real, λ_parabolic) < 10^(-16) + @test maximum(imag, λ_parabolic) < 10^(-15) end - @timed_testset "MHD" 
begin + @timed_testset "TreeMesh: MHD" begin trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_mhd_alfven_wave.jl"), @@ -229,7 +285,16 @@ end @test_nowarn jacobian_ad_forward(semi) end - @timed_testset "EulerGravity" begin + @timed_testset "UnstructuredMesh2D: Advection" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "unstructured_2d_dgsem", + "elixir_advection_basic.jl"), + tspan = (0.0, 0.0)) + + @test_nowarn jacobian_ad_forward(semi) + end + + @timed_testset "TreeMesh: EulerGravity" begin trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "paper_self_gravitating_gas_dynamics", @@ -239,6 +304,44 @@ end λ = eigvals(J) @test maximum(real, λ) < 1.5 end + + @timed_testset "StructuredMesh: Polytropic Euler equations" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "structured_2d_dgsem", + "elixir_eulerpolytropic_wave.jl"), + cells_per_dimension = (6, 6), + tspan = (0.0, 0.0)) + + J = jacobian_ad_forward(semi) + λ = eigvals(J) + @test maximum(real, λ) < 0.05 + end + + @timed_testset "P4estMesh: Navier-Stokes" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", + "elixir_navierstokes_viscous_shock.jl"), + tspan = (0.0, 0.0)) + + J = jacobian_ad_forward(semi) + λ = eigvals(J) + @test maximum(real, λ) < 0.05 + + J_parabolic = jacobian_ad_forward_parabolic(semi) + λ_parabolic = eigvals(J_parabolic) + # Parabolic spectrum is real and negative + @test maximum(real, λ_parabolic) < 8e-14 + @test maximum(imag, λ_parabolic) < 8e-14 + end + + @timed_testset "T8codeMesh: Advection" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "t8code_2d_dgsem", + "elixir_advection_unstructured_flag.jl"), + tspan = (0.0, 0.0), initial_refinement_level = 0, + polydeg = 2) + @test_nowarn jacobian_ad_forward(semi) + end end @timed_testset "Test linear structure (3D)" begin @@ -252,17 +355,52 @@ end end @timed_testset "Test Jacobian of DG (3D)" begin - trixi_include(@__MODULE__, - joinpath(EXAMPLES_DIR, 
"tree_3d_dgsem", - "elixir_advection_extended.jl"), - tspan = (0.0, 0.0), initial_refinement_level = 1) - A, _ = linear_structure(semi) + @timed_testset "TreeMesh: Advection" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "tree_3d_dgsem", + "elixir_advection_extended.jl"), + tspan = (0.0, 0.0), initial_refinement_level = 1) + A, _ = linear_structure(semi) + + J = jacobian_ad_forward(semi) + @test Matrix(A) ≈ J + + J = jacobian_fd(semi) + @test Matrix(A) ≈ J + end + + @timed_testset "StructuredMesh: MHD" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "structured_3d_dgsem", + "elixir_mhd_alfven_wave.jl"), + cells_per_dimension = (2, 2, 2), + polydeg = 2, + tspan = (0.0, 0.0)) + + @test_nowarn jacobian_ad_forward(semi) + end + + @timed_testset "P4estMesh: Navier-Stokes" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "p4est_3d_dgsem", + "elixir_navierstokes_convergence.jl"), + initial_refinement_level = 0, + tspan = (0.0, 0.0)) + + @test_nowarn jacobian_ad_forward(semi) + @test_nowarn jacobian_ad_forward_parabolic(semi) + end - J = jacobian_ad_forward(semi) - @test Matrix(A) ≈ J + @timed_testset "T8CodeMesh: Advection" begin + trixi_include(@__MODULE__, + joinpath(EXAMPLES_DIR, "t8code_3d_dgsem", + "elixir_advection_cubed_sphere.jl"), + polydeg = 2, + trees_per_face_dimension = 3, layers = 2, + tspan = (0.0, 0.0)) - J = jacobian_fd(semi) - @test Matrix(A) ≈ J + @test_nowarn jacobian_ad_forward(semi) + end end @testset "AD using ForwardDiff" begin @@ -328,8 +466,8 @@ end end @timed_testset "elixir_euler_ad.jl" begin - @test_nowarn_mod trixi_include(joinpath(examples_dir(), "special_elixirs", - "elixir_euler_ad.jl")) + @test_trixi_include(joinpath(examples_dir(), "special_elixirs", + "elixir_euler_ad.jl")) end end end diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl index 40a238b262a..fa7d5f37a8c 100644 --- a/test/test_structured_1d.jl +++ b/test/test_structured_1d.jl @@ -5,7 +5,7 @@ using Trixi 
include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "structured_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "structured_1d_dgsem") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" @@ -59,6 +59,7 @@ end end @trixi_testset "elixir_advection_float128.jl" begin + using Quadmath: Float128 @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_float128.jl"), l2=Float128[6.49879312655540217059228636803492411e-09], linf=Float128[5.35548407857266390181158920649552284e-08]) @@ -198,6 +199,39 @@ end end end +@trixi_testset "elixir_euler_weak_blast_er.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_weak_blast_er.jl"), + analysis_interval=100, + l2=[0.1199630838410044, + 0.1562196058317499, + 0.44836353019483344], + linf=[ + 0.2255546997256792, + 0.29412938937652194, + 0.8558237244455227 + ]) + # Larger values for allowed allocations due to usage of custom + # integrator which are not *recorded* for the methods from + # OrdinaryDiffEq.jl + # Corresponding issue: https://github.com/trixi-framework/Trixi.jl/issues/1877 + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15_000 + end + + # test both short and long printing formats + @test_nowarn show(relaxation_solver) + println() + @test_nowarn println(relaxation_solver) + println() + @test_nowarn display(relaxation_solver) + # Test `:compact` printing + show(IOContext(IOBuffer(), :compact => true), MIME"text/plain"(), relaxation_solver) +end + @trixi_testset "elixir_linearizedeuler_characteristic_system.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_linearizedeuler_characteristic_system.jl"), @@ -229,6 +263,7 @@ end end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin + using Trixi: Trixi @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "tree_1d_dgsem"), "elixir_euler_convergence_pure_fv.jl"), mesh=StructuredMesh(16, (0.0,), 
(2.0,)), diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index be421737cc7..bc0b2ea880f 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "structured_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "structured_2d_dgsem") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" @@ -59,7 +59,7 @@ end 6.314906965187994e-5, 6.31490696496595e-5, 6.314906965032563e-5 - ],) + ]) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) @@ -84,6 +84,9 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + + # Test plotdata construction for coupled semidiscretization + @test_nowarn pd = PlotData2D(sol) end @trixi_testset "elixir_advection_meshview.jl" begin @@ -95,7 +98,7 @@ end linf=[ 6.627000273318195e-5, 6.62700027264096e-5 - ],) + ]) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin # Ensure that we do not have excessive memory allocations @@ -275,7 +278,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.219208035582454e-6], - linf=[3.438434404412494e-5],) + linf=[3.438434404412494e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -319,6 +322,32 @@ end end end +@trixi_testset "elixir_euler_convergence_implicit_sparse_jacobian.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_convergence_implicit_sparse_jacobian.jl"), + tspan=(0.0, 1.0), + l2=[ + 0.0025545032994393493, + 0.0025848892135096136, + 0.002585815262287367, + 0.0031668773337869584 + ], + linf=[ + 0.010367159504626189, + 0.009326212633131492, + 0.008372785091578683, + 0.011242647117379434 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + 
let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi_float_type, t)) < 1000 + end +end + @trixi_testset "elixir_eulermulti_convergence_ec.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), l2=[ @@ -555,9 +584,16 @@ end end end -@trixi_testset "elixir_euler_free_stream.jl with FluxRotated(flux_lax_friedrichs)" begin +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +@trixi_testset "elixir_euler_free_stream.jl with FluxRotated(FluxLaxFriedrichs(max_abs_speed_naive))" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), - surface_flux=FluxRotated(flux_lax_friedrichs), + surface_flux=FluxRotated(FluxLaxFriedrichs(max_abs_speed_naive)), l2=[ 2.063350241405049e-15, 1.8571016296925367e-14, @@ -1097,7 +1133,14 @@ end 0.0, 2.6014507178710646e-5 ], - surface_flux=(flux_lax_friedrichs, + # Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of + # `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. + # In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+ # Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. + # To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. + # We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the + # `StepsizeCallback` (CFL-Condition) and less diffusion. + surface_flux=(FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell_local_jump), volume_flux=(flux_central, flux_nonconservative_powell_local_jump), @@ -1153,7 +1196,7 @@ end 5.317911003777098e-7, 9.92786092363085e-7, 3.430672968714232e-8 - ],) + ]) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) diff --git a/test/test_structured_3d.jl b/test/test_structured_3d.jl index 30dc69d3bdb..c6c474fe4ab 100644 --- a/test/test_structured_3d.jl +++ b/test/test_structured_3d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "structured_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "structured_3d_dgsem") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" @@ -62,7 +62,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.0025903889347585777], - linf=[0.018407576968841655],) + linf=[0.018407576968841655]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -126,9 +126,16 @@ end end end -@trixi_testset "elixir_euler_free_stream.jl with FluxRotated(flux_lax_friedrichs)" begin +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. 
+# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. +@trixi_testset "elixir_euler_free_stream.jl with FluxRotated(FluxLaxFriedrichs(max_abs_speed_naive))" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), - surface_flux=FluxRotated(flux_lax_friedrichs), + surface_flux=FluxRotated(FluxLaxFriedrichs(max_abs_speed_naive)), l2=[ 2.8815700334367128e-15, 9.361915278236651e-15, @@ -269,7 +276,7 @@ end 0.01282206030593043, 0.03911437990598213, 0.021962225923304324, 0.03169774571258743, 0.021591564663781426, 0.034028148178115364, - 0.020084593242858988],) + 0.020084593242858988]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -280,7 +287,14 @@ end end end -@trixi_testset "elixir_mhd_alfven_wave.jl with flux_lax_friedrichs" begin +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
+@trixi_testset "elixir_mhd_alfven_wave.jl with FluxLaxFriedrichs(max_abs_speed_naive)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), l2=[0.0030477691235949685, 0.00145609137038748, 0.0009092809766088607, 0.0017949926915475929, @@ -292,7 +306,8 @@ end 0.02126543791857216, 0.031563506812970266, 0.02116105422516923, 0.03419432640106229, 0.020324891223351533], - surface_flux=(flux_lax_friedrichs, flux_nonconservative_powell),) + surface_flux=(FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_powell),) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index a771991772c..1bc03be5c81 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ -16,6 +16,7 @@ mkdir(outdir) #! format: noindent @trixi_testset "test load mesh from path" begin + using Trixi: T8codeMesh mktempdir() do path @test_throws "Unknown file extension: .unknown_ext" begin mesh = T8codeMesh(touch(joinpath(path, "dummy.unknown_ext")), 2) @@ -33,6 +34,7 @@ end # @test_throws "Discovered negative volumes" begin @test begin + using Trixi: Trixi, T8codeMesh # Unstructured mesh with six cells which have left-handed node ordering. mesh_file = Trixi.download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", joinpath(EXAMPLES_DIR, @@ -46,6 +48,7 @@ end @trixi_testset "test t8code mesh from p4est connectivity" begin @test begin + using Trixi: Trixi, T8codeMesh # Here we use the connectivity constructor from `P4est.jl` since the # method dispatch works only on `Ptr{p4est_connectivity}` which # actually is `Ptr{P4est.LibP4est.p4est_connectivity}`. @@ -58,6 +61,7 @@ end @trixi_testset "test t8code mesh from ABAQUS HOHQMesh file" begin @test begin + using Trixi: Trixi, T8codeMesh # Unstructured ABAQUS mesh file created with HOHQMesh.. 
file_path = Trixi.download("https://gist.githubusercontent.com/jmark/9e0da4306e266617eeb19bc56b0e7feb/raw/e6856e1deb648a807f6bb6d6dcacff9e55d94e2a/round_2d_tank.inp", joinpath(EXAMPLES_DIR, "round_2d_tank.inp")) @@ -117,7 +121,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_unstructured_flag.jl"), l2=[0.002019623611753929], - linf=[0.03542375961299987],) + linf=[0.03542375961299987]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -134,7 +138,7 @@ end "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with StructuredMesh! l2=[4.949660644033807e-5], - linf=[0.0004867846262313763],) + linf=[0.0004867846262313763]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -148,7 +152,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5],) + linf=[6.21489667023134e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -164,7 +168,7 @@ end # This test is identical to the one in `test_p4est_2d.jl`. 
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_amr.jl"), l2=[2.869137983727866e-6], - linf=[3.8353423270964804e-5],) + linf=[3.8353423270964804e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_t8code_3d.jl b/test/test_t8code_3d.jl index 352d73df7d0..f442a0bb3b6 100644 --- a/test/test_t8code_3d.jl +++ b/test/test_t8code_3d.jl @@ -15,6 +15,7 @@ mkdir(outdir) @testset "T8codeMesh3D" begin @trixi_testset "test t8code mesh from p8est connectivity" begin @test begin + using Trixi: Trixi, T8codeMesh # Here we use the connectivity constructor from `P4est.jl` since the # method dispatch works only on `Ptr{p8est_connectivity}` which # actually is `Ptr{P4est.LibP4est.p8est_connectivity}`. @@ -78,7 +79,7 @@ mkdir(outdir) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as with TreeMesh! l2=[1.1302812803902801e-5], - linf=[0.0007889950196294793],) + linf=[0.0007889950196294793]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -127,7 +128,7 @@ mkdir(outdir) @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409],) + linf=[0.01840757696885409]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -310,6 +311,7 @@ mkdir(outdir) end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin + using Trixi: Trixi @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "tree_3d_dgsem"), "elixir_euler_convergence_pure_fv.jl"), l2=[ diff --git a/test/test_threaded.jl b/test/test_threaded.jl index d4515e0492f..cf6359e0a0c 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean environment: remove 
Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) @@ -15,23 +17,23 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) @testset "TreeMesh" begin @trixi_testset "elixir_advection_restart.jl" begin - elixir = joinpath(examples_dir(), "tree_2d_dgsem", + elixir = joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_extended.jl") - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && println(elixir) + mpi_isroot() && println("═"^100) + mpi_isroot() && println(elixir) trixi_include(@__MODULE__, elixir, tspan = (0.0, 10.0)) l2_expected, linf_expected = analysis_callback(sol) - elixir = joinpath(examples_dir(), "tree_2d_dgsem", + elixir = joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_restart.jl") - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && println(elixir) + mpi_isroot() && println("═"^100) + mpi_isroot() && println(elixir) # Errors are exactly the same as in the elixir_advection_extended.jl trixi_include(@__MODULE__, elixir) l2_actual, linf_actual = analysis_callback(sol) - Trixi.mpi_isroot() && @test l2_actual == l2_expected - Trixi.mpi_isroot() && @test linf_actual == linf_expected + mpi_isroot() && @test l2_actual == l2_expected + mpi_isroot() && @test linf_actual == linf_expected # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -44,7 +46,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_advection_restart.jl with threaded time integration" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_restart.jl"), alg=CarpenterKennedy2N54(williamson_condition = false, thread = Trixi.True()), @@ -54,7 +56,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_advection_amr_refine_twice.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, 
"tree_2d_dgsem", "elixir_advection_amr_refine_twice.jl"), l2=[0.00020547512522578292], linf=[0.007831753383083506]) @@ -70,7 +72,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_advection_amr_coarsen_twice.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_amr_coarsen_twice.jl"), l2=[0.0014321062757891826], linf=[0.0253454486893413]) @@ -86,7 +88,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_source_terms_nonperiodic.jl"), l2=[ 2.259440511766445e-6, @@ -113,7 +115,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_euler_ec.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_ec.jl"), l2=[ 0.061751715597716854, @@ -139,7 +141,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_euler_positivity.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_euler_positivity.jl"), l2=[ 0.48862067511841695, @@ -165,7 +167,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "elixir_advection_diffusion.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_diffusion.jl"), initial_refinement_level=2, tspan=(0.0, 0.4), polydeg=5, alg=RDPK3SpFSAL49(thread = Trixi.True()), @@ -183,7 +185,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "FDSBP, elixir_advection_extended.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_fdsbp", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_fdsbp", "elixir_advection_extended.jl"), 
l2=[2.898644263922225e-6], linf=[8.491517930142578e-6], @@ -200,7 +202,7 @@ Trixi.MPI.Barrier(Trixi.mpi_comm()) end @trixi_testset "FDSBP, elixir_euler_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_fdsbp", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_fdsbp", "elixir_euler_convergence.jl"), l2=[ 1.7088389997042244e-6, @@ -229,7 +231,7 @@ end @testset "StructuredMesh" begin @trixi_testset "elixir_advection_restart.jl with waving flag mesh" begin - @test_trixi_include(joinpath(examples_dir(), "structured_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_advection_restart.jl"), l2=[0.00016265538265929818], linf=[0.0015194252169410394], @@ -248,7 +250,7 @@ end end @trixi_testset "elixir_mhd_ec.jl" begin - @test_trixi_include(joinpath(examples_dir(), "structured_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_mhd_ec.jl"), l2=[0.04937478399958968, 0.0611701500558669, 0.06099805934392425, 0.031551737882277144, @@ -275,7 +277,7 @@ end @testset "UnstructuredMesh" begin @trixi_testset "elixir_acoustics_gauss_wall.jl" begin - @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "unstructured_2d_dgsem", "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965, 0.03803795043486467, 0.0, @@ -300,7 +302,7 @@ end @testset "P4estMesh" begin @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), l2=[ 0.0034516244508588046, @@ -326,7 +328,7 @@ end end @trixi_testset "elixir_eulergravity_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "p4est_2d_dgsem", 
"elixir_eulergravity_convergence.jl"), l2=[ 0.00024871265138964204, @@ -346,7 +348,7 @@ end @testset "T8codeMesh" begin @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin - @test_trixi_include(joinpath(examples_dir(), "t8code_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "t8code_2d_dgsem", "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), l2=[ 0.0034516244508588046, @@ -363,7 +365,7 @@ end end @trixi_testset "elixir_eulergravity_convergence.jl" begin - @test_trixi_include(joinpath(examples_dir(), "t8code_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "t8code_2d_dgsem", "elixir_eulergravity_convergence.jl"), l2=[ 0.00024871265138964204, @@ -383,7 +385,7 @@ end @testset "DGMulti" begin @trixi_testset "elixir_euler_weakform.jl (SBP, EC)" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), volume_integral=VolumeIntegralFluxDifferencing(flux_ranocha), @@ -400,7 +402,7 @@ end 0.05321027922532284, 0.05321027922605448, 0.13392025411839015 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -413,7 +415,7 @@ end end @trixi_testset "elixir_euler_curved.jl with threaded time integration" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_euler_curved.jl"), alg=RDPK3SpFSAL49(thread = Trixi.True()), l2=[ @@ -440,7 +442,7 @@ end end @trixi_testset "elixir_euler_triangulate_pkg_mesh.jl" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_euler_triangulate_pkg_mesh.jl"), l2=[ 2.344076909832665e-6, @@ -466,7 +468,7 @@ end end @trixi_testset "elixir_euler_fdsbp_periodic.jl (2D)" begin - @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", + 
@test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_2d", "elixir_euler_fdsbp_periodic.jl"), l2=[ 1.3333320340010056e-6, @@ -492,7 +494,7 @@ end end @trixi_testset "elixir_euler_fdsbp_periodic.jl (3D)" begin - @test_trixi_include(joinpath(examples_dir(), + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_3d/elixir_euler_fdsbp_periodic.jl"), l2=[ 7.561468750241556e-5, diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index d8658062b90..e16d4bcd1bc 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" @@ -64,7 +64,8 @@ end l2=[0.00017373554109980247], linf=[0.0006021275678165239], maxiters=1, - initial_condition=Trixi.initial_condition_sin) + initial_condition=Trixi.initial_condition_sin, + visualization=TrivialCallback()) end @trixi_testset "elixir_advection_extended.jl with initial_condition_constant" begin @@ -72,7 +73,8 @@ end l2=[2.441369287653687e-16], linf=[4.440892098500626e-16], maxiters=1, - initial_condition=initial_condition_constant) + initial_condition=initial_condition_constant, + visualization=TrivialCallback()) end @trixi_testset "elixir_advection_extended.jl with initial_condition_linear_x" begin @@ -82,7 +84,8 @@ end maxiters=1, initial_condition=Trixi.initial_condition_linear_x, boundary_conditions=Trixi.boundary_condition_linear_x, - periodicity=false) + periodicity=false, + visualization=TrivialCallback()) end @trixi_testset "elixir_advection_extended.jl with initial_condition_convergence_test" begin @@ -92,7 +95,8 @@ end maxiters=1, initial_condition=initial_condition_convergence_test, boundary_conditions=BoundaryConditionDirichlet(initial_condition_convergence_test), - periodicity=false) + periodicity=false, + visualization=TrivialCallback()) end end @@ -181,6 +185,7 
@@ end redirect_stderr(f) do trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), + visualization = TrivialCallback(), summary_callback = TrivialCallback(), analysis_callback = TrivialCallback(), alive_callback = TrivialCallback()) @@ -274,7 +279,7 @@ end # Create a DGSEM solver with polynomials of degree `polydeg` volume_flux = (flux_central, flux_nonconservative) - surface_flux = (flux_lax_friedrichs, flux_nonconservative) + surface_flux = (FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative) solver = DGSEM(polydeg = 3, surface_flux = surface_flux, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl index 98ee7626c17..bbe4b23c4a2 100644 --- a/test/test_tree_1d_advection.jl +++ b/test/test_tree_1d_advection.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Linear scalar advection" begin #! 
format: noindent @@ -42,7 +42,7 @@ end @trixi_testset "elixir_advection_amr.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), l2=[0.3540206249507417], - linf=[0.9999896603382347],) + linf=[0.9999896603382347]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -56,7 +56,7 @@ end @trixi_testset "elixir_advection_amr_nonperiodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_nonperiodic.jl"), l2=[4.283508859843524e-6], - linf=[3.235356127918171e-5],) + linf=[3.235356127918171e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -157,6 +157,7 @@ end end @trixi_testset "elixir_advection_doublefloat.jl" begin + using DoubleFloats: Double64 @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_doublefloat.jl"), l2=Double64[6.80895929885700039832943251427357703e-11], linf=Double64[5.82834770064525291688100323411704252e-10]) diff --git a/test/test_tree_1d_burgers.jl b/test/test_tree_1d_burgers.jl index a4e60d050db..6fa2064ea18 100644 --- a/test/test_tree_1d_burgers.jl +++ b/test/test_tree_1d_burgers.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Inviscid Burgers" begin #! format: noindent diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index 56e76e80db3..d30db8be899 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Compressible Euler" begin #! 
format: noindent @@ -15,13 +15,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") l2=[ 2.2527950196212703e-8, 1.8187357193835156e-8, - 7.705669939973104e-8 - ], + 7.705669939973104e-8], linf=[ 1.6205433861493646e-7, 1.465427772462391e-7, 5.372255111879554e-7 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -281,7 +280,7 @@ end 2.9766770877037168, 0.16838100902295852, 2.6655773445485798 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -316,7 +315,7 @@ end 3.4296365168219216, 0.17635583964559245, 2.6574584326179505 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -370,7 +369,7 @@ end @trixi_testset "elixir_euler_positivity.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_positivity.jl"), l2=[1.6493820253458906, 0.19793887460986834, 0.9783506076125921], - linf=[4.71751203912051, 0.5272411022735763, 2.7426163947635844],) + linf=[4.71751203912051, 0.5272411022735763, 2.7426163947635844]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -416,6 +415,8 @@ end end @trixi_testset "test_quasi_1D_entropy" begin + using Trixi: CompressibleEulerEquationsQuasi1D, CompressibleEulerEquations1D, + entropy, SVector a = 0.9 u_1D = SVector(1.1, 0.2, 2.1) u_quasi_1D = SVector(a * 1.1, a * 0.2, a * 2.1, a) diff --git a/test/test_tree_1d_eulergravity.jl b/test/test_tree_1d_eulergravity.jl index 70cc294812d..d264a3f4c72 100644 --- a/test/test_tree_1d_eulergravity.jl +++ b/test/test_tree_1d_eulergravity.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Compressible Euler with self-gravity" begin #! 
format: noindent diff --git a/test/test_tree_1d_eulermulti.jl b/test/test_tree_1d_eulermulti.jl index 7f5b6d50c94..c6c149021c6 100644 --- a/test/test_tree_1d_eulermulti.jl +++ b/test/test_tree_1d_eulermulti.jl @@ -2,15 +2,16 @@ module TestExamples1DEulerMulti using Test using Trixi -using ForwardDiff include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Compressible Euler Multicomponent" begin @trixi_testset "Testing entropy2cons and cons2entropy" begin using ForwardDiff + using Trixi: Trixi, CompressibleEulerMulticomponentEquations1D, cons2entropy, + entropy2cons, SVector gammas = (1.3272378792562836, 1.5269959187969864, 1.8362285750521512, 1.0409061360276926, 1.4652015053812224, 1.3626493264184423) gas_constants = (1.817636851910076, 6.760820475922636, 5.588953939749113, diff --git a/test/test_tree_1d_fdsbp.jl b/test/test_tree_1d_fdsbp.jl index 5034ab2ca4a..d8f66d2d5a0 100644 --- a/test/test_tree_1d_fdsbp.jl +++ b/test/test_tree_1d_fdsbp.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_fdsbp") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_fdsbp") @testset "Linear scalar advection" begin #! format: noindent diff --git a/test/test_tree_1d_hypdiff.jl b/test/test_tree_1d_hypdiff.jl index 562c940d44e..9a619f2fe89 100644 --- a/test/test_tree_1d_hypdiff.jl +++ b/test/test_tree_1d_hypdiff.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Hyperbolic diffusion" begin #! 
format: noindent diff --git a/test/test_tree_1d_linearizedeuler.jl b/test/test_tree_1d_linearizedeuler.jl index 210ad8645de..8620d85ba5c 100644 --- a/test/test_tree_1d_linearizedeuler.jl +++ b/test/test_tree_1d_linearizedeuler.jl @@ -4,7 +4,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Linearized Euler Equations 1D" begin #! format: noindent diff --git a/test/test_tree_1d_maxwell.jl b/test/test_tree_1d_maxwell.jl index 0d936f703fe..4c368f6cbc3 100644 --- a/test/test_tree_1d_maxwell.jl +++ b/test/test_tree_1d_maxwell.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Maxwell" begin #! format: noindent diff --git a/test/test_tree_1d_mhd.jl b/test/test_tree_1d_mhd.jl index 3505e62d4e8..23d0d8dbc11 100644 --- a/test/test_tree_1d_mhd.jl +++ b/test/test_tree_1d_mhd.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "MHD" begin #! format: noindent @@ -186,8 +186,7 @@ end 1.0526758874956808, 5.995204332975845e-15, 1.5122922036932964, - 0.0 - ],) + 0.0]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_1d_mhdmulti.jl b/test/test_tree_1d_mhdmulti.jl index 9c23158f7dc..9877af33350 100644 --- a/test/test_tree_1d_mhdmulti.jl +++ b/test/test_tree_1d_mhdmulti.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "MHD Multicomponent" begin #! 
format: noindent @@ -110,7 +110,7 @@ end 0.9541678878162702, 5.773159728050814e-15, 1.4595119339458051, 0.0, 0.18201910908829552, - 0.36403821817659104],) + 0.36403821817659104]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_1d_traffic_flow_lwr.jl b/test/test_tree_1d_traffic_flow_lwr.jl index 54412e314b3..36a32fd06d6 100644 --- a/test/test_tree_1d_traffic_flow_lwr.jl +++ b/test/test_tree_1d_traffic_flow_lwr.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_1d_dgsem") @testset "Traffic-flow LWR" begin #! format: noindent diff --git a/test/test_tree_2d_acoustics.jl b/test/test_tree_2d_acoustics.jl index 070eca87728..6f1cd73e819 100644 --- a/test/test_tree_2d_acoustics.jl +++ b/test/test_tree_2d_acoustics.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Acoustic Perturbation" begin #! format: noindent diff --git a/test/test_tree_2d_advection.jl b/test/test_tree_2d_advection.jl index f0ec24ccca2..2f6be383716 100644 --- a/test/test_tree_2d_advection.jl +++ b/test/test_tree_2d_advection.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Linear scalar advection" begin #! format: noindent @@ -14,7 +14,7 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), # Expected errors are exactly the same as in the parallel test! 
l2=[8.311947673061856e-6], - linf=[6.627000273229378e-5],) + linf=[6.627000273229378e-5]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -40,6 +40,49 @@ end end end +@trixi_testset "elixir_advection_implicit_sparse_jacobian.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_implicit_sparse_jacobian.jl"), + l2=[0.003003253325111022], linf=[0.004256250998163846]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi_float_type, t)) < 1000 + end +end + +@trixi_testset "elixir_advection_implicit_sparse_jacobian_restart.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_implicit_sparse_jacobian_restart.jl"), + l2=[0.007964280656552015], linf=[0.011267546271397588]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi_float_type, t)) < 1000 + end +end + +@trixi_testset "elixir_advection_implicit_sparse_jacobian_restart.jl (no colorvec)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_implicit_sparse_jacobian_restart.jl"), + colorvec=nothing, + l2=[0.007964280656552015], linf=[0.011267546271397588]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi_float_type, t)) < 1000 + end +end + @trixi_testset "elixir_advection_restart.jl" begin using OrdinaryDiffEqSSPRK: SSPRK43 println("═"^100) @@ -82,7 +125,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), # Expected errors are exactly the same as in the parallel test! 
l2=[4.913300828257469e-5], - linf=[0.00045263895394385967],) + linf=[0.00045263895394385967]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -97,7 +140,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_nonperiodic.jl"), # Expected errors are exactly the same as in the parallel test! l2=[3.2207388565869075e-5], - linf=[0.0007508059772436404],) + linf=[0.0007508059772436404]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -112,7 +155,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_solution_independent.jl"), l2=[4.949660644033807e-5], - linf=[0.0004867846262313763],) + linf=[0.0004867846262313763]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -134,7 +177,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_visualization.jl"), l2=[0.0007225529919720868], - linf=[0.005954447875428925],) + linf=[0.005954447875428925]) # Restore GKSwstype to previous value (if it was set) if !isinteractive() @@ -149,7 +192,7 @@ end @trixi_testset "elixir_advection_timeintegration.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_timeintegration.jl"), l2=[2.4976030518356626e-5], - linf=[0.0005531580316338533],) + linf=[0.0005531580316338533]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index adac6d59d4e..8dcd351ffa9 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Compressible Euler" begin #! 
format: noindent @@ -928,7 +928,7 @@ end 0.03857193149447702, 0.031090457959835893, 0.12125130332971423 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -939,6 +939,41 @@ end end end +@trixi_testset "elixir_euler_vortex_er.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_vortex_er.jl"), + l2=[ + 0.02611497083247329, + 0.1381802635983644, + 0.11459980510262816, + 0.43782810743830725 + ], + linf=[ + 0.2918576464635866, + 1.1190399715083816, + 0.7978297797951908, + 3.8946074718596115 + ]) + # Larger values for allowed allocations due to usage of custom + # integrator which are not *recorded* for the methods from + # OrdinaryDiffEq.jl + # Corresponding issue: https://github.com/trixi-framework/Trixi.jl/issues/1877 + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15_000 + end + + # test both short and long printing formats + @test_nowarn show(relaxation_solver) + println() + @test_nowarn println(relaxation_solver) + println() + @test_nowarn display(relaxation_solver) + # Test `:compact` printing + show(IOContext(IOBuffer(), :compact => true), MIME"text/plain"(), relaxation_solver) +end + @trixi_testset "elixir_euler_ec.jl with boundary_condition_slip_wall" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), l2=[ @@ -1046,6 +1081,55 @@ end end end end + +# Constant subsonic flow test +@trixi_testset "elixir_euler_subsonic_constant.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_subsonic_constant.jl"), + l2=[ + 9.135564506684991e-14, 1.9441147665983966e-14, + 1.94425866451226e-14, 1.7503189225604875e-13 + ], + linf=[ + 1.0769163338864018e-13, 8.487407677783974e-14, + 8.515583343047957e-14, 2.0472512574087887e-13 + ], + initial_refinement_level=7, + tspan=(0.0, 0.1)) # this test is sensitive to the CFL factor + # Ensure that we do not have excessive memory allocations + # 
(e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_euler_riemannproblem_quadrants_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_riemannproblem_quadrants_amr.jl"), + tspan=(0.0, 0.05), + l2=[ + 0.12802172216950314, + 0.1333199240875145, + 0.13331992408751456, + 0.48888051192644405 + ], + linf=[ + 0.853710403180942, + 0.9151148367639803, + 0.9151148367639808, + 3.4300525777582864 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end end # module diff --git a/test/test_tree_2d_eulermulti.jl b/test/test_tree_2d_eulermulti.jl index de2944eb40d..f8822509ebf 100644 --- a/test/test_tree_2d_eulermulti.jl +++ b/test/test_tree_2d_eulermulti.jl @@ -5,11 +5,13 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Compressible Euler Multicomponent" begin @trixi_testset "Testing entropy2cons and cons2entropy" begin using ForwardDiff + using Trixi: Trixi, CompressibleEulerMulticomponentEquations2D, cons2entropy, + entropy2cons, SVector gammas = (1.1546412974182538, 1.1171560258914812, 1.097107661471476, 1.0587601652669245, 1.6209889683979308, 1.6732209755396386, 1.2954303574165822) diff --git a/test/test_tree_2d_eulerpolytropic.jl b/test/test_tree_2d_eulerpolytropic.jl index dd6bb5700c2..edeb5e683fa 100644 --- a/test/test_tree_2d_eulerpolytropic.jl +++ b/test/test_tree_2d_eulerpolytropic.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Polytropic Euler" begin #! 
format: noindent diff --git a/test/test_tree_2d_fdsbp.jl b/test/test_tree_2d_fdsbp.jl index ae0bb4157d7..69c1e23a206 100644 --- a/test/test_tree_2d_fdsbp.jl +++ b/test/test_tree_2d_fdsbp.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_fdsbp") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_fdsbp") @testset "Linear scalar advection" begin #! format: noindent @@ -27,11 +27,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_fdsbp") end @trixi_testset "elixir_advection_extended.jl with periodic operators" begin - global D = SummationByPartsOperators.periodic_derivative_operator(derivative_order = 1, - accuracy_order = 4, - xmin = 0.0, - xmax = 1.0, - N = 40) + using Trixi: periodic_derivative_operator + global D = periodic_derivative_operator(derivative_order = 1, + accuracy_order = 4, + xmin = 0.0, + xmax = 1.0, + N = 40) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), l2=[1.1239649404463432e-5], linf=[1.5895264629195438e-5], diff --git a/test/test_tree_2d_hypdiff.jl b/test/test_tree_2d_hypdiff.jl index 21a20ff2d4c..601209ee3df 100644 --- a/test/test_tree_2d_hypdiff.jl +++ b/test/test_tree_2d_hypdiff.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Hyperbolic diffusion" begin #! format: noindent diff --git a/test/test_tree_2d_lbm.jl b/test/test_tree_2d_lbm.jl index 4705c9d0d03..279f369686e 100644 --- a/test/test_tree_2d_lbm.jl +++ b/test/test_tree_2d_lbm.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Lattice-Boltzmann" begin #! 
format: noindent diff --git a/test/test_tree_2d_linearizedeuler.jl b/test/test_tree_2d_linearizedeuler.jl index b1d34895a63..a7f009e6128 100644 --- a/test/test_tree_2d_linearizedeuler.jl +++ b/test/test_tree_2d_linearizedeuler.jl @@ -4,7 +4,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "Linearized Euler Equations 2D" begin #! format: noindent diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index c64ae68cf03..0f0b770afdc 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "MHD" begin #! format: noindent @@ -457,7 +457,14 @@ end 6.1013422157115546e-03 ], tspan=(0.0, 0.003), - surface_flux=(flux_lax_friedrichs, + # Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of + # `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. + # In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. + # Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. + # To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. + # We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the + # `StepsizeCallback` (CFL-Condition) and less diffusion. 
+ surface_flux=(FluxLaxFriedrichs(max_abs_speed_naive), flux_nonconservative_powell_local_jump), volume_flux=(flux_derigs_etal, flux_nonconservative_powell_local_jump)) diff --git a/test/test_tree_2d_mhdmulti.jl b/test/test_tree_2d_mhdmulti.jl index 19eaa28cb6e..ba4804a3e37 100644 --- a/test/test_tree_2d_mhdmulti.jl +++ b/test/test_tree_2d_mhdmulti.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") @testset "MHD Multicomponent" begin #! format: noindent diff --git a/test/test_tree_2d_mhdmultiion.jl b/test/test_tree_2d_mhdmultiion.jl index fdf3c586581..fc88ad0284e 100644 --- a/test/test_tree_2d_mhdmultiion.jl +++ b/test/test_tree_2d_mhdmultiion.jl @@ -55,6 +55,13 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") end end +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
@trixi_testset "Provably entropy-stable LLF-type fluxes for multi-ion GLM-MHD" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_ec.jl"), l2=[ @@ -90,7 +97,7 @@ end 0.003287251595115295 ], surface_flux=(FluxPlusDissipation(flux_ruedaramirez_etal, - DissipationLaxFriedrichsEntropyVariables()), + DissipationLaxFriedrichsEntropyVariables(max_abs_speed_naive)), flux_nonconservative_ruedaramirez_etal)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -102,6 +109,13 @@ end end end +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
@trixi_testset "elixir_mhdmultiion_ec.jl with local Lax-Friedrichs at the surface" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_ec.jl"), l2=[ @@ -136,7 +150,8 @@ end 0.49493751138636366, 0.003287414714660175 ], - surface_flux=(flux_lax_friedrichs, flux_nonconservative_central)) + surface_flux=(FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_central)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_2d_part1.jl b/test/test_tree_2d_part1.jl index 2af1f29fcb6..adf6c226d2e 100644 --- a/test/test_tree_2d_part1.jl +++ b/test/test_tree_2d_part1.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" diff --git a/test/test_tree_3d_advection.jl b/test/test_tree_3d_advection.jl index 1d894d23793..c3c47f90add 100644 --- a/test/test_tree_3d_advection.jl +++ b/test/test_tree_3d_advection.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Linear scalar advection" begin #! 
format: noindent @@ -27,7 +27,7 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.00016017848135651983], - linf=[0.0014175368788298393],) + linf=[0.0014175368788298393]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -103,7 +103,7 @@ end @trixi_testset "elixir_advection_amr.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), l2=[9.773852895157622e-6], - linf=[0.0005853874124926162],) + linf=[0.0005853874124926162]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -114,6 +114,22 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_advection_er.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_er.jl"), + l2=[0.005193350046445726], linf=[0.025986449692943836]) + + # Larger values for allowed allocations due to usage of custom + # integrator which are not *recorded* for the methods from + # OrdinaryDiffEq.jl + # Corresponding issue: https://github.com/trixi-framework/Trixi.jl/issues/1877 + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15_000 + end +end end end # module diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index a4b840c7618..a9394e89457 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Compressible Euler" begin #! 
format: noindent @@ -25,7 +25,7 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") 0.032179231640894645, 0.032179231640895534, 0.0655408023333299 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_3d_eulergravity.jl b/test/test_tree_3d_eulergravity.jl index a1eedd14446..2c3f64164b1 100644 --- a/test/test_tree_3d_eulergravity.jl +++ b/test/test_tree_3d_eulergravity.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Compressible Euler with self-gravity" begin #! format: noindent diff --git a/test/test_tree_3d_fdsbp.jl b/test/test_tree_3d_fdsbp.jl index 4cac6011713..d25089561c0 100644 --- a/test/test_tree_3d_fdsbp.jl +++ b/test/test_tree_3d_fdsbp.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_fdsbp") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_fdsbp") @testset "Linear scalar advection" begin #! 
format: noindent @@ -26,11 +26,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_fdsbp") end @trixi_testset "elixir_advection_extended.jl with periodic operators" begin - global D = SummationByPartsOperators.periodic_derivative_operator(derivative_order = 1, - accuracy_order = 4, - xmin = 0.0, - xmax = 1.0, - N = 10) + using Trixi: periodic_derivative_operator + global D = periodic_derivative_operator(derivative_order = 1, + accuracy_order = 4, + xmin = 0.0, + xmax = 1.0, + N = 10) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), l2=[5.228248923012878e-9], linf=[9.24430243465224e-9], diff --git a/test/test_tree_3d_hypdiff.jl b/test/test_tree_3d_hypdiff.jl index 6bf57e08c2d..fc6f546693b 100644 --- a/test/test_tree_3d_hypdiff.jl +++ b/test/test_tree_3d_hypdiff.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Hyperbolic diffusion" begin #! format: noindent diff --git a/test/test_tree_3d_lbm.jl b/test/test_tree_3d_lbm.jl index dc7e770dfa4..b2da985c105 100644 --- a/test/test_tree_3d_lbm.jl +++ b/test/test_tree_3d_lbm.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Lattice-Boltzmann" begin #! format: noindent diff --git a/test/test_tree_3d_linearizedeuler.jl b/test/test_tree_3d_linearizedeuler.jl index 0390b0cbcf8..79780d7001f 100644 --- a/test/test_tree_3d_linearizedeuler.jl +++ b/test/test_tree_3d_linearizedeuler.jl @@ -4,7 +4,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "Linearized Euler Equations 3D" begin #! 
format: noindent diff --git a/test/test_tree_3d_mhd.jl b/test/test_tree_3d_mhd.jl index 4822d2ff608..7baf830ea3f 100644 --- a/test/test_tree_3d_mhd.jl +++ b/test/test_tree_3d_mhd.jl @@ -5,7 +5,7 @@ using Trixi include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") @testset "MHD" begin #! format: noindent @@ -185,6 +185,8 @@ end end @trixi_testset "elixir_mhd_alfven_wave.jl with Orszag-Tang setup + flux_hlle" begin + using Trixi: prim2cons, flux_hlle, flux_nonconservative_powell, flux_central, + SVector # OBS! This setup does not make much sense and is only used to exercise all components of the # flux_hlle implementation @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), diff --git a/test/test_tree_3d_mhdmultiion.jl b/test/test_tree_3d_mhdmultiion.jl index e0fcac8e987..ac81f911ea0 100644 --- a/test/test_tree_3d_mhdmultiion.jl +++ b/test/test_tree_3d_mhdmultiion.jl @@ -55,6 +55,13 @@ EXAMPLES_DIR = joinpath(examples_dir(), "tree_3d_dgsem") end end +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
@trixi_testset "Provably entropy-stable LLF-type fluxes for multi-ion GLM-MHD" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_ec.jl"), l2=[ @@ -90,7 +97,7 @@ end 0.04959304829601756 ], surface_flux=(FluxPlusDissipation(flux_ruedaramirez_etal, - DissipationLaxFriedrichsEntropyVariables()), + DissipationLaxFriedrichsEntropyVariables(max_abs_speed_naive)), flux_nonconservative_ruedaramirez_etal), tspan=(0.0, 0.05)) # Ensure that we do not have excessive memory allocations @@ -103,6 +110,13 @@ end end end +# Up to version 0.13.0, `max_abs_speed_naive` was used as the default wave speed estimate of +# `const flux_lax_friedrichs = FluxLaxFriedrichs(), i.e., `FluxLaxFriedrichs(max_abs_speed = max_abs_speed_naive)`. +# In the `StepsizeCallback`, though, the less diffusive `max_abs_speeds` is employed which is consistent with `max_abs_speed`. +# Thus, we exchanged in PR#2458 the default wave speed used in the LLF flux to `max_abs_speed`. +# To ensure that every example still runs we specify explicitly `FluxLaxFriedrichs(max_abs_speed_naive)`. +# We remark, however, that the now default `max_abs_speed` is in general recommended due to compliance with the +# `StepsizeCallback` (CFL-Condition) and less diffusion. 
@trixi_testset "elixir_mhdmultiion_ec.jl with local Lax-Friedrichs at the surface" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_ec.jl"), l2=[ @@ -137,7 +151,8 @@ end 0.9587397297591731, 0.04960518070421052 ], - surface_flux=(flux_lax_friedrichs, flux_nonconservative_central), + surface_flux=(FluxLaxFriedrichs(max_abs_speed_naive), + flux_nonconservative_central), tspan=(0.0, 0.05)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -148,6 +163,50 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "Multi-ion GLM-MHD collision source terms (3D)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhdmultiion_collisions.jl"), + l2=[ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0595534208484378, + 0.0, + 0.0, + 0.019718485574500753, + 0.0, + 0.059553420848437816, + 0.0, + 0.0, + 0.01738507024352939, + 0.0 + ], + linf=[ + 0.0, + 0.0, + 0.0, + 0.0, + 0.059553420848437816, + 0.0, + 0.0, + 0.019718485574500757, + 0.0, + 0.05955342084843786, + 0.0, + 0.0, + 0.017385070243529404, + 0.0 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end end # module diff --git a/test/test_tree_3d_part2.jl b/test/test_tree_3d_part2.jl index 4b9da039f98..df47e9bb9f5 100644 --- a/test/test_tree_3d_part2.jl +++ b/test/test_tree_3d_part2.jl @@ -5,6 +5,8 @@ using Trixi include("test_trixi.jl") +EXAMPLES_DIR = examples_dir() + # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" isdir(outdir) && rm(outdir, recursive = true) @@ -29,6 +31,8 @@ end @trixi_testset "Additional tests in 3D" begin @trixi_testset "compressible Euler" begin + using Trixi: CompressibleEulerEquations3D, energy_total, energy_kinetic, + energy_internal eqn = 
CompressibleEulerEquations3D(1.4) @test isapprox(energy_total([1.0, 2.0, 3.0, 4.0, 20.0], eqn), 20.0) @@ -37,14 +41,14 @@ end end @trixi_testset "hyperbolic diffusion" begin + using Trixi: HyperbolicDiffusionEquations3D @test_nowarn HyperbolicDiffusionEquations3D(nu = 1.0) eqn = HyperbolicDiffusionEquations3D(nu = 1.0) end end @trixi_testset "Displaying components 3D" begin - @test_nowarn include(joinpath(examples_dir(), "tree_3d_dgsem", - "elixir_advection_amr.jl")) + @test_nowarn include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl")) # test both short and long printing formats @test_nowarn show(mesh) diff --git a/test/test_tree_3d_part3.jl b/test/test_tree_3d_part3.jl index fd91a106d9c..fd64566867d 100644 --- a/test/test_tree_3d_part3.jl +++ b/test/test_tree_3d_part3.jl @@ -28,6 +28,9 @@ isdir(outdir) && rm(outdir, recursive = true) end @trixi_testset "Additional tests in 3D" begin + using Trixi: Trixi, IdealGlmMhdEquations3D, density, pressure, density_pressure, + energy_total, energy_kinetic, energy_magnetic, energy_internal, + cross_helicity @testset "ideal GLM MHD" begin eqn = IdealGlmMhdEquations3D(1.4) u = [1.0, 2.0, 3.0, 4.0, 20.0, 0.1, 0.2, 0.3, 1.5] diff --git a/test/test_trixi.jl b/test/test_trixi.jl index 00baf2162d0..776633c4be4 100644 --- a/test/test_trixi.jl +++ b/test/test_trixi.jl @@ -1,196 +1,30 @@ -using Test: @test -using TrixiTest: @trixi_test_nowarn -import Trixi - -# Use a macro to avoid world age issues when defining new initial conditions etc. -# inside an elixir. -""" - @test_trixi_include(elixir; l2=nothing, linf=nothing, RealT=Float64, - atol=500*eps(RealT), rtol=sqrt(eps(RealT)), - parameters...) - -Test Trixi by calling `trixi_include(elixir; parameters...)`. -By default, only the absence of error output is checked. -If `l2` or `linf` are specified, in addition the resulting L2/Linf errors -are compared approximately against these reference values, using `atol, rtol` -as absolute/relative tolerance. 
-""" -macro test_trixi_include(elixir, args...) - # Note: The variables below are just Symbols, not actual errors/types - local l2 = get_kwarg(args, :l2, nothing) - local linf = get_kwarg(args, :linf, nothing) - local RealT = get_kwarg(args, :RealT, :Float64) - if RealT === :Float64 - atol_default = 500 * eps(Float64) - rtol_default = sqrt(eps(Float64)) - elseif RealT === :Float32 - atol_default = 500 * eps(Float32) - rtol_default = sqrt(eps(Float32)) - elseif RealT === :Float128 - atol_default = 500 * eps(Float128) - rtol_default = sqrt(eps(Float128)) - elseif RealT === :Double64 - atol_default = 500 * eps(Double64) - rtol_default = sqrt(eps(Double64)) - end - local atol = get_kwarg(args, :atol, atol_default) - local rtol = get_kwarg(args, :rtol, rtol_default) - - local kwargs = Pair{Symbol, Any}[] - for arg in args - if (arg.head == :(=) && - !(arg.args[1] in (:l2, :linf, :RealT, :atol, :rtol))) - push!(kwargs, Pair(arg.args...)) - end - end - - quote - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && println($elixir) - - # if `maxiters` is set in tests, it is usually set to a small number to - # run only a few steps - ignore possible warnings coming from that - if any(==(:maxiters) ∘ first, $kwargs) - additional_ignore_content = [ - r"┌ Warning: Interrupted\. Larger maxiters is needed\..*\n└ @ SciMLBase .+\n", - r"┌ Warning: Interrupted\. Larger maxiters is needed\..*\n└ @ Trixi .+\n"] - else - additional_ignore_content = [] - end - - # evaluate examples in the scope of the module they're called from - @test_nowarn_mod trixi_include(@__MODULE__, $elixir; $kwargs...) 
additional_ignore_content - - # if present, compare l2 and linf errors against reference values - if !isnothing($l2) || !isnothing($linf) - l2_measured, linf_measured = analysis_callback(sol) - - if Trixi.mpi_isroot() && !isnothing($l2) - @test length($l2) == length(l2_measured) - for (l2_expected, l2_actual) in zip($l2, l2_measured) - @test isapprox(l2_expected, l2_actual, atol = $atol, rtol = $rtol) - end - end - - if Trixi.mpi_isroot() && !isnothing($linf) - @test length($linf) == length(linf_measured) - for (linf_expected, linf_actual) in zip($linf, linf_measured) - @test isapprox(linf_expected, linf_actual, atol = $atol, rtol = $rtol) - end - end - end - - Trixi.mpi_isroot() && println("═"^100) - Trixi.mpi_isroot() && println("\n\n") - end -end - -# Get the first value assigned to `keyword` in `args` and return `default_value` -# if there are no assignments to `keyword` in `args`. -function get_kwarg(args, keyword, default_value) - val = default_value - for arg in args - if arg.head == :(=) && arg.args[1] == keyword - val = arg.args[2] - break - end - end - return val -end - -function expr_to_named_tuple(expr) - result = (;) - - for arg in expr.args - if arg.head != :(=) - error("Invalid expression") - end - result = (; result..., arg.args[1] => arg.args[2]) - end - return result -end - -macro test_nowarn_mod(expr, additional_ignore_content = []) - quote - add_to_additional_ignore_content = [ - # We need to ignore steady state information reported by our callbacks - r"┌ Info: Steady state tolerance reached\n│ steady_state_callback .+\n└ t = .+\n", - # NOTE: These warnings arose from Julia 1.10 onwards - r"WARNING: Method definition .* in module .* at .* overwritten .*.\n", - # Warnings from third party packages - r"┌ Warning: Problem status ALMOST_INFEASIBLE; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n", - r"┌ Warning: Problem status ALMOST_OPTIMAL; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n", - # Warnings 
for higher-precision floating data types - r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:118 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. `@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/.*\n", - r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:136 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. `@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/.*\n" - ] - append!($additional_ignore_content, add_to_additional_ignore_content) - @trixi_test_nowarn $(esc(expr)) $additional_ignore_content - end -end - -""" - @timed_testset "name of the testset" #= code to test #= - -Similar to `@testset`, but prints the name of the testset and its runtime -after execution. -""" -macro timed_testset(name, expr) - @assert name isa String - quote - local time_start = time_ns() - @testset $name $expr - local time_stop = time_ns() - if Trixi.mpi_isroot() - flush(stdout) - @info("Testset "*$name*" finished in " - *string(1.0e-9 * (time_stop - time_start))*" seconds.\n") - flush(stdout) - end - end -end - -""" - @trixi_testset "name of the testset" #= code to test #= - -Similar to `@testset`, but wraps the code inside a temporary module to avoid -namespace pollution. It also `include`s this file again to provide the -definition of `@test_trixi_include`. Moreover, it records the execution time -of the testset similarly to [`timed_testset`](@ref). -""" -macro trixi_testset(name, expr) - @assert name isa String - # TODO: `@eval` is evil - # We would like to use - # mod = gensym(name) - # ... - # module $mod - # to create new module names for every test set. 
However, this is not - # compatible with the dirty hack using `@eval` to get the mapping when - # loading structured, curvilinear meshes. Thus, we need to use a plain - # module name here. - quote - local time_start = time_ns() - @eval module TrixiTestModule - using Test - using Trixi - include(@__FILE__) - # We define `EXAMPLES_DIR` in (nearly) all test modules and use it to - # get the path to the elixirs to be tested. However, that's not required - # and we want to fail gracefully if it's not defined. - try - import ..EXAMPLES_DIR - catch - nothing - end - @testset $name $expr - end - local time_stop = time_ns() - if Trixi.mpi_isroot() - flush(stdout) - @info("Testset "*$name*" finished in " - *string(1.0e-9 * (time_stop - time_start))*" seconds.\n") - end - nothing - end +using Test: @test, @testset +using TrixiTest +using Trixi: examples_dir + +macro test_trixi_include(expr, args...) + local add_to_additional_ignore_content = [ + # We need to ignore steady state information reported by our callbacks + r"┌ Info: Steady state tolerance reached\n│ steady_state_callback .+\n└ t = .+\n", + # NOTE: These warnings arose from Julia 1.10 onwards + r"WARNING: Method definition .* in module .* at .* overwritten .*.\n", + # Warnings from third party packages + r"┌ Warning: Problem status ALMOST_INFEASIBLE; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n", + r"┌ Warning: Problem status ALMOST_OPTIMAL; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n", + # Warnings for higher-precision floating data types + r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:118 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. 
`@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/.*\n", + r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:136 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. `@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/.*\n" + ] + # if `maxiters` is set in tests, it is usually set to a small number to + # run only a few steps - ignore possible warnings coming from that + if any(expr.args[1] == (:maxiters) for expr in args) + push!(add_to_additional_ignore_content, + r"┌ Warning: Interrupted\. Larger maxiters is needed\..*\n└ @ Trixi .+\n") + end + args = append_to_kwargs(args, :additional_ignore_content, + add_to_additional_ignore_content) + ex = quote + @test_trixi_include_base($expr, $(args...)) + end + return esc(ex) end diff --git a/test/test_unit.jl b/test/test_unit.jl index 1bec6f600d9..54403a3e3c2 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -4,6 +4,7 @@ using Test using Trixi using LinearAlgebra: norm, dot +using SparseArrays using DelimitedFiles: readdlm # Use Convex and ECOS to load the extension that extends functions for testing @@ -15,6 +16,11 @@ using ECOS: Optimizer # PERK Single p3 Constructors using NLsolve: nlsolve +import SparseConnectivityTracer: TracerSparsityDetector, jacobian_eltype, jacobian_sparsity +import SparseMatrixColorings: ColoringProblem, GreedyColoringAlgorithm, coloring, + column_colors +import FiniteDiff: finite_difference_jacobian! 
+ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists @@ -674,10 +680,9 @@ end @timed_testset "StepsizeCallback" begin # Ensure a proper error is thrown if used with adaptive time integration schemes - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", - "elixir_advection_diffusion.jl"), - tspan = (0, 0.05)) + @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + "elixir_advection_diffusion.jl"), + tspan=(0, 0.05)) @test_throws ArgumentError solve(ode, alg; ode_default_options()..., callback = StepsizeCallback(cfl = 1.0)) @@ -685,10 +690,9 @@ end @timed_testset "TimeSeriesCallback" begin # Test the 2D TreeMesh version of the callback and some warnings - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", - "elixir_acoustics_gaussian_source.jl"), - tspan = (0, 0.05)) + @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + "elixir_acoustics_gaussian_source.jl"), + tspan=(0, 0.05)) point_data_1 = time_series.affect!.point_data[1] @test all(isapprox.(point_data_1[1:7], @@ -702,6 +706,39 @@ end @test_throws ArgumentError TimeSeriesCallback(semi, [1.0 1.0 1.0; 2.0 2.0 2.0]) end +@timed_testset "resize! 
RelaxationIntegrators" begin + equations = LinearScalarAdvectionEquation1D(42.0) + solver = DGSEM(polydeg = 0, surface_flux = flux_ranocha) + mesh = TreeMesh((0.0,), (1.0,), + initial_refinement_level = 2, + n_cells_max = 30_000) + semi = SemidiscretizationHyperbolic(mesh, equations, + initial_condition_convergence_test, + solver) + u0 = zeros(4) + tspan = (0.0, 1.0) + ode = semidiscretize(semi, tspan) + + ode_alg = Trixi.RelaxationRK44() # SubDiagonalAlgorithm + integrator = Trixi.init(ode, ode_alg; dt = 1.0) # SubDiagonalRelaxationIntegrator + + resize!(integrator, 1001) + @test length(integrator.u) == 1001 + @test length(integrator.du) == 1001 + @test length(integrator.u_tmp) == 1001 + @test length(integrator.direction) == 1001 + + ode_alg = Trixi.RelaxationCKL54() # vanderHouwenAlgorithm + integrator = Trixi.init(ode, ode_alg; dt = 1.0) # vanderHouwenRelaxationIntegrator + + resize!(integrator, 42) + @test length(integrator.u) == 42 + @test length(integrator.du) == 42 + @test length(integrator.u_tmp) == 42 + @test length(integrator.k_prev) == 42 + @test length(integrator.direction) == 42 +end + @timed_testset "Consistency check for single point flux: CEMCE" begin equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), gas_constants = (0.4, 0.4)) @@ -2571,5 +2608,90 @@ end equations) end end + +@testset "SparseConnectivityTracer FiniteDiff Jacobian" begin + ############################################################################### + ### equations, solver, mesh ### + + advection_velocities = (0.2, -0.7) + equations = LinearScalarAdvectionEquation2D(advection_velocities) + + float_type = Float64 # Datatype for the actual simulation + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, RealT = float_type) + + coordinates_min = (-1.0, -1.0) + coordinates_max = (1.0, 1.0) + + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + + 
############################################################################### + ### semidiscretization for sparsity detection ### + + jac_detector = TracerSparsityDetector() + # We need to construct the semidiscretization with the correct + # sparsity-detection ready datatype, which is retrieved here + jac_eltype = jacobian_eltype(float_type, jac_detector) + + # Semidiscretization for sparsity pattern detection + semi_jac_type = SemidiscretizationHyperbolic(mesh, equations, + initial_condition_convergence_test, + solver, + uEltype = jac_eltype) # Need to supply Jacobian element type + + tspan = (0.0, 1.0) # Re-used for wrapping `rhs` below + + # Call `semidiscretize` to create the ODE problem to have access to the + # initial condition based on which the sparsity pattern is computed + ode_jac_type = semidiscretize(semi_jac_type, tspan) + u0_ode = ode_jac_type.u0 + du_ode = similar(u0_ode) + + ############################################################################### + ### Compute the Jacobian sparsity pattern ### + + # Wrap the `Trixi.rhs!` function to match the signature `f!(du, u)`, see + # https://adrianhill.de/SparseConnectivityTracer.jl/stable/user/api/#ADTypes.jacobian_sparsity + rhs_jac_type! 
= (du_ode, u0_ode) -> Trixi.rhs!(du_ode, u0_ode, semi_jac_type, + tspan[1]) + + jac_prototype = jacobian_sparsity(rhs_jac_type!, du_ode, u0_ode, jac_detector) + + coloring_prob = ColoringProblem(; structure = :nonsymmetric, partition = :column) + coloring_alg = GreedyColoringAlgorithm(; decompression = :direct) + coloring_result = coloring(jac_prototype, coloring_prob, coloring_alg) + coloring_vec = column_colors(coloring_result) + + ############################################################################### + ### float-type semidiscretization ### + + semi_float_type = SemidiscretizationHyperbolic(mesh, equations, + initial_condition_convergence_test, + solver) + + ode_float_type = semidiscretize(semi_float_type, tspan) + u0_ode = ode_float_type.u0 + du_ode = similar(u0_ode) + N = length(u0_ode) + + rhs_float_type! = (du_ode, u0_ode) -> Trixi.rhs!(du_ode, u0_ode, semi_float_type, + tspan[1]) + + ############################################################################### + ### sparsity-aware finite diff ### + + jac_sparse_finite_diff = spzeros(N, N) + finite_difference_jacobian!(jac_sparse_finite_diff, rhs_float_type!, + u0_ode, sparsity = jac_prototype, + colorvec = coloring_vec) + + jac_finite_diff = jacobian_fd(semi_float_type) + + @test isapprox(jac_finite_diff, jac_sparse_finite_diff; rtol = 5e-8) + @test isapprox(jac_finite_diff, Matrix(jac_sparse_finite_diff); rtol = 5e-8) + @test isapprox(sparse(jac_finite_diff), jac_sparse_finite_diff; rtol = 5e-8) +end end + end #module diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index d16bc96fb83..db93b780aeb 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -2,10 +2,11 @@ module TestExamplesUnstructuredMesh2D using Test using Trixi +using Adapt include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "unstructured_2d_dgsem") +EXAMPLES_DIR = joinpath(examples_dir(), "unstructured_2d_dgsem") # Start with a clean environment: remove Trixi.jl output 
directory if it exists outdir = "out" @@ -32,6 +33,12 @@ isdir(outdir) && rm(outdir, recursive = true) du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + semi32 = Trixi.trixi_adapt(Array, Float32, semi) + @test real(semi32.solver) == Float32 + @test real(semi32.solver.basis) == Float32 + @test real(semi32.solver.mortar) == Float32 + # TODO: remake ignores the mesh as well + @test real(semi32.mesh) == Float64 end @trixi_testset "elixir_euler_free_stream.jl" begin @@ -83,6 +90,7 @@ end end @trixi_testset "elixir_euler_basic.jl" begin + using Trixi: default_example_unstructured @test_trixi_include(default_example_unstructured(), l2=[ 0.0007213418215265047, @@ -120,7 +128,7 @@ end 0.005243995459478956, 0.004685630332338153, 0.01750217718347713 - ],) + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -341,7 +349,8 @@ end # TODO: FD; for now put the unstructured tests for the 2D FDSBP here. @trixi_testset "FDSBP (central): elixir_advection_basic.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_advection_basic.jl"), l2=[0.0001105211407319266], linf=[0.0004199363734466166]) @@ -356,7 +365,8 @@ end end @trixi_testset "FDSBP (central): elixir_euler_source_terms.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_euler_source_terms.jl"), l2=[8.155544666380138e-5, 0.0001477863788446318, @@ -378,7 +388,8 @@ end end @trixi_testset "FDSBP (central): elixir_euler_free_stream.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", 
"elixir_euler_free_stream.jl"), l2=[5.4329175009362306e-14, 1.0066867437607972e-13, @@ -401,7 +412,8 @@ end end @trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_euler_source_terms_upwind.jl"), l2=[4.085391175504837e-5, 7.19179253772227e-5, @@ -424,7 +436,8 @@ end end @trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl with LF splitting" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_euler_source_terms_upwind.jl"), l2=[ 3.8300274213823844e-5, @@ -452,7 +465,8 @@ end end @trixi_testset "FDSBP (upwind): elixir_euler_free_stream_upwind.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_euler_free_stream_upwind.jl"), l2=[3.2114065566681054e-14, 2.132488788134846e-14, @@ -475,7 +489,8 @@ end end @trixi_testset "FDSBP (upwind): elixir_euler_free_stream_upwind_float32.jl" begin - @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + using Trixi: examples_dir + @test_trixi_include(joinpath(examples_dir(), "unstructured_2d_fdsbp", "elixir_euler_free_stream_upwind_float32.jl"), l2=[0, 0, 0, 0], linf=[0, 0, 0, 0], diff --git a/test/test_visualization.jl b/test/test_visualization.jl index 28cfc52edcf..41f2bff5a05 100644 --- a/test/test_visualization.jl +++ b/test/test_visualization.jl @@ -11,7 +11,7 @@ using CairoMakie include("test_trixi.jl") -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") +EXAMPLES_DIR = examples_dir() # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" 
@@ -35,9 +35,8 @@ test_examples_2d = Dict("TreeMesh" => ("tree_2d_dgsem", @testset "PlotData2D, PlotDataSeries, PlotMesh with $mesh" for mesh in keys(test_examples_2d) # Run Trixi.jl directory, elixir = test_examples_2d[mesh] - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), directory, elixir), - tspan = (0, 0.1)) + @test_trixi_include(joinpath(EXAMPLES_DIR, directory, elixir), + tspan=(0, 0.1)) # Constructor tests if mesh == "TreeMesh" @@ -53,7 +52,7 @@ test_examples_2d = Dict("TreeMesh" => ("tree_2d_dgsem", pd = PlotData2D(sol) # show - @test_nowarn_mod show(stdout, pd) + @trixi_test_nowarn show(stdout, pd) println(stdout) # getindex @@ -79,22 +78,22 @@ test_examples_2d = Dict("TreeMesh" => ("tree_2d_dgsem", pds = pd["p"] @test pds.plot_data == pd @test pds.variable_id == 4 - @test_nowarn_mod show(stdout, pds) + @trixi_test_nowarn show(stdout, pds) println(stdout) # getmesh/PlotMesh @test getmesh(pd) == Trixi.PlotMesh(pd) @test getmesh(pd).plot_data == pd - @test_nowarn_mod show(stdout, getmesh(pd)) + @trixi_test_nowarn show(stdout, getmesh(pd)) println(stdout) @testset "2D plot recipes" begin pd = PlotData2D(sol) - @test_nowarn_mod Plots.plot(sol) - @test_nowarn_mod Plots.plot(pd) - @test_nowarn_mod Plots.plot(pd["p"]) - @test_nowarn_mod Plots.plot(getmesh(pd)) + @trixi_test_nowarn Plots.plot(sol) + @trixi_test_nowarn Plots.plot(pd) + @trixi_test_nowarn Plots.plot(pd["p"]) + @trixi_test_nowarn Plots.plot(getmesh(pd)) semi = sol.prob.p if mesh == "DGMulti" @@ -104,30 +103,30 @@ test_examples_2d = Dict("TreeMesh" => ("tree_2d_dgsem", u = sol.u[end] end scalar_data = StructArrays.component(u, 1) - @test_nowarn_mod Plots.plot(ScalarPlotData2D(scalar_data, semi)) + @trixi_test_nowarn Plots.plot(ScalarPlotData2D(scalar_data, semi)) else cache = semi.cache x = view(cache.elements.node_coordinates, 1, :, :, :) - @test_nowarn_mod Plots.plot(ScalarPlotData2D(x, semi)) + @trixi_test_nowarn Plots.plot(ScalarPlotData2D(x, semi)) end end @testset "1D 
plot from 2D solution" begin if mesh != "DGMulti" @testset "Create 1D plot as slice" begin - @test_nowarn_mod PlotData1D(sol, slice = :y, point = (0.5, 0.0)) isa - PlotData1D - @test_nowarn_mod PlotData1D(sol, slice = :x, point = (0.5, 0.0)) isa - PlotData1D + @trixi_test_nowarn PlotData1D(sol, slice = :y, point = (0.5, 0.0)) isa + PlotData1D + @trixi_test_nowarn PlotData1D(sol, slice = :x, point = (0.5, 0.0)) isa + PlotData1D pd1D = PlotData1D(sol, slice = :y, point = (0.5, 0.0)) - @test_nowarn_mod Plots.plot(pd1D) + @trixi_test_nowarn Plots.plot(pd1D) @testset "Create 1D plot along curve" begin curve = zeros(2, 10) curve[1, :] = range(-1, 1, length = 10) - @test_nowarn_mod PlotData1D(sol, curve = curve) isa PlotData1D + @trixi_test_nowarn PlotData1D(sol, curve = curve) isa PlotData1D pd1D = PlotData1D(sol, curve = curve) - @test_nowarn_mod Plots.plot(pd1D) + @trixi_test_nowarn Plots.plot(pd1D) end end end @@ -136,17 +135,16 @@ end @timed_testset "PlotData1D, PlotDataSeries, PlotMesh" begin # Run Trixi.jl - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_1d_dgsem", - "elixir_euler_blast_wave.jl"), - tspan = (0, 0.1)) + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_1d_dgsem", + "elixir_euler_blast_wave.jl"), + tspan=(0, 0.1)) # Constructor @test PlotData1D(sol) isa PlotData1D pd = PlotData1D(sol) # show - @test_nowarn_mod show(stdout, pd) + @trixi_test_nowarn show(stdout, pd) println(stdout) # getindex @@ -170,13 +168,13 @@ end pds = pd["p"] @test pds.plot_data == pd @test pds.variable_id == 3 - @test_nowarn_mod show(stdout, pds) + @trixi_test_nowarn show(stdout, pds) println(stdout) # getmesh/PlotMesh @test getmesh(pd) == Trixi.PlotMesh(pd) @test getmesh(pd).plot_data == pd - @test_nowarn_mod show(stdout, getmesh(pd)) + @trixi_test_nowarn show(stdout, getmesh(pd)) println(stdout) # nvisnodes @@ -189,15 +187,15 @@ end @testset "1D plot recipes" begin pd = PlotData1D(sol) - @test_nowarn_mod Plots.plot(sol) - @test_nowarn_mod 
Plots.plot(sol, reinterpolate = false) - @test_nowarn_mod Plots.plot(pd) - @test_nowarn_mod Plots.plot(pd["p"]) - @test_nowarn_mod Plots.plot(getmesh(pd)) + @trixi_test_nowarn Plots.plot(sol) + @trixi_test_nowarn Plots.plot(sol, reinterpolate = false) + @trixi_test_nowarn Plots.plot(pd) + @trixi_test_nowarn Plots.plot(pd["p"]) + @trixi_test_nowarn Plots.plot(getmesh(pd)) initial_condition_t_end(x, equations) = initial_condition(x, last(tspan), equations) - @test_nowarn_mod Plots.plot(initial_condition_t_end, semi) - @test_nowarn_mod Plots.plot((x, equations) -> x, semi) + @trixi_test_nowarn Plots.plot(initial_condition_t_end, semi) + @trixi_test_nowarn Plots.plot((x, equations) -> x, semi) end # Fake a PlotDataXD objects to test code for plotting multiple variables on at least two rows @@ -208,7 +206,7 @@ end variable_names = string.('a':'e') mesh_vertices_x1d = [x[begin], x[end]] fake1d = PlotData1D(x, data1d, variable_names, mesh_vertices_x1d, 0) - @test_nowarn_mod Plots.plot(fake1d) + @trixi_test_nowarn Plots.plot(fake1d) y = x data2d = [rand(11, 11) for _ in 1:5] @@ -216,17 +214,19 @@ end mesh_vertices_y2d = [0.0, 0.0, 1.0, 1.0] fake2d = Trixi.PlotData2DCartesian(x, y, data2d, variable_names, mesh_vertices_x2d, mesh_vertices_y2d, 0, 0) - @test_nowarn_mod Plots.plot(fake2d) + @trixi_test_nowarn Plots.plot(fake2d) end end @timed_testset "1D plot from 2D solution" begin @trixi_testset "Create 1D plot along curve" begin using OrdinaryDiffEqSSPRK + using Trixi @testset "$MeshType" for MeshType in (P4estMesh, T8codeMesh) equations = CompressibleEulerEquations2D(1.4) - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + solver = DGSEM(polydeg = 3, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) coordinates_max = (+1.0, +1.0) @@ -259,8 +259,10 @@ end end @trixi_testset "PlotData1D gives correct results" begin + using Trixi equations = CompressibleEulerEquations2D(1.4) - solver = DGSEM(polydeg = 3, surface_flux = 
flux_lax_friedrichs) + solver = DGSEM(polydeg = 3, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0) coordinates_max = (+1.0, +1.0) initial_refinement_level = 3 @@ -397,57 +399,52 @@ end # Test two different approximation types since these use different memory layouts: # - structure of arrays for `Polynomial()` # - array of structures for `SBP()` - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "dgmulti_1d", - "elixir_euler_flux_diff.jl"), - tspan = (0.0, 0.0), - approximation_type = Polynomial()) + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_1d", + "elixir_euler_flux_diff.jl"), + tspan=(0.0, 0.0), + approximation_type=Polynomial()) @test PlotData1D(sol) isa PlotData1D initial_condition_t_end(x, equations) = initial_condition(x, last(tspan), equations) - @test_nowarn_mod Plots.plot(initial_condition_t_end, semi) - @test_nowarn_mod Plots.plot((x, equations) -> x, semi) - - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "dgmulti_1d", - "elixir_euler_flux_diff.jl"), - tspan = (0.0, 0.0), - approximation_type = SBP()) + @trixi_test_nowarn Plots.plot(initial_condition_t_end, semi) + @trixi_test_nowarn Plots.plot((x, equations) -> x, semi) + + @test_trixi_include(joinpath(EXAMPLES_DIR, "dgmulti_1d", + "elixir_euler_flux_diff.jl"), + tspan=(0.0, 0.0), + approximation_type=SBP()) @test PlotData1D(sol) isa PlotData1D - @test_nowarn_mod Plots.plot(initial_condition_t_end, semi) - @test_nowarn_mod Plots.plot((x, equations) -> x, semi) + @trixi_test_nowarn Plots.plot(initial_condition_t_end, semi) + @trixi_test_nowarn Plots.plot((x, equations) -> x, semi) end @timed_testset "1D plot recipes (StructuredMesh)" begin - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_1d_dgsem", - "elixir_euler_source_terms.jl"), - tspan = (0.0, 0.0)) + @test_trixi_include(joinpath(EXAMPLES_DIR, "structured_1d_dgsem", + "elixir_euler_source_terms.jl"), + tspan=(0.0, 0.0)) 
pd = PlotData1D(sol) initial_condition_t_end(x, equations) = initial_condition(x, last(tspan), equations) - @test_nowarn_mod Plots.plot(sol) - @test_nowarn_mod Plots.plot(pd) - @test_nowarn_mod Plots.plot(pd["p"]) - @test_nowarn_mod Plots.plot(sol.u[end], semi) - @test_nowarn_mod Plots.plot(initial_condition_t_end, semi) - @test_nowarn_mod Plots.plot((x, equations) -> x, semi) + @trixi_test_nowarn Plots.plot(sol) + @trixi_test_nowarn Plots.plot(pd) + @trixi_test_nowarn Plots.plot(pd["p"]) + @trixi_test_nowarn Plots.plot(sol.u[end], semi) + @trixi_test_nowarn Plots.plot(initial_condition_t_end, semi) + @trixi_test_nowarn Plots.plot((x, equations) -> x, semi) end @timed_testset "plot time series" begin - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", - "elixir_acoustics_gaussian_source.jl"), - tspan = (0, 0.05)) + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", + "elixir_acoustics_gaussian_source.jl"), + tspan=(0, 0.05)) - @test_nowarn_mod Plots.plot(time_series, 1) + @trixi_test_nowarn Plots.plot(time_series, 1) @test PlotData1D(time_series, 1) isa PlotData1D end @timed_testset "adapt_to_mesh_level" begin - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", - "elixir_advection_basic.jl"), - analysis_callback = Trixi.TrivialCallback()) + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", + "elixir_advection_basic.jl"), + analysis_callback=Trixi.TrivialCallback()) @test adapt_to_mesh_level(sol, 5) isa Tuple u_ode_level5, semi_level5 = adapt_to_mesh_level(sol, 5) @@ -459,66 +456,65 @@ end end @timed_testset "plot 3D" begin - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_3d_dgsem", - "elixir_advection_basic.jl"), - analysis_callback = Trixi.TrivialCallback(), - initial_refinement_level = 1) + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_3d_dgsem", + "elixir_advection_basic.jl"), + analysis_callback=Trixi.TrivialCallback(), + 
initial_refinement_level=1) @test PlotData2D(sol) isa Trixi.PlotData2DCartesian @test PlotData2D(sol, slice = :yz) isa Trixi.PlotData2DCartesian @test PlotData2D(sol, slice = :xz) isa Trixi.PlotData2DCartesian @testset "1D plot from 3D solution and Tree-mesh" begin @testset "Create 1D plot as slice" begin - @test_nowarn_mod PlotData1D(sol) isa PlotData1D + @trixi_test_nowarn PlotData1D(sol) isa PlotData1D pd1D = PlotData1D(sol) - @test_nowarn_mod Plots.plot(pd1D) - @test_nowarn_mod PlotData1D(sol, slice = :y, point = (0.5, 0.3, 0.1)) isa - PlotData1D - @test_nowarn_mod PlotData1D(sol, slice = :z, point = (0.1, 0.3, 0.3)) isa - PlotData1D + @trixi_test_nowarn Plots.plot(pd1D) + @trixi_test_nowarn PlotData1D(sol, slice = :y, point = (0.5, 0.3, 0.1)) isa + PlotData1D + @trixi_test_nowarn PlotData1D(sol, slice = :z, point = (0.1, 0.3, 0.3)) isa + PlotData1D end @testset "Create 1D plot along curve" begin curve = zeros(3, 10) curve[1, :] = range(-1.0, -0.5, length = 10) - @test_nowarn_mod PlotData1D(sol, curve = curve) isa PlotData1D + @trixi_test_nowarn PlotData1D(sol, curve = curve) isa PlotData1D pd1D = PlotData1D(sol, curve = curve) - @test_nowarn_mod Plots.plot(pd1D) + @trixi_test_nowarn Plots.plot(pd1D) end end - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "structured_3d_dgsem", - "elixir_advection_basic.jl")) + @test_trixi_include(joinpath(EXAMPLES_DIR, "structured_3d_dgsem", + "elixir_advection_basic.jl")) @testset "1D plot from 3D solution and general mesh" begin @testset "Create 1D plot as slice" begin - @test_nowarn_mod PlotData1D(sol) isa PlotData1D + @trixi_test_nowarn PlotData1D(sol) isa PlotData1D pd1D = PlotData1D(sol) - @test_nowarn_mod Plots.plot(pd1D) - @test_nowarn_mod PlotData1D(sol, slice = :y, point = (0.5, 0.3, 0.1)) isa - PlotData1D - @test_nowarn_mod PlotData1D(sol, slice = :z, point = (0.1, 0.3, 0.3)) isa - PlotData1D + @trixi_test_nowarn Plots.plot(pd1D) + @trixi_test_nowarn PlotData1D(sol, slice = :y, point = (0.5, 
0.3, 0.1)) isa + PlotData1D + @trixi_test_nowarn PlotData1D(sol, slice = :z, point = (0.1, 0.3, 0.3)) isa + PlotData1D end @testset "Create 1D plot along curve" begin curve = zeros(3, 10) curve[1, :] = range(-1.0, 1.0, length = 10) - @test_nowarn_mod PlotData1D(sol, curve = curve) isa PlotData1D + @trixi_test_nowarn PlotData1D(sol, curve = curve) isa PlotData1D pd1D = PlotData1D(sol, curve = curve) - @test_nowarn_mod Plots.plot(pd1D) + @trixi_test_nowarn Plots.plot(pd1D) end end @timed_testset "1D plot from 3D solution on P4estMesh and T8codeMesh" begin @trixi_testset "Create 1D plot along curve" begin using OrdinaryDiffEqSSPRK - + using Trixi @testset "$MeshType" for MeshType in (P4estMesh, T8codeMesh) equations = CompressibleEulerEquations3D(1.4) - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + solver = DGSEM(polydeg = 3, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0, -1.0) coordinates_max = (+1.0, +1.0, +1.0) @@ -552,8 +548,10 @@ end end @trixi_testset "PlotData1D gives correct results" begin + using Trixi equations = CompressibleEulerEquations3D(1.4) - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + solver = DGSEM(polydeg = 3, + surface_flux = FluxLaxFriedrichs(max_abs_speed_naive)) coordinates_min = (-1.0, -1.0, -1.0) coordinates_max = (+1.0, +1.0, +1.0) initial_refinement_level = 3 @@ -694,11 +692,11 @@ end end @timed_testset "plotting TimeIntegratorSolution" begin - @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_hypdiff_lax_friedrichs.jl"), maxiters=1, analysis_callback=Trixi.TrivialCallback(), initial_refinement_level=1) - @test_nowarn_mod Plots.plot(sol) + @trixi_test_nowarn Plots.plot(sol) end @timed_testset "VisualizationCallback" begin @@ -710,13 +708,13 @@ end ENV["GKSwstype"] = "100" end - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "tree_2d_dgsem", - 
"elixir_advection_amr_visualization.jl"), - visualization = VisualizationCallback(interval = 20, - clims = (0, 1), - plot_creator = Trixi.save_plot), - tspan = (0.0, 3.0)) + @test_trixi_include(joinpath(EXAMPLES_DIR, "tree_2d_dgsem", + "elixir_advection_amr_visualization.jl"), + visualization=VisualizationCallback(semi; + interval = 20, + clims = (0, 1), + plot_creator = Trixi.save_plot), + tspan=(0.0, 3.0)) @testset "elixir_advection_amr_visualization.jl with save_plot" begin @test isfile(joinpath(outdir, "solution_000000000.png")) @@ -725,10 +723,10 @@ end end @testset "show" begin - @test_nowarn_mod show(stdout, visualization) + @trixi_test_nowarn show(stdout, visualization) println(stdout) - @test_nowarn_mod show(stdout, "text/plain", visualization) + @trixi_test_nowarn show(stdout, "text/plain", visualization) println(stdout) end @@ -743,39 +741,38 @@ end end @timed_testset "Makie visualization tests for UnstructuredMesh2D" begin - @test_nowarn_mod trixi_include(@__MODULE__, - joinpath(examples_dir(), "unstructured_2d_dgsem", - "elixir_euler_wall_bc.jl")) + @test_trixi_include(joinpath(EXAMPLES_DIR, "unstructured_2d_dgsem", + "elixir_euler_wall_bc.jl")) # test interactive surface plot - @test_nowarn_mod Trixi.iplot(sol) + @trixi_test_nowarn Trixi.iplot(sol) # also test when using PlotData2D object @test PlotData2D(sol) isa Trixi.PlotData2DTriangulated - @test_nowarn_mod Makie.plot(PlotData2D(sol)) + @trixi_test_nowarn Makie.plot(PlotData2D(sol)) # test interactive ScalarPlotData2D plotting semi = sol.prob.p x = view(semi.cache.elements.node_coordinates, 1, :, :, :) # extracts the node x coordinates y = view(semi.cache.elements.node_coordinates, 2, :, :, :) # extracts the node x coordinates - @test_nowarn_mod iplot(ScalarPlotData2D(x .+ y, semi), plot_mesh = true) + @trixi_test_nowarn iplot(ScalarPlotData2D(x .+ y, semi), plot_mesh = true) # test heatmap plot - @test_nowarn_mod Makie.plot(sol, plot_mesh = true) + @trixi_test_nowarn Makie.plot(sol, plot_mesh = 
true) # test unpacking/iteration for FigureAndAxes fa = Makie.plot(sol) fig, axes = fa - @test_nowarn_mod Base.show(fa) === nothing - @test_nowarn_mod typeof(fig) <: Makie.Figure - @test_nowarn_mod typeof(axes) <: AbstractArray{<:Makie.Axis} + @trixi_test_nowarn Base.show(fa) === nothing + @trixi_test_nowarn typeof(fig) <: Makie.Figure + @trixi_test_nowarn typeof(axes) <: AbstractArray{<:Makie.Axis} # test plotting of constant solutions with Makie # related issue: https://github.com/MakieOrg/Makie.jl/issues/931 for i in eachindex(sol.u) fill!(sol.u[i], one(eltype(sol.u[i]))) end - @test_nowarn_mod Trixi.iplot(sol) + @trixi_test_nowarn Trixi.iplot(sol) end end