2 changes: 1 addition & 1 deletion NEWS.md
@@ -115,7 +115,7 @@ for human readability.

#### Changed

-- The numerical solution is wrapped in a `VectorOfArrays` from
+- The numerical solution is wrapped in a `VectorOfArray` from
[RecursiveArrayTools.jl](https://github.com/SciML/RecursiveArrayTools.jl)
for `DGMulti` solvers ([#2150]). You can use `Base.parent` to unwrap
the original data.
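For readers of this entry, a minimal sketch of the wrap/unwrap round trip described above (the array shapes are hypothetical, not DGMulti's actual layout):

```julia
using RecursiveArrayTools

# A hypothetical solution: 8 elements, each storing 4 variables at 16 nodes.
u = VectorOfArray([rand(4, 16) for _ in 1:8])

# `Base.parent` unwraps the original Vector of per-element arrays.
raw = parent(u)
@assert raw isa Vector{Matrix{Float64}}
@assert raw[3] === u.u[3]  # no copy: `parent` returns the wrapped data itself
```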
2 changes: 1 addition & 1 deletion Project.toml
@@ -99,7 +99,7 @@ PrecompileTools = "1.2"
Preferences = "1.4"
Printf = "1"
RecipesBase = "1.3.4"
RecursiveArrayTools = "3.31.1"
RecursiveArrayTools = "3.34.1"
Reexport = "1.2"
Requires = "1.3"
SciMLBase = "2.67.0"
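For anyone updating an existing environment against this compat bump, one quick way to check the resolved version against the new lower bound:

```julia
using Pkg
Pkg.status("RecursiveArrayTools")  # the resolved version should be >= 3.34.1
```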
1 change: 0 additions & 1 deletion src/Trixi.jl
@@ -134,7 +134,6 @@ include("basic_types.jl")

# Include all top-level source files
include("auxiliary/auxiliary.jl")
include("auxiliary/vector_of_arrays.jl")
include("auxiliary/mpi.jl")
include("auxiliary/p4est.jl")
include("auxiliary/t8code.jl")
31 changes: 0 additions & 31 deletions src/auxiliary/vector_of_arrays.jl

This file was deleted.
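The diff does not show the 31 deleted lines, so the following is only an assumed sketch of what a thin wrapper of this kind typically looks like, inferred from the old call sites (`... |> VecOfArrays`, `container[Threads.threadid()]`), not the file's actual contents:

```julia
# Assumed sketch -- NOT the deleted file's actual contents.
# A minimal wrapper around a plain Vector of arrays that forwards indexing.
struct VecOfArrays{T <: AbstractArray}
    arrays::Vector{T}
end

Base.getindex(v::VecOfArrays, i::Integer) = v.arrays[i]
Base.length(v::VecOfArrays) = length(v.arrays)
Base.eachindex(v::VecOfArrays) = eachindex(v.arrays)
```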

2 changes: 1 addition & 1 deletion src/solvers/dgsem_p4est/containers_parallel.jl
@@ -225,7 +225,7 @@ function Adapt.adapt_structure(to, mpi_mortars::P4estMPIMortarContainer)
# TODO: GPU
# Only parts of this container are adapted, since we currently don't
# use `local_neighbor_ids`, `local_neighbor_positions`, `normal_directions`
-# on the GPU. If we do need them we need to redesign this to use the VecOfArrays
+# on the GPU. If we do need them we need to redesign this to use the VectorOfArray
# approach.

_u = adapt(to, mpi_mortars._u)
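The `Adapt.adapt_structure` method in the context above is the standard Adapt.jl extension point for moving structs to a device. As a generic, hedged illustration of the partial-adaption pattern the TODO describes (the type and field names here are invented, not Trixi's container):

```julia
using Adapt

# Hypothetical container: only `data` must move to the device; the host-side
# metadata is carried over unchanged, like the fields skipped in the TODO above.
struct DeviceBuffers{D, M}
    data::D
    meta::M
end

function Adapt.adapt_structure(to, b::DeviceBuffers)
    return DeviceBuffers(adapt(to, b.data), b.meta)
end
```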
43 changes: 23 additions & 20 deletions src/solvers/dgsem_p4est/dg_3d.jl
@@ -10,21 +10,24 @@
function create_cache(mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations,
mortar_l2::LobattoLegendreMortarL2, uEltype)
# TODO: Taal compare performance of different types
-    fstar_primary_threaded = [Array{uEltype, 4}(undef, nvariables(equations),
-                                                nnodes(mortar_l2),
-                                                nnodes(mortar_l2), 4)
-                              for _ in 1:Threads.nthreads()] |> VecOfArrays
-    fstar_secondary_threaded = [Array{uEltype, 4}(undef, nvariables(equations),
-                                                  nnodes(mortar_l2),
-                                                  nnodes(mortar_l2), 4)
-                                for _ in 1:Threads.nthreads()] |> VecOfArrays
-
-    fstar_tmp_threaded = [Array{uEltype, 3}(undef, nvariables(equations),
-                                            nnodes(mortar_l2), nnodes(mortar_l2))
-                          for _ in 1:Threads.nthreads()] |> VecOfArrays
-    u_threaded = [Array{uEltype, 3}(undef, nvariables(equations), nnodes(mortar_l2),
-                                    nnodes(mortar_l2))
-                  for _ in 1:Threads.nthreads()] |> VecOfArrays
+    fstar_primary_threaded = VectorOfArray([Array{uEltype, 4}(undef,
+                                                              nvariables(equations),
+                                                              nnodes(mortar_l2),
+                                                              nnodes(mortar_l2), 4)
+                                            for _ in 1:Threads.nthreads()])
+    fstar_secondary_threaded = VectorOfArray([Array{uEltype, 4}(undef,
+                                                                nvariables(equations),
+                                                                nnodes(mortar_l2),
+                                                                nnodes(mortar_l2), 4)
+                                              for _ in 1:Threads.nthreads()])
+    fstar_tmp_threaded = VectorOfArray([Array{uEltype, 3}(undef, nvariables(equations),
+                                                          nnodes(mortar_l2),
+                                                          nnodes(mortar_l2))
+                                        for _ in 1:Threads.nthreads()])
+    u_threaded = VectorOfArray([Array{uEltype, 3}(undef, nvariables(equations),
+                                                  nnodes(mortar_l2),
+                                                  nnodes(mortar_l2))
+                                for _ in 1:Threads.nthreads()])

(; fstar_primary_threaded, fstar_secondary_threaded, fstar_tmp_threaded, u_threaded)
end
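The hunk above swaps the custom `[...] |> VecOfArrays` construction for `VectorOfArray` from RecursiveArrayTools.jl. A condensed sketch of the new pattern (sizes are illustrative):

```julia
using RecursiveArrayTools

uEltype = Float64
nvars, nn = 5, 4  # illustrative sizes

# One pre-allocated scratch array per thread, wrapped in a VectorOfArray.
fstar_tmp_threaded = VectorOfArray([Array{uEltype, 3}(undef, nvars, nn, nn)
                                    for _ in 1:Threads.nthreads()])

# Call sites fetch the thread-local buffer through the wrapper's `u` field
# (see the hunks below) instead of `getindex`, whose `VectorOfArray` semantics
# follow the stacked, multi-dimensional view of the data.
fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]
```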
@@ -519,9 +522,9 @@ function prolong2mortars!(cache, u,

# Buffer to copy solution values of the large element in the correct orientation
# before interpolating
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]
# temporary buffer for projections
-fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]

# Copy solution of large element face to buffer in the
# correct orientation
@@ -590,9 +593,9 @@ function calc_mortar_flux!(surface_flux_values,

@threaded for mortar in eachmortar(dg, cache)
# Choose thread-specific pre-allocated container
-fstar_primary = fstar_primary_threaded[Threads.threadid()]
-fstar_secondary = fstar_secondary_threaded[Threads.threadid()]
-fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+fstar_primary = fstar_primary_threaded.u[Threads.threadid()]
+fstar_secondary = fstar_secondary_threaded.u[Threads.threadid()]
+fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]

# Get index information on the small elements
small_indices = node_indices[1, mortar]
@@ -638,7 +641,7 @@

# Buffer to interpolate flux values of the large element to before
# copying in the correct orientation
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]

# in calc_interface_flux!, the interface flux is computed once over each
# interface using the normal from the "primary" element. The result is then
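A note on the `.u[Threads.threadid()]` spelling repeated in this and the following files: `VectorOfArray` stores the wrapped arrays in its `u` field, and `Base.parent` returns that same vector, so both spellings below fetch the thread-local buffer without copying. A small equivalence sketch:

```julia
using RecursiveArrayTools

buffers = VectorOfArray([zeros(2, 2) for _ in 1:Threads.nthreads()])

a = buffers.u[Threads.threadid()]        # field access, as used in this PR
b = parent(buffers)[Threads.threadid()]  # equivalent via Base.parent
@assert a === b
```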
10 changes: 5 additions & 5 deletions src/solvers/dgsem_p4est/dg_3d_parabolic.jl
@@ -697,10 +697,10 @@ function prolong2mortars_divergence!(cache, flux_viscous,

# Buffer to copy solution values of the large element in the correct orientation
# before interpolating
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]

# temporary buffer for projections
-fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]

# Copy solution of large element face to buffer in the
# correct orientation
@@ -787,8 +787,8 @@ function calc_mortar_flux_divergence!(surface_flux_values,

@threaded for mortar in eachmortar(dg, cache)
# Choose thread-specific pre-allocated container
-fstar = fstar_primary_threaded[Threads.threadid()]
-fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+fstar = fstar_primary_threaded.u[Threads.threadid()]
+fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]

# Get index information on the small elements
small_indices = node_indices[1, mortar]
@@ -831,7 +831,7 @@

# Buffer to interpolate flux values of the large element to before
# copying in the correct orientation
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]

# this reuses the hyperbolic version of `mortar_fluxes_to_elements!`
mortar_fluxes_to_elements!(surface_flux_values,
12 changes: 6 additions & 6 deletions src/solvers/dgsem_p4est/dg_3d_parallel.jl
@@ -335,9 +335,9 @@ function prolong2mpimortars!(cache, u,
if position == 5 # -> large element
# Buffer to copy solution values of the large element in the correct orientation
# before interpolating
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]
# temporary buffer for projections
-fstar_tmp = cache.fstar_tmp_threaded[Threads.threadid()]
+fstar_tmp = cache.fstar_tmp_threaded.u[Threads.threadid()]

i_large = i_large_start
j_large = j_large_start
@@ -423,9 +423,9 @@ function calc_mpi_mortar_flux!(surface_flux_values,

@threaded for mortar in eachmpimortar(dg, cache)
# Choose thread-specific pre-allocated container
-fstar_primary = fstar_primary_threaded[Threads.threadid()]
-fstar_secondary = fstar_secondary_threaded[Threads.threadid()]
-fstar_tmp = fstar_tmp_threaded[Threads.threadid()]
+fstar_primary = fstar_primary_threaded.u[Threads.threadid()]
+fstar_secondary = fstar_secondary_threaded.u[Threads.threadid()]
+fstar_tmp = fstar_tmp_threaded.u[Threads.threadid()]

# Get index information on the small elements
small_indices = node_indices[1, mortar]
@@ -465,7 +465,7 @@

# Buffer to interpolate flux values of the large element to before
# copying in the correct orientation
-u_buffer = cache.u_threaded[Threads.threadid()]
+u_buffer = cache.u_threaded.u[Threads.threadid()]

mpi_mortar_fluxes_to_elements!(surface_flux_values,
mesh, equations, mortar_l2, dg, cache,