diff --git a/CHANGELOG.md b/CHANGELOG.md index 95876f0a..1998b4ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Version [v0.5.3] - 2026-02-19 ### Added -* No new functionality added +* Added optional adaptive time stepping based on Courant number control (`AdaptiveTimeStepping`) [#98](@ref) ### Fixed diff --git a/examples/2D_flatplate_fixedT_KOmega.jl b/examples/2D_flatplate_fixedT_KOmega.jl index 1d17947a..dc96f04b 100644 --- a/examples/2D_flatplate_fixedT_KOmega.jl +++ b/examples/2D_flatplate_fixedT_KOmega.jl @@ -99,7 +99,8 @@ solvers = ( solver = Gmres(), # Bicgstab(), Gmres() preconditioner = Jacobi(), convergence = 1e-7, - relax = 0.3, + # relax = 0.3, + relax = 0.2, atol = 1e-2 ), h = SolverSetup( diff --git a/src/Calculate/Calculate_0_gradient.jl b/src/Calculate/Calculate_0_gradient.jl index a775b6ea..ccff5514 100644 --- a/src/Calculate/Calculate_0_gradient.jl +++ b/src/Calculate/Calculate_0_gradient.jl @@ -255,4 +255,9 @@ function grad!(grad::Grad{Midpoint,F,R,I,M}, phif, phi, BCs, time, config) where correct_interpolation!(grad, phif, phi, config) green_gauss!(grad, phif, config) end +end + +# This is a gradient method that does not take BCs as arguments (implicitly assuming that the gradient at boundaries is 0) +function grad!(grad::Grad{Gauss,F,R,I,M}, phif, phi, time, config) where {F,R<:VectorField,I,M} + green_gauss!(grad, phif, config) end \ No newline at end of file diff --git a/src/Discretise/Discretise_1_schemes.jl b/src/Discretise/Discretise_1_schemes.jl index c8bc324c..5fa41abb 100644 --- a/src/Discretise/Discretise_1_schemes.jl +++ b/src/Discretise/Discretise_1_schemes.jl @@ -30,7 +30,7 @@ end @inline scheme_source!( term::Operator{F,P,I,Time{Euler}}, cell, cID, cIndex, prev, runtime) where {F,P,I} = begin volume = cell.volume - vol_rdt = volume/runtime.dt + vol_rdt = volume/runtime.dt[1] # Increment sparse and b arrays ac = vol_rdt @@ -48,7 +48,7 
@@ end @inline scheme_source!( term::Operator{F,P,I,Time{CrankNicolson}}, cell, cID, cIndex, prev, runtime) where {F,P,I} = begin volume = cell.volume - vol_rdt = volume/runtime.dt + vol_rdt = volume/runtime.dt[1] # Increment sparse and b arrays ac = vol_rdt diff --git a/src/ModelPhysics/0_type_definition.jl b/src/ModelPhysics/0_type_definition.jl index cfc1b16b..457db290 100644 --- a/src/ModelPhysics/0_type_definition.jl +++ b/src/ModelPhysics/0_type_definition.jl @@ -4,6 +4,7 @@ export Momentum export AbstractTimeModel export Transient, Steady + """ struct Physics{T,F,SO,M,Tu,E,D,BI} time::T @@ -159,4 +160,4 @@ end mutable struct ModelState{T1} residuals::T1 converged::Bool -end +end \ No newline at end of file diff --git a/src/ModelPhysics/Energy/Energy.jl b/src/ModelPhysics/Energy/Energy.jl index bdf57ebb..aff4e932 100644 --- a/src/ModelPhysics/Energy/Energy.jl +++ b/src/ModelPhysics/Energy/Energy.jl @@ -19,7 +19,6 @@ include("energy_types.jl") # Energy models include("Sensible_Enthalpy.jl") -# include("Laplace_Energy.jl") include("Conduction.jl") # Property Models diff --git a/src/ModelPhysics/Energy/Sensible_Enthalpy.jl b/src/ModelPhysics/Energy/Sensible_Enthalpy.jl index f8d9849d..b11b7600 100644 --- a/src/ModelPhysics/Energy/Sensible_Enthalpy.jl +++ b/src/ModelPhysics/Energy/Sensible_Enthalpy.jl @@ -159,7 +159,7 @@ function energy!( # Kbounded = ScalarField(mesh) Pr = model.fluid.Pr - dt = runtime.dt + dt = runtime.dt[1] # Pre-allocate auxiliary variables TF = _get_float(mesh) diff --git a/src/ModelPhysics/ModelPhysics.jl b/src/ModelPhysics/ModelPhysics.jl index 56d36e97..65f9327e 100644 --- a/src/ModelPhysics/ModelPhysics.jl +++ b/src/ModelPhysics/ModelPhysics.jl @@ -34,4 +34,5 @@ include("Turbulence/Turbulence.jl") include("FluidProperties/FluidProperties.jl") + end # end module \ No newline at end of file diff --git a/src/ModelPhysics/Turbulence/RANS_laminar.jl b/src/ModelPhysics/Turbulence/RANS_laminar.jl index 77193196..fc991945 100644 --- 
a/src/ModelPhysics/Turbulence/RANS_laminar.jl +++ b/src/ModelPhysics/Turbulence/RANS_laminar.jl @@ -102,6 +102,19 @@ function save_output(model::Physics{T,F,SO,M,Tu,E,D,BI}, outputWriter, iteration write_results(iteration, time, model.domain, outputWriter, config.boundaries, args...) end +# function save_output(model::Physics{T,F,SO,M,Tu,E,D,BI}, outputWriter, iteration, time, config +# ) where {T,F<:Multiphase,SO,M,Tu<:Laminar,E<:Nothing,D,BI} + +# args = ( +# ("U", model.momentum.U), +# ("p", model.momentum.p), +# ("alpha", model.fluid.alpha), +# ("rho", model.fluid.rho), +# # ("p_rgh", model.fluid.p_rgh) +# ) +# write_results(iteration, time, model.domain, outputWriter, config.boundaries, args...) +# end + function save_output(model::Physics{T,F,SO,M,Tu,E,D,BI}, outputWriter, iteration, time, config ) where {T,F,SO,M,Tu<:Laminar,E<:Nothing,D,BI} args = ( diff --git a/src/Simulate/Simulate_0_types.jl b/src/Simulate/Simulate_0_types.jl index eb62c3fd..ef9fd6f1 100644 --- a/src/Simulate/Simulate_0_types.jl +++ b/src/Simulate/Simulate_0_types.jl @@ -2,12 +2,12 @@ export Configuration export Hardware """ - @kwdef struct Configuration{SC,SL,RT,HW,PP} + struct Configuration{SC,SL,RT,HW,PP} schemes::SC solvers::SL runtime::RT hardware::HW - postprocess::PP = nothing + postprocess::PP end The `Configuration` type is passed to all flow solvers and provides all the relevant information to run a simulation. 
@@ -29,16 +29,29 @@ config = Configuration( solvers=solvers, schemes=schemes, runtime=runtime, hardware=hardware, boundaries=BCs) ``` """ -@kwdef struct Configuration{SC,SL,RT,HW,BC,PP} +struct Configuration{SC,SL,RT,HW,BC,PP} schemes::SC solvers::SL runtime::RT hardware::HW boundaries::BC - postprocess::PP = nothing + postprocess::PP end Adapt.@adapt_structure Configuration +Configuration(; schemes, solvers, runtime, hardware, boundaries, postprocess=nothing) = begin +Configuration( + schemes, + solvers, + adapt(hardware.backend, runtime), + hardware, + boundaries, + postprocess +) +end + + + """ hardware = Hardware(backend, workgroup) diff --git a/src/Solve/Solve_1_api.jl b/src/Solve/Solve_1_api.jl index ce05d82c..0a153a72 100644 --- a/src/Solve/Solve_1_api.jl +++ b/src/Solve/Solve_1_api.jl @@ -3,6 +3,7 @@ export explicit_relaxation!, implicit_relaxation!, implicit_relaxation_diagdom!, export solve_system! export solve_equation! export residual! +export AdaptiveTimeStepping struct SolverSetup{ F<:AbstractFloat, @@ -81,11 +82,50 @@ SolverSetup(; float_type(atol), float_type(rtol)) -struct Runtime{I<:Integer,F<:AbstractFloat} +struct AdaptiveTimeStepping{F<:AbstractFloat} + maxCo::F + minShrink::F + maxGrow::F +end +Adapt.@adapt_structure AdaptiveTimeStepping + +""" + AdaptiveTimeStepping(; + # keyword arguments + + maxCo=0.75, + minShrink=0.1, + maxGrow=1.2 + ) + +Constructs an `AdaptiveTimeStepping` object used to control automatic time-step adjustment +based on the Courant number. + +This struct is passed optionally to `Runtime` and enables adaptive time stepping in transient +simulations. If not provided, a fixed time step is used. + +# Input arguments + +- `maxCo::AbstractFloat`: target maximum Courant number. The time step will be adjusted + such that the computed Courant number approaches this value. +- `minShrink::AbstractFloat`: lower bound on the multiplicative factor applied to the + current time step. 
Prevents excessively large reductions in a single update. +- `maxGrow::AbstractFloat`: upper bound on the multiplicative factor applied to the + current time step. Prevents excessive time-step growth. +""" +AdaptiveTimeStepping(; + maxCo=0.75, + minShrink=0.1, + maxGrow=1.2 +) = AdaptiveTimeStepping(float(maxCo), float(minShrink), float(maxGrow)) + +struct Runtime{I<:Integer,F<:AbstractFloat, V<:AbstractVector{F}, A<:Union{Nothing, AdaptiveTimeStepping}} iterations::I - dt::F + dt::V write_interval::I + adaptive::A end +Adapt.@adapt_structure Runtime """ Runtime(; @@ -93,7 +133,8 @@ end iterations::I, write_interval::I, - time_step::N + time_step::N, + adaptive::A ) where {I<:Integer,N<:Number} = begin # returned Runtime struct @@ -101,7 +142,8 @@ end ( iterations=iterations, dt=time_step, - write_interval=write_interval + write_interval=write_interval, + adaptive=adaptive ) end @@ -112,6 +154,7 @@ This is a convenience function to set the top-level runtime information. The inp - `iterations::Integer`: specifies the number of iterations in a simulation run. - `write_interval::Integer`: defines how often simulation results are written to file (on the current working directory). The interval is currently based on number of iterations. Set to `-1` to run without writing results to file. - `time_step::AbstractFloat`: the time step to use in the simulation. Notice that for steady solvers this is simply a counter and it is recommended to simply use `1`. +- `adaptive::Union{Nothing, AdaptiveTimeStepping}`: optionally enables adaptive time stepping. Pass an `AdaptiveTimeStepping` object to automatically adjust `dt` based on the Courant number during transient simulations. Defaults to `nothing`, meaning a fixed time step is used. # Example @@ -119,8 +162,13 @@ This is a convenience function to set the top-level runtime information. 
The inp runtime = Runtime(iterations=2000, time_step=1, write_interval=2000) ``` """ -Runtime(; iterations::I, write_interval::I, time_step::N) where {I<:Integer,N<:Number} = begin - Runtime(iterations, float(time_step), write_interval) +Runtime(; iterations::I, + write_interval::I, + time_step::N, + adaptive=nothing) where {I<:Integer,N<:Number} = begin + + val = float(time_step) + Runtime(iterations, [val], write_interval, adaptive) end # Set schemes function definition with default set variables diff --git a/src/Solvers/Solvers_0_functions.jl b/src/Solvers/Solvers_0_functions.jl index 8b50c47d..544a35d3 100644 --- a/src/Solvers/Solvers_0_functions.jl +++ b/src/Solvers/Solvers_0_functions.jl @@ -267,7 +267,7 @@ end @kernel function _max_courant_number!(cellsCourant, U, runtime, mesh::Mesh3) i = @index(Global) @uniform cells = mesh.cells - dt = runtime.dt + dt = runtime.dt[1] umag = norm(U[i]) volume = cells[i].volume dx = volume^0.333333 @@ -277,9 +277,20 @@ end @kernel function _max_courant_number!(cellsCourant, U, runtime, mesh::Mesh2) i = @index(Global) @uniform cells = mesh.cells - dt = runtime.dt + dt = runtime.dt[1] umag = norm(U[i]) volume = cells[i].volume dx = volume^0.5 cellsCourant[i] = umag * dt / dx +end + + +update_dt!(runtime::Runtime{<:Any,<:Any,<:Any,Nothing}, courant) = nothing + +function update_dt!(runtime::Runtime{<:Any,<:Any,<:Any,<:AdaptiveTimeStepping}, courant) + (; maxCo, maxGrow, minShrink) = runtime.adaptive + + courant_factor = maxCo / (courant + eps()) + new_dt_factor = clamp(courant_factor, minShrink, maxGrow) + runtime.dt .= runtime.dt .* new_dt_factor end \ No newline at end of file diff --git a/src/Solvers/Solvers_1_CSIMPLE.jl b/src/Solvers/Solvers_1_CSIMPLE.jl index 9bf96db3..596eb05a 100644 --- a/src/Solvers/Solvers_1_CSIMPLE.jl +++ b/src/Solvers/Solvers_1_CSIMPLE.jl @@ -131,8 +131,11 @@ function CSIMPLE( (; iterations, write_interval,dt) = runtime (; backend) = hardware + dt_cpu = zeros(_get_float(mesh), 1) + copyto!(dt_cpu, 
config.runtime.dt) + # rho = get_flux(U_eqn, 1) - postprocess = convert_time_to_iterations(postprocess,model,dt,iterations) + postprocess = convert_time_to_iterations(postprocess,model,dt_cpu[1],iterations) mdotf = get_flux(U_eqn, 2) mueff = get_flux(U_eqn, 3) mueffgradUt = get_source(U_eqn, 2) diff --git a/src/Solvers/Solvers_1_LAPLACE.jl b/src/Solvers/Solvers_1_LAPLACE.jl index cb49ee2c..f0da8c49 100644 --- a/src/Solvers/Solvers_1_LAPLACE.jl +++ b/src/Solvers/Solvers_1_LAPLACE.jl @@ -125,8 +125,10 @@ function LAPLACE( (; iterations, write_interval, dt) = runtime (; backend) = hardware + dt_cpu = zeros(_get_float(mesh), 1) + copyto!(dt_cpu, config.runtime.dt) - postprocess = convert_time_to_iterations(postprocess,model,dt,iterations) + postprocess = convert_time_to_iterations(postprocess,model,dt_cpu[1],iterations) @info "Starting LAPLACE loops..." progress = Progress(iterations; dt=1.0, showspeed=true) @@ -154,7 +156,7 @@ function LAPLACE( ProgressMeter.next!( progress, showvalues = [ - (:time, iteration*runtime.dt), + (:time, iteration*dt_cpu[1]), (:T_residual, R_T[iteration]) ] ) diff --git a/src/Solvers/Solvers_1_SIMPLE.jl b/src/Solvers/Solvers_1_SIMPLE.jl index 141e3966..936ec328 100644 --- a/src/Solvers/Solvers_1_SIMPLE.jl +++ b/src/Solvers/Solvers_1_SIMPLE.jl @@ -112,8 +112,11 @@ function SIMPLE( (; solvers, schemes, runtime, hardware, boundaries, postprocess) = config (; iterations, write_interval,dt) = runtime (; backend) = hardware + + dt_cpu = zeros(_get_float(mesh), 1) + copyto!(dt_cpu, config.runtime.dt) - postprocess = convert_time_to_iterations(postprocess,model,dt,iterations) + postprocess = convert_time_to_iterations(postprocess,model,dt_cpu[1],iterations) mdotf = get_flux(U_eqn, 2) nueff = get_flux(U_eqn, 3) rDf = get_flux(p_eqn, 1) @@ -150,7 +153,6 @@ function SIMPLE( grad!(∇p, pf, p, boundaries.p, time, config) limit_gradient!(schemes.p.limiter, ∇p, p, config) - update_nueff!(nueff, nu, model.turbulence, config) @info "Starting SIMPLE loops..." 
diff --git a/src/Solvers/Solvers_2_CPISO.jl b/src/Solvers/Solvers_2_CPISO.jl index 01ead3f3..06dc8e34 100644 --- a/src/Solvers/Solvers_2_CPISO.jl +++ b/src/Solvers/Solvers_2_CPISO.jl @@ -146,8 +146,11 @@ function CPISO( (; solvers, schemes, runtime, hardware, boundaries,postprocess) = config (; iterations, write_interval, dt) = runtime (; backend) = hardware + + dt_cpu = zeros(_get_float(mesh), 1) + copyto!(dt_cpu, config.runtime.dt) - postprocess = convert_time_to_iterations(postprocess,model,dt,iterations) + postprocess = convert_time_to_iterations(postprocess,model,dt_cpu[1],iterations) mdotf = get_flux(U_eqn, 2) mueff = get_flux(U_eqn, 3) mueffgradUt = get_source(U_eqn, 2) @@ -225,7 +228,8 @@ function CPISO( progress = Progress(iterations; dt=1.0, showspeed=true) for iteration ∈ 1:iterations - time = iteration *dt + copyto!(dt_cpu, config.runtime.dt) + time += dt_cpu[1] ## CHECK GRADU AND EXPLICIT STRESSES # grad!(gradU, Uf, U, boundaries.U, time, config) # calculated in `turbulence!` @@ -281,7 +285,7 @@ function CPISO( flux!(mdotf, Uf, config) @. mdotf.values *= rhof.values @. corr -= mdotf.values - @. corr *= 0.0/runtime.dt + @. corr *= 0.0/dt_cpu[1] @. mdotf.values += rhorDf.values*corr/rhof.values div!(divHv, mdotf, config) end @@ -327,13 +331,15 @@ function CPISO( # Velocity and boundaries correction correct_velocity!(U, Hv, ∇p, rD, config) # why is this not rhorD? - @. dpdt.values = (p.values-prev)/runtime.dt + @. 
dpdt.values = (p.values-prev)/dt_cpu[1] turbulence!(turbulenceModel, model, S, prev, time, config) update_nueff!(nueff, nu, model.turbulence, config) end # corrector loop end - maxCourant = max_courant_number!(cellsCourant, model, config) + courant = max_courant_number!(cellsCourant, model, config) + + update_dt!(config.runtime, courant) R_ux[iteration] = rx R_uy[iteration] = ry @@ -342,8 +348,8 @@ function CPISO( ProgressMeter.next!( progress, showvalues = [ - (:time, iteration*runtime.dt), - (:Courant, maxCourant), + (:time, iteration*dt_cpu[1]), + (:Courant, courant), (:Ux, R_ux[iteration]), (:Uy, R_uy[iteration]), (:Uz, R_uz[iteration]), diff --git a/src/Solvers/Solvers_2_PISO.jl b/src/Solvers/Solvers_2_PISO.jl index af97c7c4..2b277daf 100644 --- a/src/Solvers/Solvers_2_PISO.jl +++ b/src/Solvers/Solvers_2_PISO.jl @@ -49,8 +49,11 @@ function PISO( (; solvers, schemes, runtime, hardware, boundaries, postprocess) = config (; iterations, write_interval, dt) = runtime (; backend) = hardware + + dt_cpu = zeros(_get_float(mesh), 1) + copyto!(dt_cpu, config.runtime.dt) - postprocess = convert_time_to_iterations(postprocess,model,dt,iterations) + postprocess = convert_time_to_iterations(postprocess,model,dt_cpu[1],iterations) mdotf = get_flux(U_eqn, 2) nueff = get_flux(U_eqn, 3) rDf = get_flux(p_eqn, 1) @@ -103,7 +106,8 @@ function PISO( @time for iteration ∈ 1:iterations - time = iteration *dt + copyto!(dt_cpu, config.runtime.dt) + time += dt_cpu[1] rx, ry, rz = solve_equation!( U_eqn, U, boundaries.U, solvers.U, xdir, ydir, zdir, config; time=time) @@ -172,8 +176,12 @@ function PISO( turbulence!(turbulenceModel, model, S, prev, time, config) update_nueff!(nueff, nu, model.turbulence, config) - maxCourant = max_courant_number!(cellsCourant, model, config) + courant = max_courant_number!(cellsCourant, model, config) + + update_dt!(config.runtime, courant) + + R_ux[iteration] = rx R_uy[iteration] = ry R_uz[iteration] = rz @@ -181,8 +189,9 @@ function PISO( 
ProgressMeter.next!( progress, showvalues = [ - (:time, iteration*runtime.dt), - (:Courant, maxCourant), + (:dt, dt_cpu[1]), + (:time, time), + (:Courant, courant), (:Ux, R_ux[iteration]), (:Uy, R_uy[iteration]), (:Uz, R_uz[iteration]), diff --git a/src/Solvers/Solvers_3_solver_dispatch.jl b/src/Solvers/Solvers_3_solver_dispatch.jl index 210445ce..4b343e27 100644 --- a/src/Solvers/Solvers_3_solver_dispatch.jl +++ b/src/Solvers/Solvers_3_solver_dispatch.jl @@ -44,8 +44,6 @@ residuals.p """ run!() = nothing # dummy function for providing general documentation - - # Laplace solver (steady) """ run!( diff --git a/test/0_TEST_CASES/2d_incompressible_transient_cylinder_oscillating.jl b/test/0_TEST_CASES/2d_incompressible_transient_cylinder_oscillating.jl index 145d0d51..4cc4d86c 100644 --- a/test/0_TEST_CASES/2d_incompressible_transient_cylinder_oscillating.jl +++ b/test/0_TEST_CASES/2d_incompressible_transient_cylinder_oscillating.jl @@ -8,7 +8,8 @@ mesh_file = joinpath(grids_dir, grid) mesh = UNV2D_mesh(mesh_file, scale=0.001) # backend = CUDABackend(); workgroup = 32 -backend = CPU(); workgroup = 1024; activate_multithread(backend) +# backend = CPU(); workgroup = 1024; activate_multithread(backend) +backend = CPU(); workgroup = 1024 hardware = Hardware(backend=backend, workgroup=workgroup) mesh_dev = adapt(backend, mesh) diff --git a/test/0_TEST_CASES/2d_laplace_steady.jl b/test/0_TEST_CASES/2d_laplace_steady.jl index b91cff42..12cd464c 100644 --- a/test/0_TEST_CASES/2d_laplace_steady.jl +++ b/test/0_TEST_CASES/2d_laplace_steady.jl @@ -13,7 +13,8 @@ mesh_file = joinpath(grids_dir, grid) mesh = UNV2D_mesh(mesh_file) @test typeof(mesh) <: Mesh2 -backend = CPU(); workgroup = 1024; activate_multithread(backend) +# backend = CPU(); workgroup = 1024; activate_multithread(backend) +backend = CPU(); workgroup = 1024 hardware = Hardware(backend=backend, workgroup=workgroup) diff --git a/test/0_TEST_CASES/adaptive_dt.jl b/test/0_TEST_CASES/adaptive_dt.jl new file mode 100644 
index 00000000..be3ac27d --- /dev/null +++ b/test/0_TEST_CASES/adaptive_dt.jl @@ -0,0 +1,173 @@ +# The idea is to run four cases: without adaptive time-stepping, and with it when maxCo=0.25,0.5,0.75 +# The number of iterations for each individual case was selected so that the final simulation time is approximately the same (10 seconds): + + # 2000 iterations: non-adaptive + # 2867 iterations: maxCo=0.25 + # 1434 iterations: maxCo=0.5 + # 957 iterations: maxCo=0.75 + +# Then we compare if the average velocity magnitude at the outlet is identical across all these cases despite different dt + + +using XCALibre + +grids_dir = pkgdir(XCALibre, "examples/0_GRIDS") +grid = "backwardFacingStep_10mm.unv" +mesh_file = joinpath(grids_dir, grid) + +mesh = UNV2D_mesh(mesh_file, scale=0.001) + +# backend = CUDABackend(); workgroup = 32 +backend = CPU(); workgroup = 1024; activate_multithread(backend) + +hardware = Hardware(backend=backend, workgroup=workgroup) +mesh_dev = adapt(backend, mesh) + +velocity = [0.5, 0.0, 0.0] +nu = 1e-3 +Re = velocity[1]*0.1/nu + +model = Physics( + time = Transient(), + fluid = Fluid{Incompressible}(nu = nu), + turbulence = RANS{Laminar}(), + energy = Energy{Isothermal}(), + domain = mesh_dev + ) + +function inlet_velocity(coords, time, index) + ux = 0.5 + 0.1 * sin(2π*0.1*time) + return SVector(ux, 0.0, 0.0) +end + +BCs = assign( + region = mesh_dev, + ( + U = [ + DirichletFunction(:inlet, inlet_velocity), + Zerogradient(:outlet), + Wall(:wall, [0.0, 0.0, 0.0]), + Wall(:top, [0.0, 0.0, 0.0]) + ], + p = [ + Zerogradient(:inlet), + Dirichlet(:outlet, 0.0), + Wall(:wall), + Wall(:top) + ] + ) +) + +schemes = ( + U = Schemes(time=Euler), + p = Schemes() +) + + +solvers = ( + U = SolverSetup( + solver = Bicgstab(), # Bicgstab(), Gmres() + preconditioner = Jacobi(), + convergence = 1e-7, + relax = 1.0, + ), + p = SolverSetup( + solver = Cg(), # Bicgstab(), Gmres() + preconditioner = Jacobi(), + convergence = 1e-7, + relax = 1.0, + ) +) + +runtime = 
Runtime( + iterations=2000, time_step=0.005, write_interval=10) + +config = Configuration( + solvers=solvers, schemes=schemes, runtime=runtime, hardware=hardware, boundaries=BCs) + +GC.gc() + +initialise!(model.momentum.U, velocity) +initialise!(model.momentum.p, 0.0) + +residuals = run!(model, config) # 9.39k allocs + +outlet_result_1 = boundary_average(:outlet, model.momentum.U, BCs.U, config) + + + +adaptive = AdaptiveTimeStepping( + maxCo=0.25, + minShrink=0.1, + maxGrow=1.2 + ) + +runtime = Runtime( + iterations=2867, time_step=0.005, write_interval=10, adaptive=adaptive) + +config = Configuration( + solvers=solvers, schemes=schemes, runtime=runtime, hardware=hardware, boundaries=BCs) + +GC.gc() + +initialise!(model.momentum.U, velocity) +initialise!(model.momentum.p, 0.0) + +residuals = run!(model, config) # 9.39k allocs + +outlet_result_2 = boundary_average(:outlet, model.momentum.U, BCs.U, config) + + + +adaptive = AdaptiveTimeStepping( + maxCo=0.5, + minShrink=0.1, + maxGrow=1.2 + ) + +runtime = Runtime( + iterations=1434, time_step=0.005, write_interval=10, adaptive=adaptive) + +config = Configuration( + solvers=solvers, schemes=schemes, runtime=runtime, hardware=hardware, boundaries=BCs) + +GC.gc() + +initialise!(model.momentum.U, velocity) +initialise!(model.momentum.p, 0.0) + +residuals = run!(model, config) # 9.39k allocs + +outlet_result_3 = boundary_average(:outlet, model.momentum.U, BCs.U, config) + + + +adaptive = AdaptiveTimeStepping( + maxCo=0.75, + minShrink=0.1, + maxGrow=1.2 + ) + +runtime = Runtime( + iterations=957, time_step=0.005, write_interval=10, adaptive=adaptive) + +config = Configuration( + solvers=solvers, schemes=schemes, runtime=runtime, hardware=hardware, boundaries=BCs) + +GC.gc() + +initialise!(model.momentum.U, velocity) +initialise!(model.momentum.p, 0.0) + +residuals = run!(model, config) # 9.39k allocs + +outlet_result_4 = boundary_average(:outlet, model.momentum.U, BCs.U, config) + +n1 = norm(outlet_result_1) +n2 = 
norm(outlet_result_2) +n3 = norm(outlet_result_3) +n4 = norm(outlet_result_4) + +@test isapprox(n2, n1; rtol=0.01) +@test isapprox(n3, n1; rtol=0.01) +@test isapprox(n4, n1; rtol=0.01) \ No newline at end of file diff --git a/test/Project.toml b/test/Project.toml index b7eb6116..020e4659 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -7,3 +7,4 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +ThreadPinning = "811555cd-349b-4f26-b7bc-1f208b848042" diff --git a/test/runtests.jl b/test/runtests.jl index c67d8b27..fe4411ea 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -3,9 +3,15 @@ using LinearAlgebra using SparseArrays using SparseMatricesCSR using StaticArrays +using ThreadPinning using Test -BLAS.set_num_threads(1) +# @info "Pinning Threads" +# pinthreads(:cores) + +# @info "Setting BLAS threads to 1" +# BLAS.set_num_threads(1) + workgroupsize(mesh) = length(mesh.cells) ÷ Threads.nthreads() TEST_CASES_DIR = pkgdir(XCALibre, "test/0_TEST_CASES") @@ -49,6 +55,18 @@ TEST_CASES_DIR = pkgdir(XCALibre, "test/0_TEST_CASES") end end + @testset "Adaptive time-stepping Test" begin + + test_files = [ + "adaptive_dt.jl" + ] + + for test ∈ test_files + test_path = joinpath(TEST_CASES_DIR, test) + include(test_path) + end + end + @testset "Post-processing unit Test" begin include("unit_test_field_average.jl") include("unit_test_field_rms.jl") diff --git a/test/unit_test_laplace.jl b/test/unit_test_laplace.jl index ec4e74e4..4c6a485b 100644 --- a/test/unit_test_laplace.jl +++ b/test/unit_test_laplace.jl @@ -13,7 +13,8 @@ mesh_file = joinpath(grids_dir, grid) mesh = UNV2D_mesh(mesh_file) -backend = CPU(); workgroup = 1024; activate_multithread(backend) +# backend = CPU(); workgroup = 1024; activate_multithread(backend) +backend = CPU(); workgroup = 1024 hardware = Hardware(backend=backend, 
workgroup=workgroup) diff --git a/test/unit_test_setFields.jl b/test/unit_test_setFields.jl index 95a55fe8..18d0594c 100644 --- a/test/unit_test_setFields.jl +++ b/test/unit_test_setFields.jl @@ -6,7 +6,8 @@ mesh_file = joinpath(grids_dir, grid) mesh = UNV2D_mesh(mesh_file) -backend = CPU(); workgroup = AutoTune(); activate_multithread(backend) +# backend = CPU(); workgroup = AutoTune(); activate_multithread(backend) +backend = CPU(); workgroup = AutoTune() hardware = Hardware(backend=backend, workgroup=workgroup) mesh_dev = adapt(backend, mesh)