diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b3cc0c7d..75c7307b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,54 +1,150 @@
 name: CI
+
 on:
-  push:
-    branches: [main]
-    tags: ["*"]
   pull_request:
+    paths:
+      - '.github/workflows/ci.yml'
+      - 'src/**'
+      - 'test/**'
+      - 'Project.toml'
+  push:
+    branches:
+      - main
+    tags: '*'
+    paths:
+      - '.github/workflows/ci.yml'
+      - 'src/**'
+      - 'test/**'
+      - 'Project.toml'
+
+concurrency:
+  # Skip intermediate builds: always.
+  # Cancel intermediate builds: only if it is a pull request build.
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
 jobs:
-  test:
-    name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
+  checkpointing_tests:
+    name: Checkpointing Tests - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.10'
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v5
+      - uses: julia-actions/setup-julia@v2
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: julia-actions/cache@v2
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-runtest@v1
+        env:
+          TEST_GROUP: "checkpointing"
+
+  distributed:
+    name: Distributed Tests - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.10'
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v5
+      - uses: julia-actions/setup-julia@v2
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: julia-actions/cache@v2
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-runtest@v1
+        env:
+          TEST_GROUP: "distributed"
+
+  advection:
+    name: Sea Ice Advection - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
     runs-on: ${{ matrix.os }}
+    timeout-minutes: 120
     strategy:
       fail-fast: false
       matrix:
         version:
           - '1.10'
-          - '1.12'
         os:
           - ubuntu-latest
         arch:
           - x64
-          - x86
-        include:
-          # test macOS and Windows with latest Julia only
-          - os: macOS-latest
-            arch: x64
-            version: '1.10'
-          - os: windows-latest
-            arch: x64
-            version: '1.10'
-          - os: windows-latest
-            arch: x86
-            version: '1.10'
     steps:
-      - uses: actions/checkout@v4
-      - uses: julia-actions/setup-julia@latest
+      - uses: actions/checkout@v5
+      - uses: julia-actions/setup-julia@v2
        with:
           version: ${{ matrix.version }}
           arch: ${{ matrix.arch }}
-      - uses: actions/cache@v3
+      - uses: julia-actions/cache@v2
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-runtest@v1
         env:
-          cache-name: cache-artifacts
+          TEST_GROUP: "advection"
+
+  timestepping:
+    name: Time Stepping - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.10'
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v5
+      - uses: julia-actions/setup-julia@v2
         with:
-          path: ~/.julia/artifacts
-          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
-          restore-keys: |
-            ${{ runner.os }}-test-${{ env.cache-name }}-
-            ${{ runner.os }}-test-
-            ${{ runner.os }}-
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: julia-actions/cache@v2
       - uses: julia-actions/julia-buildpkg@v1
       - uses: julia-actions/julia-runtest@v1
-      - uses: julia-actions/julia-processcoverage@v1
-      - uses: codecov/codecov-action@v2
+        env:
+          TEST_GROUP: "timestepping"
+
+  enthalpy:
+    name: Enthalpy Model - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.10'
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v5
+      - uses: julia-actions/setup-julia@v2
         with:
-          file: lcov.info
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: julia-actions/cache@v2
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-runtest@v1
+        env:
+          TEST_GROUP: "enthalpy"
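# NOTE on the workflow above: the five jobs are identical apart from the
# TEST_GROUP variable exported to julia-runtest; the grouping itself is
# implemented in test/runtests.jl (see below). A hedged local equivalent,
# relying on ENV being inherited by the test subprocess:
#
#   using Pkg
#   ENV["TEST_GROUP"] = "distributed"
#   Pkg.test("ClimaSeaIce")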
diff --git a/src/SeaIceDynamics/sea_ice_momentum_equations.jl b/src/SeaIceDynamics/sea_ice_momentum_equations.jl
index a5eed438..7648fa02 100644
--- a/src/SeaIceDynamics/sea_ice_momentum_equations.jl
+++ b/src/SeaIceDynamics/sea_ice_momentum_equations.jl
@@ -59,7 +59,7 @@ function SeaIceMomentumEquation(grid;
                                 top_momentum_stress = nothing,
                                 bottom_momentum_stress = nothing,
                                 free_drift = nothing,
-                                solver = SplitExplicitSolver(150),
+                                solver = SplitExplicitSolver(grid; substeps=150),
                                 minimum_concentration = 1e-3,
                                 minimum_mass = 1.0)
diff --git a/src/SeaIceDynamics/split_explicit_momentum_equations.jl b/src/SeaIceDynamics/split_explicit_momentum_equations.jl
index cfe8b76a..b0c3636e 100644
--- a/src/SeaIceDynamics/split_explicit_momentum_equations.jl
+++ b/src/SeaIceDynamics/split_explicit_momentum_equations.jl
@@ -1,25 +1,41 @@
 using Oceananigans.Grids: AbstractGrid, architecture, halo_size
-using Oceananigans.BoundaryConditions: fill_halo_regions!, fill_halo_size, fill_halo_offset
 using Oceananigans.Utils: configure_kernel
 using Oceananigans.Architectures: convert_to_device
 using Oceananigans.Fields: instantiated_location, boundary_conditions
+using Oceananigans.DistributedComputations: DistributedGrid
 using Oceananigans.ImmersedBoundaries: peripheral_node
+using Oceananigans.BoundaryConditions: fill_halo_regions!, fill_halo_size, fill_halo_offset
+
+using Oceananigans.Models.HydrostaticFreeSurfaceModels.SplitExplicitFreeSurfaces: split_explicit_kernel_size
 
-struct SplitExplicitSolver
-    substeps :: Int
+struct SplitExplicitSolver{I, K}
+    substeps :: I
+    kernel_parameters :: K
 end
 
 """
-    SplitExplicitSolver(; substeps=120)
+    SplitExplicitSolver(grid; substeps=120)
 
 Creates a `SplitExplicitSolver` that controls the dynamical evolution of sea-ice momentum
 by subcycling `substeps` times in between each ice_thermodynamics / tracer advection time step.
 
 The default number of substeps is 120.
 """
-SplitExplicitSolver(; substeps=120) = SplitExplicitSolver(substeps)
+SplitExplicitSolver(grid; substeps=120) = SplitExplicitSolver(substeps, :xy)
+
+function SplitExplicitSolver(grid::DistributedGrid; substeps=120)
+    Nx, Ny, _ = size(grid)
+    Hx, Hy, _ = halo_size(grid)
+    TX, TY, _ = topology(grid)
+    kernel_sizes = map(split_explicit_kernel_size, (TX, TY), (Nx, Ny), (Hx, Hy))
+    return SplitExplicitSolver(substeps, KernelParameters(kernel_sizes...))
+end
+
+# When no grid is provided, we assume a serial grid with default kernel parameters
+SplitExplicitSolver(; substeps=120) = SplitExplicitSolver(substeps, :xy)
 
 const SplitExplicitMomentumEquation = SeaIceMomentumEquation{<:SplitExplicitSolver}
+const ExtendedSplitExplicitMomentumEquation = SeaIceMomentumEquation{<:SplitExplicitSolver{<:Any, <:KernelParameters}}
 
 """
     time_step_momentum!(model, rheology::AbstractExplicitRheology, Δt)
@@ -58,10 +74,10 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt
               ℵ = model.ice_concentration,
               ρ = model.ice_density))
 
-    active_cells_map = Oceananigans.Grids.get_active_column_map(grid)
+    params = dynamics.solver.kernel_parameters
 
-    u_velocity_kernel!, _ = configure_kernel(arch, grid, :xy, _u_velocity_step!; active_cells_map)
-    v_velocity_kernel!, _ = configure_kernel(arch, grid, :xy, _v_velocity_step!; active_cells_map)
+    u_velocity_kernel!, _ = configure_kernel(arch, grid, params, _u_velocity_step!)
+    v_velocity_kernel!, _ = configure_kernel(arch, grid, params, _v_velocity_step!)
 
     substeps = dynamics.solver.substeps
     initialize_rheology!(model, dynamics.rheology)
@@ -101,6 +117,9 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt
     converted_stresses_args = convert_to_device(arch, stresses_args)
 
     for substep in 1 : substeps
+        fill_halo_regions!(converted_u_halo...; only_local_halos = true)
+        fill_halo_regions!(converted_v_halo...; only_local_halos = true)
+
         # Compute stresses! depending on the particular rheology implementation
         compute_stresses!(dynamics, converted_stresses_args...)
 
@@ -115,9 +134,6 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt
             v_velocity_kernel!(converted_v_args...)
             u_velocity_kernel!(converted_u_args...)
         end
-
-        fill_halo_regions!(converted_u_halo...)
-        fill_halo_regions!(converted_v_halo...)
     end
 end
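# Hedged usage sketch for the constructors above (not part of the patch):
# a serial grid gets the default :xy kernel parameters, while a
# DistributedGrid precomputes extended kernel sizes so the substeps can
# advance into the halo region. Grid extents here are illustrative.
#
#   using Oceananigans
#   grid = RectilinearGrid(size=(100, 100, 1), x=(0, 1), y=(0, 1), z=(-1, 0))
#   serial_solver = SplitExplicitSolver(grid; substeps=150)
#
#   arch = Distributed(CPU(), partition=Partition(2, 2))
#   dgrid = RectilinearGrid(arch; size=(100, 100, 1), x=(0, 1), y=(0, 1),
#                           z=(-1, 0), halo=(5, 5, 5))
#   distributed_solver = SplitExplicitSolver(dgrid; substeps=150)  # carries KernelParameters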
""" -SplitExplicitSolver(; substeps=120) = SplitExplicitSolver(substeps) +SplitExplicitSolver(grid; substeps=120) = SplitExplicitSolver(substeps, :xy) + +function SplitExplicitSolver(grid::DistributedGrid; substeps=120) + Nx, Ny, _ = size(grid) + Hx, Hy, _ = halo_size(grid) + TX, TY, _ = topology(grid) + kernel_sizes = map(split_explicit_kernel_size, (TX, TY), (Nx, Ny), (Hx, Hy)) + return SplitExplicitSolver(substeps, KernelParameters(kernel_sizes...)) +end + +# When no grid is provided, we assume a serial grid with default kernel parameters +SplitExplicitSolver(; substeps=120) = SplitExplicitSolver(substeps, :xy) const SplitExplicitMomentumEquation = SeaIceMomentumEquation{<:SplitExplicitSolver} +const ExtendedSplitExplicitMomentumEquation = SeaIceMomentumEquation{<:SplitExplicitSolver{<:Any, <:KernelParameters}} """ time_step_momentum!(model, rheology::AbstractExplicitRheology, Δt) @@ -58,10 +74,10 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt ℵ = model.ice_concentration, ρ = model.ice_density)) - active_cells_map = Oceananigans.Grids.get_active_column_map(grid) + params = dynamics.solver.kernel_parameters - u_velocity_kernel!, _ = configure_kernel(arch, grid, :xy, _u_velocity_step!; active_cells_map) - v_velocity_kernel!, _ = configure_kernel(arch, grid, :xy, _v_velocity_step!; active_cells_map) + u_velocity_kernel!, _ = configure_kernel(arch, grid, params, _u_velocity_step!) + v_velocity_kernel!, _ = configure_kernel(arch, grid, params, _v_velocity_step!) substeps = dynamics.solver.substeps initialize_rheology!(model, dynamics.rheology) @@ -101,6 +117,9 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt converted_stresses_args = convert_to_device(arch, stresses_args) for substep in 1 : substeps + fill_halo_regions!(converted_u_halo...; only_local_halos = true) + fill_halo_regions!(converted_v_halo...; only_local_halos = true) + # Compute stresses! depending on the particular rheology implementation compute_stresses!(dynamics, converted_stresses_args...) @@ -115,9 +134,6 @@ function time_step_momentum!(model, dynamics::SplitExplicitMomentumEquation, Δt v_velocity_kernel!(converted_v_args...) u_velocity_kernel!(converted_u_args...) end - - fill_halo_regions!(converted_u_halo...) - fill_halo_regions!(converted_v_halo...) 
diff --git a/src/sea_ice_time_stepping.jl b/src/sea_ice_time_stepping.jl
index c683e85f..913fc910 100644
--- a/src/sea_ice_time_stepping.jl
+++ b/src/sea_ice_time_stepping.jl
@@ -83,10 +83,9 @@ function update_state!(model::SIM)
 
     foreach(prognostic_fields(model)) do field
         mask_immersed_field_xy!(field, k=size(model.grid, 3))
+        fill_halo_regions!(field, model.clock, fields(model))
     end
 
-    fill_halo_regions!(prognostic_fields(model), model.clock, fields(model))
-
     update_model_field_time_series!(model, model.clock)
 
     return nothing
@@ -104,4 +103,4 @@ function update_model_field_time_series!(model::SeaIceModel, clock::Clock)
     end
 
     return nothing
-end
\ No newline at end of file
+end
diff --git a/test/distributed_tests_utils.jl b/test/distributed_tests_utils.jl
new file mode 100644
index 00000000..fdcd03de
--- /dev/null
+++ b/test/distributed_tests_utils.jl
@@ -0,0 +1,57 @@
+using JLD2
+using MPI
+using Oceananigans
+using Oceananigans.DistributedComputations: reconstruct_global_field, reconstruct_global_grid
+using Oceananigans.Units
+using ClimaSeaIce
+using ClimaSeaIce.SeaIceDynamics: SeaIceMomentumEquation, ElastoViscoPlasticRheology, SemiImplicitStress
+
+# Run the distributed grid simulation and save down reconstructed results
+function run_distributed_sea_ice(arch, filename)
+    distributed_grid = RectilinearGrid(arch;
+                                       size = (100, 100, 1),
+                                       x = (-10kilometers, 10kilometers),
+                                       y = (-10kilometers, 10kilometers),
+                                       z = (-1, 0),
+                                       halo = (5, 5, 5))
+
+    model = run_distributed_simulation(distributed_grid)
+
+    u = reconstruct_global_field(model.velocities.u)
+    v = reconstruct_global_field(model.velocities.v)
+
+    if arch.local_rank == 0
+        jldsave(filename; u = Array(interior(u, :, :, 1)),
+                          v = Array(interior(v, :, :, 1)))
+    end
+
+    MPI.Barrier(MPI.COMM_WORLD)
+    MPI.Finalize()
+
+    return nothing
+end
+
+# A reference sea ice simulation on a rectilinear grid, shared by the
+# serial and distributed runs so their results can be compared directly
+function run_distributed_simulation(grid)
+
+    τᵤ = 0.01
+    τᵥ = 0.01
+    τₒ = SemiImplicitStress()
+
+    # We use an elasto-visco-plastic rheology and seventh-order WENO
+    # for advection of h and ℵ
+    dynamics = SeaIceMomentumEquation(grid;
+                                      top_momentum_stress = (u=τᵤ, v=τᵥ),
+                                      bottom_momentum_stress = τₒ,
+                                      rheology = ElastoViscoPlasticRheology(),
+                                      solver = SplitExplicitSolver(grid; substeps=50))
+
+    model = SeaIceModel(grid; ice_thermodynamics = nothing, dynamics, advection = WENO(order=7))
+
+    for _ in 1:100
+        time_step!(model, 1minutes)
+    end
+
+    return model
+end
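# A minimal driver sketch for the harness above (the test file below writes
# and launches equivalent scripts programmatically); assumes mpiexec from
# MPI.jl and a hypothetical script name driver.jl:
#
#   using MPI
#   MPI.Init()
#   include("distributed_tests_utils.jl")
#   arch = Distributed(CPU(), partition = Partition(2, 2))
#   run_distributed_sea_ice(arch, "distributed_pencil_seaice.jld2")
#
# launched as: mpiexec -n 4 julia --project -O0 driver.jl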
diff --git a/test/runtests.jl b/test/runtests.jl
index 980ec83c..efe922d1 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,26 +4,41 @@ using Oceananigans
 using ClimaSeaIce
 using ClimaSeaIce.EnthalpyMethodSeaIceModels: EnthalpyMethodSeaIceModel, MolecularDiffusivity
 
-κ = 1e-5
-grid = RectilinearGrid(size=3, z=(-3, 0), topology=(Flat, Flat, Bounded))
-closure = MolecularDiffusivity(grid, κ_ice=κ, κ_water=κ)
-model = EnthalpyMethodSeaIceModel(; grid, closure)
-
-@test model isa EnthalpyMethodSeaIceModel
-
-# Test that it runs
-simulation = Simulation(model; Δt = 0.1 / κ, stop_iteration=3)
-
-@test begin
-    try
-        run!(simulation)
-        true
-    catch
-        false
+TEST_GROUP = get(ENV, "TEST_GROUP", "all")
+
+if TEST_GROUP == "all" || TEST_GROUP == "enthalpy"
+    κ = 1e-5
+    grid = RectilinearGrid(size=3, z=(-3, 0), topology=(Flat, Flat, Bounded))
+    closure = MolecularDiffusivity(grid, κ_ice=κ, κ_water=κ)
+    model = EnthalpyMethodSeaIceModel(; grid, closure)
+
+    @test model isa EnthalpyMethodSeaIceModel
+
+    # Test that it runs
+    simulation = Simulation(model; Δt = 0.1 / κ, stop_iteration=3)
+
+    @test begin
+        try
+            run!(simulation)
+            true
+        catch
+            false
+        end
     end
 end
 
-include("test_sea_ice_advection.jl")
-include("test_time_stepping.jl")
-include("test_distributed_sea_ice.jl")
-include("test_checkpointing.jl")
+if TEST_GROUP == "all" || TEST_GROUP == "advection"
+    include("test_sea_ice_advection.jl")
+end
+
+if TEST_GROUP == "all" || TEST_GROUP == "timestepping"
+    include("test_time_stepping.jl")
+end
+
+if TEST_GROUP == "all" || TEST_GROUP == "checkpointing"
+    include("test_checkpointing.jl")
+end
+
+if TEST_GROUP == "all" || TEST_GROUP == "distributed"
+    include("test_distributed_sea_ice.jl")
+end
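# Hedged sketch: a new group slots into the same pattern, e.g. a
# hypothetical "rheology" group gating a hypothetical test file,
# plus a matching TEST_GROUP entry in the CI matrix above:
#
#   if TEST_GROUP == "all" || TEST_GROUP == "rheology"
#       include("test_rheology.jl")
#   end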
diff --git a/test/test_distributed_sea_ice.jl b/test/test_distributed_sea_ice.jl
index 5d7316a2..7e1be202 100644
--- a/test/test_distributed_sea_ice.jl
+++ b/test/test_distributed_sea_ice.jl
@@ -1,56 +1,94 @@
-using MPI
-using JLD2
+using Oceananigans
+using ClimaSeaIce
 
-distributed_sea_ice = """
-    using ClimaSeaIce
-    using Oceananigans
-    using Oceananigans.DistributedComputations
-    using JLD2
+include("distributed_tests_utils.jl")
 
-    arch = Distributed(CPU(); partition=Partition(x=2, y=2))
+run_slab_distributed_grid = """
+    using MPI
+    MPI.Init()
 
-    grid_2d = RectilinearGrid(arch, size=(10, 10), x=(0, 1), y=(0, 1), topology=(Bounded, Bounded, Flat))
-    grid_3d = RectilinearGrid(arch, size=(10, 10, 10), x=(0, 1), y=(0, 1), z=(-1, 0), topology=(Bounded, Bounded, Bounded))
+    include("distributed_tests_utils.jl")
+    arch = Distributed(CPU(), partition = Partition(1, 4))
+    run_distributed_sea_ice(arch, "distributed_yslab_seaice.jld2")
+"""
 
-    iterations = Int[]
+run_pencil_distributed_grid = """
+    using MPI
+    MPI.Init()
 
-    for grid in (grid_2d, grid_3d)
-        rheologies = (ElastoViscoPlasticRheology(), ViscousRheology(ν=1000))
-        advections = (WENO(), UpwindBiased(order=5))
+    include("distributed_tests_utils.jl")
+    arch = Distributed(CPU(), partition = Partition(2, 2))
+    run_distributed_sea_ice(arch, "distributed_pencil_seaice.jld2")
+"""
 
-        ice_thermodynamics = (nothing, SlabSeaIceThermodynamics(grid))
+run_large_pencil_distributed_grid = """
+    using MPI
+    MPI.Init()
 
-        coriolises = (nothing, FPlane(latitude=45), BetaPlane(latitude=45))
-        solvers = (ExplicitSolver(), SplitExplicitSolver())
+    include("distributed_tests_utils.jl")
+    arch = Distributed(CPU(), partition = Partition(4, 2))
+    run_distributed_sea_ice(arch, "distributed_large_pencil_seaice.jld2")
+"""
 
-        for coriolis in coriolises, advection in advections, rheology in rheologies, ice_thermodynamics in ice_thermodynamics, solver in solvers
-            dynamics = SeaIceMomentumEquation(grid; coriolis, rheology, solver)
+@testset "Distributed sea ice simulations" begin
+    # Run the serial computation
+    grid = RectilinearGrid(CPU();
+                           size = (100, 100, 1),
+                           x = (-10kilometers, 10kilometers),
+                           y = (-10kilometers, 10kilometers),
+                           z = (-1, 0),
+                           halo = (5, 5, 5))
 
-            model = SeaIceModel(grid; dynamics, ice_thermodynamics, advection)
-            simulation = Simulation(model, Δt=1.0, stop_iteration=1)
+    model = run_distributed_simulation(grid)
 
-            run!(simulation)
+    # Retrieve serial quantities
+    us, vs = model.velocities
 
-            push!(iterations, model.clock.iteration)
-        end
-    end
+    us = interior(us, :, :, 1)
+    vs = interior(vs, :, :, 1)
 
-    DistributedComputations.all_reduce!(+, iterations, arch)
+    # Run the distributed grid simulation with a slab configuration
+    write("distributed_slab_tests.jl", run_slab_distributed_grid)
+    run(`$(mpiexec()) -n 4 $(Base.julia_cmd()) --project -O0 distributed_slab_tests.jl`)
+    rm("distributed_slab_tests.jl")
 
-    @root jldsave("iterations.jld2"; iterations)
-"""
+    # Retrieve parallel quantities
+    up = jldopen("distributed_yslab_seaice.jld2")["u"]
+    vp = jldopen("distributed_yslab_seaice.jld2")["v"]
+
+    rm("distributed_yslab_seaice.jld2")
+
+    # Test slab partitioning
+    @test all(us .≈ up)
+    @test all(vs .≈ vp)
+
+    # Run the distributed grid simulation with a pencil configuration
+    write("distributed_tests.jl", run_pencil_distributed_grid)
+    run(`$(mpiexec()) -n 4 $(Base.julia_cmd()) --project -O0 distributed_tests.jl`)
+    rm("distributed_tests.jl")
+
+    # Retrieve parallel quantities
+    up = jldopen("distributed_pencil_seaice.jld2")["u"]
+    vp = jldopen("distributed_pencil_seaice.jld2")["v"]
+
+    rm("distributed_pencil_seaice.jld2")
+
+    @test all(us .≈ up)
+    @test all(vs .≈ vp)
+
+    # We now try more ranks in the x-direction. This is a more demanding
+    # configuration, since the domain is decomposed across more subdomain
+    # boundaries and exercises more halo-exchange paths.
+    write("distributed_large_pencil_tests.jl", run_large_pencil_distributed_grid)
+    run(`$(mpiexec()) -n 8 $(Base.julia_cmd()) --project -O0 distributed_large_pencil_tests.jl`)
+    rm("distributed_large_pencil_tests.jl")
+
+    # Retrieve parallel quantities
+    up = jldopen("distributed_large_pencil_seaice.jld2")["u"]
+    vp = jldopen("distributed_large_pencil_seaice.jld2")["v"]
+
+    rm("distributed_large_pencil_seaice.jld2")
 
-@testset "Sea ice Models" begin
-    @info "Testing distributed sea ice models runs..."
-    write("distributed_sea_ice_tests.jl", distributed_sea_ice)
-    run(`$(mpiexec()) -n 4 $(Base.julia_cmd()) --project -O0 --check-bounds=yes distributed_sea_ice_tests.jl`)
-    rm("distributed_sea_ice_tests.jl")
-
-    @info "Checking that all cores ran all configurations up to 1 iteration..."
-    @test isfile("iterations.jld2")
-    file = jldopen("iterations.jld2")
-    data = file["iterations"]
-    @test all(data .== 4)
-    close(file)
-    rm("iterations.jld2")
-end
+    @test all(us .≈ up)
+    @test all(vs .≈ vp)
+end
\ No newline at end of file