# # Create an initial model setup for Chmy and run it in parallel
#
# ## Aim
# In this tutorial, you will learn how to use [Chmy](https://github.com/PTsolvers/Chmy.jl) to perform a 2D diffusion simulation
# on one or multiple CPUs or GPUs.
# `Chmy` is a package that allows you to specify grids and fields and create finite difference simulations.
#
# ## 1. Load Chmy and required packages
using Chmy, Chmy.Architectures, Chmy.Grids, Chmy.Fields, Chmy.BoundaryConditions, Chmy.GridOperators, Chmy.KernelLaunch
using KernelAbstractions
using Printf
using CairoMakie
using GeophysicalModelGenerator

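# If any of these packages are not yet installed, you can add them first (a minimal sketch,
# assuming you work in the currently activated project environment):
# ```julia
# using Pkg
# Pkg.add(["Chmy", "KernelAbstractions", "CairoMakie", "GeophysicalModelGenerator", "MPI"])
# ```
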
# In case you want to use GPUs, you need to sort out whether you have AMD or NVIDIA GPUs
# and load the corresponding package:
#=
    using AMDGPU
    AMDGPU.allowscalar(false)
    using CUDA
    CUDA.allowscalar(false)
=#

# To run this in parallel, you also need to load the following:
using Chmy.Distributed
using MPI
MPI.Init()

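# If you want to check that MPI was initialised correctly, you can query the rank and size of
# the communicator (a small sketch using standard MPI.jl calls):
# ```julia
# me     = MPI.Comm_rank(MPI.COMM_WORLD)  # id of the current process (0-based)
# nprocs = MPI.Comm_size(MPI.COMM_WORLD)  # total number of MPI processes
# ```
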
# ## 2. Define computational routines
# You need to specify a compute kernel that computes the fluxes from the gradients of `C`:
@kernel inbounds = true function compute_q!(q, C, χ, g::StructuredGrid, O)
    I = @index(Global, NTuple)
    I = I + O
    q.x[I...] = -χ * ∂x(C, g, I...)
    q.y[I...] = -χ * ∂y(C, g, I...)
end

# You also need to specify a compute kernel to update the concentration:
@kernel inbounds = true function update_C!(C, q, Δt, g::StructuredGrid, O)
    I = @index(Global, NTuple)
    I = I + O
    C[I...] -= Δt * divg(q, g, I...)
end

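# Together, these two kernels perform one explicit update of the diffusion equation (written out
# here for clarity, matching the operations in the kernels above):
# ```math
# q = -\chi \nabla C, \qquad \frac{\partial C}{\partial t} = -\nabla \cdot q
# ```
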
# And a main function is required:
@views function main(backend=CPU(); nxy_l=(126, 126))
    arch = Arch(backend, MPI.COMM_WORLD, (0, 0))
    topo = topology(arch)
    me = global_rank(topo)

    ## geometry
    dims_l = nxy_l
    dims_g = dims_l .* dims(topo)
    grid = UniformGrid(arch; origin=(-2, -2), extent=(4, 4), dims=dims_g)
    launch = Launcher(arch, grid, outer_width=(16, 8))

    ##@info "mpi" me grid

    nx, ny = dims_g
    ## physics
    χ = 1.0
    ## numerics: explicit time step within the diffusion stability limit
    Δt = minimum(spacing(grid))^2 / χ / ndims(grid) / 2.1
    ## allocate fields
    C = Field(backend, grid, Center())
    P = Field(backend, grid, Center(), Int32) # phases

    q = VectorField(backend, grid)
    C_v = (me==0) ? KernelAbstractions.zeros(CPU(), Float64, size(interior(C)) .* dims(topo)) : nothing

    ## Use the `GeophysicalModelGenerator` to set the initial conditions. Note that
    ## you have to call this for a `Phases` and a `Temp` grid, which we call `C` here.
    add_box!(P, C, grid, xlim=(-1.0,1.0), zlim=(-1.0,1.0), phase=ConstantPhase(4), T=ConstantTemp(400))

    ## set BCs and update the halo:
    bc!(arch, grid, C => Neumann(); exchange=C)

    ## visualisation
    fig = Figure(; size=(400, 320))
    ax = Axis(fig[1, 1]; aspect=DataAspect(), xlabel="x", ylabel="y", title="it = 0")
    plt = heatmap!(ax, centers(grid)..., interior(C) |> Array; colormap=:turbo)
    Colorbar(fig[1, 2], plt)
    ## action
    nt = 100
    for it in 1:nt
        (me==0) && @printf("it = %d/%d \n", it, nt)
        launch(arch, grid, compute_q! => (q, C, χ, grid))
        launch(arch, grid, update_C! => (C, q, Δt, grid); bc=batch(grid, C => Neumann(); exchange=C))
    end
    KernelAbstractions.synchronize(backend)
    gather!(arch, C_v, C)
    if me == 0
        fig = Figure(; size=(400, 320))
        ax = Axis(fig[1, 1]; aspect=DataAspect(), xlabel="x", ylabel="y", title="it = $nt")
        plt = heatmap!(ax, C_v; colormap=:turbo) # how to get the global grid for axes?
        Colorbar(fig[1, 2], plt)
        save("out_gather_$nx.png", fig)
    end
    return
end

# In the code above, the part that calls `GMG` is:

# ```julia
# add_box!(P, C, grid, xlim=(-1.0,1.0), zlim=(-1.0,1.0), phase=ConstantPhase(4), T=ConstantTemp(400))
# ```
# which works just like any of the other GMG functions.
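# If you prefer a different initial geometry, the other GMG setup functions can be used in the
# same way. As a sketch (this assumes that `add_sphere!` is also available for `Chmy` grids and
# that a 2D centre coordinate is accepted, which you may need to adapt):
# ```julia
# add_sphere!(P, C, grid, cen=(0.0, 0.0), radius=0.5, phase=ConstantPhase(4), T=ConstantTemp(400))
# ```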

# ## 3. Run the simulation on one CPU machine or GPU card

# Running the code on the CPU is done with this:
n = 128
main(; nxy_l=(n, n) .- 2)

# If you instead want to run this on AMD or NVIDIA GPUs, do this:
## main(ROCBackend(); nxy_l=(n, n) .- 2)
## main(CUDABackend(); nxy_l=(n, n) .- 2)

# And we need to finalize the simulation with:
MPI.Finalize()

# ## 4. Run the simulation on an MPI-parallel machine
# If you want to run this on multiple cores, you will need to set up the [MPI.jl](https://github.com/JuliaParallel/MPI.jl) package,
# such that the `mpiexecjl` launcher is available on the command line.
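# One way to set this up (a sketch, assuming a standard MPI.jl installation) is to install the
# `mpiexecjl` wrapper script from within Julia:
# ```julia
# using MPI
# MPI.install_mpiexecjl()
# ```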
#
# You can then run it with:
# ```
# mpiexecjl -n 4 --project=. julia Tutorial_Chmy_MPI.jl
# ```

# The full file can be downloaded [here](../../../tutorials/Tutorial_Chmy_MPI.jl)

#src Note: The markdown page is generated using:
#src Literate.markdown("tutorials/Tutorial_Chmy_MPI.jl","docs/src/man",keepcomments=true, execute=false, codefence = "```julia" => "```")