Commits (34)
3f5099a
Add CI rules for ALPS
luraess Aug 14, 2025
383f33b
Update
luraess Aug 14, 2025
e873240
Fixup
luraess Aug 14, 2025
632f44f
Fixup
luraess Aug 14, 2025
4fe15a4
Add more testing
luraess Aug 14, 2025
124e4de
Trigger buildkite on 1.12
luraess Aug 14, 2025
fd8226c
Fixup
luraess Aug 14, 2025
a96a984
Add missing deps
luraess Aug 14, 2025
da33c65
Fixup ref test
luraess Aug 14, 2025
a541bee
Fixup
luraess Aug 14, 2025
1d7b5bc
Fixup IGG call
luraess Aug 14, 2025
f75b0ba
More fixes
luraess Aug 14, 2025
001315d
Fixup
luraess Aug 14, 2025
6651039
fix init particles call
albert-de-montserrat Aug 14, 2025
6a89663
Fixup
luraess Aug 14, 2025
7374f02
allow scalar indexing
albert-de-montserrat Aug 14, 2025
a6a8db0
Allow scalar
luraess Aug 14, 2025
8b38f67
try baremetal runner
luraess Aug 14, 2025
77c2265
Merge branch 'lr/alps-ci' of github.com:JuliaGeodynamics/JustPIC.jl i…
luraess Aug 14, 2025
afa9e82
Fixup
luraess Aug 14, 2025
819f058
Fixup
luraess Aug 14, 2025
6a920c0
Fixup
luraess Aug 14, 2025
2eda7ff
update 2D MPI advection
albert-de-montserrat Aug 14, 2025
b356294
New try
luraess Aug 14, 2025
f62e319
Merge branch 'lr/alps-ci' of github.com:JuliaGeodynamics/JustPIC.jl i…
luraess Aug 14, 2025
1500f05
Up
luraess Aug 14, 2025
6016e34
Cleanup
luraess Aug 14, 2025
504f439
Cleanup
luraess Aug 14, 2025
39db6f2
Cleanup
luraess Aug 14, 2025
9698797
Format
luraess Aug 14, 2025
f6d1da6
AMDGPU fix trial
luraess Aug 14, 2025
0e296c2
Fixup project
luraess Aug 15, 2025
bca90e6
Fixup format
luraess Aug 15, 2025
ca1fb3f
Add badge
luraess Aug 19, 2025
2 changes: 2 additions & 0 deletions .buildkite/run_tests.yml
@@ -5,6 +5,7 @@ steps:
version:
- "1.10"
- "1.11"
- "1.12-nightly"
Collaborator

Wouldn't it make more sense to have 'pre' here for the pre-releases?
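A sketch of what that suggestion could look like in the version matrix, assuming the JuliaCI/julia Buildkite plugin accepts a juliaup-style 'pre' channel for the latest pre-release:

version:
- "1.10"
- "1.11"
- "pre" # tracks the newest pre-release instead of pinning a specific nightly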

plugins:
- JuliaCI/julia#v1:
version: "{{matrix.version}}"
@@ -33,6 +34,7 @@ steps:
version:
- "1.10"
- "1.11"
- "1.12-nightly"
plugins:
- JuliaCI/julia#v1:
version: "{{matrix.version}}"
47 changes: 47 additions & 0 deletions ci/cscs-gh200.yml
@@ -0,0 +1,47 @@
include:
- remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'

unit_test:
extends: .uenv-runner-daint-gh200
image: julia/25.5:v1
script:
- julia -e 'println("Instantiating project");
using Pkg;
Pkg.develop(; path=pwd())'
- julia -e 'println("Running tests");
using Pkg;
Pkg.test("JustPIC"; test_args=["--backend=CUDA"])'
variables:
WITH_UENV_VIEW: 'juliaup'
SLURM_JOB_NUM_NODES: 1
SLURM_NTASKS_PER_NODE: 1
SLURM_GPUS_PER_TASK: 1
SLURM_TIMELIMIT: "00:30:00"

# ref_test:
# extends: .uenv-runner-daint-gh200
# image: julia/25.5:v1
# script:
# - julia --project -e 'using Pkg; Pkg.instantiate()'
# - julia --project -e 'using Pkg; Pkg.add("CUDA")'
# - julia --project scripts/single_particle_advection_MPI_ci.jl
# variables:
# WITH_UENV_VIEW: 'juliaup'
# SLURM_JOB_NUM_NODES: 2
# SLURM_NTASKS_PER_NODE: 4
# SLURM_GPUS_PER_TASK: 1
# SLURM_TIMELIMIT: "00:30:00"

ref_test:
extends: .baremetal-runner-daint-gh200
script:
- echo "Preparing the test environment (single rank)"
- srun -n 1 --uenv julia/25.5:v1 julia --project -e 'using Pkg; Pkg.instantiate()'
- srun -n 1 --uenv julia/25.5:v1 julia --project -e 'using Pkg; Pkg.add("CUDA")'
- echo "Running the reference test (multiple ranks)"
- srun --uenv julia/25.5:v1 julia --project scripts/single_particle_advection_MPI_ci.jl
variables:
SLURM_JOB_NUM_NODES: 2
SLURM_NTASKS_PER_NODE: 4
SLURM_GPUS_PER_TASK: 1
SLURM_TIMELIMIT: "00:30:00"
165 changes: 165 additions & 0 deletions scripts/single_particle_advection_MPI_ci.jl
@@ -0,0 +1,165 @@
using CUDA
CUDA.allowscalar(true) # enable scalar operations on CUDA arrays
using JustPIC, JustPIC._2D

using ImplicitGlobalGrid
import MPI

# Threads is the default backend,
# to run on a CUDA GPU load CUDA.jl (i.e. "using CUDA"),
# and to run on an AMD GPU load AMDGPU.jl (i.e. "using AMDGPU")
const backend = CUDABackend # Options: CPUBackend, CUDABackend, AMDGPUBackend
# const backend = JustPIC.CPUBackend

using ParallelStencil
@init_parallel_stencil(CUDA, Float64, 2)
# @init_parallel_stencil(Threads, Float64, 2)

# using GLMakie

# allocate N particle-field cell arrays matching the particles layout (not used in this test, particle_args is empty)
@inline init_particle_fields_cellarrays(particles, ::Val{N}) where {N} = ntuple(_ -> @fill(0.0, size(particles.coords[1])..., celldims = (cellsize(particles.index))), Val(N))

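# pad a coordinate range (or array) by one ghost cell on each side, used to build the staggered velocity grids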
function expand_range(x::AbstractRange)
dx = x[2] - x[1]
n = length(x)
x1, x2 = extrema(x)
xI = x1 - dx
xF = x2 + dx
return LinRange(xI, xF, n + 2)
end

function expand_range(x::AbstractArray, dx)
x1, x2 = extrema(x)
xI = x1 - dx
xF = x2 + dx
return TA(backend)(vcat(xI, x, xF))
end

# Analytical flow solution
vx_stream(x, y) = 250 * sin(π * x) * cos(π * y)
vy_stream(x, y) = -250 * cos(π * x) * sin(π * y)
# used only by the commented-out plotting code below; Point2f requires GLMakie
g(x) = Point2f(
vx_stream(x[1], x[2]),
vy_stream(x[1], x[2])
)

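# Advect particles through the analytical cellular flow on the MPI-distributed grid;
# rank 0 gathers the particle positions and records the trajectory of the first particle.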
function main()
# Initialize particles -------------------------------
nxcell, max_xcell, min_xcell = 24, 24, 18
n = 32
nx = ny = n - 1
me, dims, = init_global_grid(
n - 1, n - 1, 1;
init_MPI = JustPIC.MPI.Initialized() ? false : true,
select_device = false
)
Lx = Ly = 1.0
dxi = dx, dy = Lx / (nx_g() - 1), Ly / (ny_g() - 1)
# nodal vertices
xvi = xv, yv = let
dummy = zeros(n, n)
xv = TA(backend)([x_g(i, dx, dummy) for i in axes(dummy, 1)])
yv = TA(backend)([y_g(i, dy, dummy) for i in axes(dummy, 2)])
xv, yv
end
# cell centers
xci = xc, yc = let
dummy = zeros(nx, ny)
xc = TA(backend)([x_g(i, dx, dummy) for i in axes(dummy, 1)])
yc = TA(backend)([y_g(i, dy, dummy) for i in axes(dummy, 2)])
xc, yc
end

# staggered grid for the velocity components
grid_vx = xv, expand_range(yc, dy)
grid_vy = expand_range(xc, dx), yv

particles = init_particles(backend, nxcell, max_xcell, min_xcell, xvi...)

# no additional particle fields are advected in this test
particle_args = ()

# Velocity fields on the staggered grids -------------------------------
Vx = TA(backend)([vx_stream(x, y) for x in grid_vx[1], y in grid_vx[2]])
Vy = TA(backend)([vy_stream(x, y) for x in grid_vy[1], y in grid_vy[2]])
V = Vx, Vy

# time step
dt = min(dx / maximum(abs.(Vx)), dy / maximum(abs.(Vy)))

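# global buffers (filled on the root rank) for gathering particle data, plus local copies with the y-direction halo cells stripped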
nx_v = (size(particles.coords[1].data, 2)) * dims[1]
ny_v = (size(particles.coords[1].data, 3) - 2) * dims[2]
px_v = fill(NaN, nx_v, ny_v)
py_v = fill(NaN, nx_v, ny_v)
index_v = fill(false, nx_v, ny_v)
px_nohalo = fill(NaN, size(particles.coords[1].data, 2), size(particles.coords[1].data, 3) - 2)
py_nohalo = fill(NaN, size(particles.coords[1].data, 2), size(particles.coords[1].data, 3) - 2)
index_nohalo = fill(false, size(particles.coords[1].data, 2), size(particles.coords[1].data, 3) - 2)

p = [(NaN, NaN)] # trajectory of the tracked particle (NaN placeholder as first entry)

# Advection test
niter = 150
for iter in 1:niter

# advect particles
advection!(particles, RungeKutta2(2 / 3), V, (grid_vx, grid_vy), dt)
# update halos
update_cell_halo!(particles.coords..., particle_args...)
update_cell_halo!(particles.index)

# shuffle particles
move_particles!(particles, xvi, particle_args)

# gather particle data - for plotting only
@views px_nohalo .= particles.coords[1].data[1, :, 2:(end - 1)]
@views py_nohalo .= particles.coords[2].data[1, :, 2:(end - 1)]
@views index_nohalo .= particles.index.data[1, :, 2:(end - 1)]
gather!(px_nohalo, px_v)
gather!(py_nohalo, py_v)
gather!(index_nohalo, index_v)

if me == 0
p_i = (px_v[index_v][1], py_v[index_v][1])
push!(p, p_i)
end

# if me == 0 && iter % 10 == 0
# w = 0.504
# offset = 0.5 - (w - 0.5)
# f, ax, = lines(
# [0, w, w, 0, 0],
# [0, 0, w, w, 0],
# linewidth = 3
# )
# lines!(
# ax,
# [0, w, w, 0, 0] .+ offset,
# [0, 0, w, w, 0],
# linewidth = 3
# )
# lines!(
# ax,
# [0, w, w, 0, 0] .+ offset,
# [0, 0, w, w, 0] .+ offset,
# linewidth = 3
# )
# lines!(
# ax,
# [0, w, w, 0, 0],
# [0, 0, w, w, 0] .+ offset,
# linewidth = 3
# )
# streamplot!(ax, g, LinRange(0, 1, 100), LinRange(0, 1, 100))
# lines!(ax, p, color = :red)
# scatter!(ax, p[end], color = :black)
# save("figs/trajectory_MPI_$iter.png", f)
# end
end

return finalize_global_grid()
end

main()