@@ -44,8 +44,10 @@ module m_mpi_proxy
4444
4545 !> @name Generic flags used to identify and report MPI errors
4646 !> @{
47- integer, private :: err_code, ierr, i_halo_size
47+ integer, private :: err_code, ierr
4848 !> @}
49+
50+ integer :: i_halo_size
4951 !$acc declare create(i_halo_size)
5052
5153contains
@@ -70,9 +72,7 @@ contains
7072 end if
7173
7274 !$acc declare create(i_halo_size)
73-
7475 @:ALLOCATE(ib_buff_send(0:i_halo_size), ib_buff_recv(0:i_halo_size))
75-
7676 end if
7777#endif
7878
@@ -235,11 +235,11 @@ contains
235235
236236 end subroutine s_mpi_bcast_user_inputs
237237
238- subroutine s_mpi_sendrecv_ib_buffers(ib_markers, gp_layers, mpi_dir, pbc_loc)
238+ subroutine s_mpi_sendrecv_ib_buffers(ib_markers, mpi_dir, pbc_loc)
239239
240240 type(integer_field), intent(inout) :: ib_markers
241241
242- integer, intent(in) :: gp_layers, mpi_dir, pbc_loc
242+ integer, intent(in) :: mpi_dir, pbc_loc
243243
244244 integer :: i, j, k, l, r, q !< Generic loop iterators
245245
@@ -261,8 +261,8 @@ contains
261261
262262 buffer_counts = (/ &
263263 gp_layers*(n + 1)*(p + 1), &
264- gp_layers*(m + 2*buff_size + 1)*(p + 1), &
265- gp_layers*(m + 2*buff_size + 1)*(n + 2*buff_size + 1) &
264+ gp_layers*(m + 2*gp_layers + 1)*(p + 1), &
265+ gp_layers*(m + 2*gp_layers + 1)*(n + 2*gp_layers + 1) &
266266 /)
267267
268268 buffer_count = buffer_counts(mpi_dir)
@@ -312,8 +312,8 @@ contains
312312 !$acc parallel loop collapse(3) gang vector default(present) private(r)
313313 do l = 0, p
314314 do k = 0, gp_layers - 1
315- do j = -buff_size , m + buff_size
316- r = ((j + buff_size ) + (m + 2*buff_size + 1)* &
315+ do j = -gp_layers , m + gp_layers
316+ r = ((j + gp_layers ) + (m + 2*gp_layers + 1)* &
317317 (k + gp_layers*l))
318318 ib_buff_send(r) = ib_markers%sf(j, k + pack_offset, l)
319319 end do
@@ -322,10 +322,10 @@ contains
322322 #:else
323323 !$acc parallel loop collapse(3) gang vector default(present) private(r)
324324 do l = 0, gp_layers - 1
325- do k = -buff_size , n + buff_size
326- do j = -buff_size , m + buff_size
327- r = ((j + buff_size ) + (m + 2*buff_size + 1)* &
328- ((k + buff_size ) + (n + 2*buff_size + 1)*l))
325+ do k = -gp_layers , n + gp_layers
326+ do j = -gp_layers , m + gp_layers
327+ r = ((j + gp_layers ) + (m + 2*gp_layers + 1)* &
328+ ((k + gp_layers ) + (n + 2*gp_layers + 1)*l))
329329 ib_buff_send(r) = ib_markers%sf(j, k, l + pack_offset)
330330 end do
331331 end do
@@ -389,8 +389,8 @@ contains
389389 !$acc parallel loop collapse(3) gang vector default(present) private(r)
390390 do l = 0, p
391391 do k = -gp_layers, -1
392- do j = -buff_size , m + buff_size
393- r = ((j + buff_size ) + (m + 2*buff_size + 1)* &
392+ do j = -gp_layers , m + gp_layers
393+ r = ((j + gp_layers ) + (m + 2*gp_layers + 1)* &
394394 ((k + gp_layers) + gp_layers*l))
395395 ib_markers%sf(j, k + unpack_offset, l) = ib_buff_recv(r)
396396 end do
@@ -400,10 +400,10 @@ contains
400400 ! Unpacking buffer from bc_z%beg
401401 !$acc parallel loop collapse(3) gang vector default(present) private(r)
402402 do l = -gp_layers, -1
403- do k = -buff_size , n + buff_size
404- do j = -buff_size , m + buff_size
405- r = ((j + buff_size ) + (m + 2*buff_size + 1)* &
406- ((k + buff_size ) + (n + 2*buff_size + 1)* &
403+ do k = -gp_layers , n + gp_layers
404+ do j = -gp_layers , m + gp_layers
405+ r = ((j + gp_layers ) + (m + 2*gp_layers + 1)* &
406+ ((k + gp_layers ) + (n + 2*gp_layers + 1)* &
407407 (l + gp_layers)))
408408 ib_markers%sf(j, k, l + unpack_offset) = ib_buff_recv(r)
409409 end do
@@ -420,9 +420,11 @@ contains
420420 subroutine s_mpi_send_random_number(phi_rn, num_freq)
421421 integer, intent(in) :: num_freq
422422 real(wp), intent(inout), dimension(1:num_freq) :: phi_rn
423+
423424#ifdef MFC_MPI
424425 call MPI_BCAST(phi_rn, num_freq, mpi_p, 0, MPI_COMM_WORLD, ierr)
425426#endif
427+
426428 end subroutine s_mpi_send_random_number
427429
428430 subroutine s_finalize_mpi_proxy_module()