@@ -13,14 +13,14 @@ module m_mpi_proxy
  use mpi !< Message passing interface (MPI) module
 #endif
 
+ use m_helper
+
  use m_derived_types !< Definitions of the derived types
 
  use m_global_parameters !< Global parameters for the code
 
  use m_mpi_common
 
- use m_helper
-
  implicit none
 
  integer, private :: err_code, ierr, v_size !<
@@ -51,7 +51,6 @@ contains
  ! for the sake of simplicity, both variables are provided sufficient
  ! storage to hold the largest buffer in the computational domain.
 
-
  if (n > 0) then
  if (p > 0) then
 
@@ -70,15 +69,14 @@ contains
 
  v_size = sys_size
 
- allocate(q_prims_buff_send(0:halo_size))
+ allocate (q_prims_buff_send(0:halo_size))
 
- allocate(q_prims_buff_recv(0:ubound(q_prims_buff_send, 1)))
+ allocate (q_prims_buff_recv(0:ubound(q_prims_buff_send, 1)))
 
 #endif
 
  end subroutine s_initialize_mpi_proxy_module
 
-
  !> Since only processor with rank 0 is in charge of reading
  !! and checking the consistency of the user provided inputs,
  !! these are not available to the remaining processors. This
@@ -98,7 +96,7 @@ contains
  & ' loops_x' , ' loops_y' , ' loops_z' , ' model_eqns' , ' num_fluids' , &
  & ' weno_order' , ' precision' , ' perturb_flow_fluid' , &
  & ' perturb_sph_fluid' , ' num_patches' , ' thermal' , ' nb' , ' dist_type' ,&
- & ' R0_type' , ' relax_model' , ' num_ibs' , ' n_start' , ' elliptic_smoothing_iters' ]
+ & ' R0_type' , ' relax_model' , ' num_ibs' , ' n_start' , ' elliptic_smoothing_iters' ]
  call MPI_BCAST(${VAR}$, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
  #:endfor
 
@@ -108,8 +106,8 @@ contains
  & ' mixlayer_perturb' , ' bubbles_euler' , ' polytropic' , ' polydisperse' ,&
  & ' qbmm' , ' file_per_process' , ' adv_n' , ' ib' , ' cfl_adap_dt' , &
  & ' cfl_const_dt' , ' cfl_dt' , ' surface_tension' , &
- & ' hyperelasticity' , ' pre_stress' , ' viscous ' , ' bubbles_lagrange ' , &
- & ' elliptic_smoothing ' ]
+ & ' hyperelasticity' , ' pre_stress' , ' elliptic_smoothing ' , ' viscous ' , &
+ & ' bubbles_lagrange ' ]
  call MPI_BCAST(${VAR}$, 1, MPI_LOGICAL, 0, MPI_COMM_WORLD, ierr)
  #:endfor
  call MPI_BCAST(fluid_rho(1), num_fluids_max, MPI_LOGICAL, 0, MPI_COMM_WORLD, ierr)
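Note on the two loops above: #:for VAR in [...] is a fypp preprocessor loop, so each name in the list expands into its own MPI_BCAST call before the Fortran compiler ever sees the file. Reordering entries, as the logical-parameter hunk does, therefore only changes the order of the generated calls, not which parameters are broadcast from rank 0. A minimal sketch of the expansion, using three integer parameters taken from the list above:

    #:for VAR in ['num_fluids', 'weno_order', 'num_patches']
        call MPI_BCAST(${VAR}$, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
    #:endfor

which fypp renders as

        call MPI_BCAST(num_fluids, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
        call MPI_BCAST(weno_order, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
        call MPI_BCAST(num_patches, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)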
@@ -484,7 +482,6 @@ contains
  num_procs_y/), (/.true., &
  .true./), .false., MPI_COMM_CART, &
  ierr)
-
  ! Finding corresponding Cartesian coordinates of the local
  ! processor rank in newly declared cartesian communicator
  call MPI_CART_COORDS(MPI_COMM_CART, proc_rank, 2, &
@@ -668,10 +665,10 @@ contains
 #ifdef MFC_MPI
 
  buffer_counts = (/ &
- buff_size*sys_size*(n + 1)*(p + 1), &
- buff_size*sys_size*(m + 2*buff_size + 1)*(p + 1), &
- buff_size*v_size*(m + 2*buff_size + 1)*(n + 2*buff_size + 1) &
- /)
+ buff_size*sys_size*(n + 1)*(p + 1), &
+ buff_size*sys_size*(m + 2*buff_size + 1)*(p + 1), &
+ buff_size*v_size*(m + 2*buff_size + 1)*(n + 2*buff_size + 1) &
+ /)
 
  buffer_count = buffer_counts(mpi_dir)
  boundary_conditions = (/bc_x, bc_y, bc_z/)
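The buffer_counts entries above size the halo exchange for each coordinate direction: buff_size layers of ghost cells, times sys_size field variables (v_size is set equal to sys_size during initialization), times the cross-sectional extent of the sub-domain, where the y- and z-direction cross sections already include the x (and y) halo cells. A standalone sketch with hypothetical sizes, not values taken from this file, showing how the three counts come out:

    program halo_count_sketch
        implicit none
        ! Hypothetical sizes, not values read from the module above.
        integer, parameter :: buff_size = 2              ! assumed halo width
        integer, parameter :: sys_size = 8               ! assumed number of field variables
        integer, parameter :: m = 99, n = 49, p = 49     ! assumed local cell-index upper bounds
        integer :: counts(3)

        ! Same structure as buffer_counts above: halo layers x variables x cross-section,
        ! where the y and z cross sections already include the x (and y) halo cells.
        counts = (/buff_size*sys_size*(n + 1)*(p + 1), &
                   buff_size*sys_size*(m + 2*buff_size + 1)*(p + 1), &
                   buff_size*sys_size*(m + 2*buff_size + 1)*(n + 2*buff_size + 1)/)

        print *, counts   ! prints 40000  83200  89856 for the sizes above
    end program halo_count_sketch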
@@ -754,10 +751,10 @@ contains
  p_recv => q_prims_buff_recv(0)
 
  call MPI_SENDRECV( &
- p_send, buffer_count, mpi_p, dst_proc, send_tag, &
- p_recv, buffer_count, mpi_p, src_proc, recv_tag, &
- MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
-
+ p_send, buffer_count, mpi_p, dst_proc, send_tag, &
+ p_recv, buffer_count, mpi_p, src_proc, recv_tag, &
+ MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
+
  ! Unpack Received Buffer
  #:for mpi_dir in [1, 2, 3]
  if (mpi_dir == ${mpi_dir}$) then
@@ -769,7 +766,7 @@ contains
  do i = 1, sys_size
  r = (i - 1) + v_size* &
  (j + buff_size*((k + 1) + (n + 1)*l))
- q_prim_vf(i)%sf(j + unpack_offset, k, l) = q_prims_buff_recv(r)
+ q_prim_vf(i)%sf(j + unpack_offset, k, l) = q_prims_buff_recv(r)
 #if defined(__INTEL_COMPILER)
  if (ieee_is_nan(q_prim_vf(i)%sf(j, k, l))) then
  print *, "Error", j, k, l, i
@@ -781,7 +778,7 @@ contains
  end do
  end do
 
- #:elif mpi_dir == 2
+ #:elif mpi_dir == 2
  !$acc parallel loop collapse(4) gang vector default(present) private(r)
  do i = 1, sys_size
  do l = 0, p
@@ -790,7 +787,7 @@ contains
  r = (i - 1) + v_size* &
  ((j + buff_size) + (m + 2*buff_size + 1)* &
  ((k + buff_size) + buff_size*l))
- q_prim_vf(i)%sf(j, k + unpack_offset, l) = q_prims_buff_recv(r)
+ q_prim_vf(i)%sf(j, k + unpack_offset, l) = q_prims_buff_recv(r)
 #if defined(__INTEL_COMPILER)
  if (ieee_is_nan(q_prim_vf(i)%sf(j, k, l))) then
  print *, "Error", j, k, l, i
@@ -812,7 +809,7 @@ contains
  ((j + buff_size) + (m + 2*buff_size + 1)* &
  ((k + buff_size) + (n + 2*buff_size + 1)* &
  (l + buff_size)))
- q_prim_vf(i)%sf(j, k, l + unpack_offset) = q_prims_buff_recv(r)
+ q_prim_vf(i)%sf(j, k, l + unpack_offset) = q_prims_buff_recv(r)
 #if defined(__INTEL_COMPILER)
  if (ieee_is_nan(q_prim_vf(i)%sf(j, k, l))) then
  print *, "Error", j, k, l, i
@@ -831,4 +828,13 @@ contains
 
  end subroutine s_mpi_sendrecv_variables_buffers
 
+ !> Module deallocation and/or disassociation procedures
+ subroutine s_finalize_mpi_proxy_module
+
+ #ifdef MFC_MPI
+ deallocate (q_prims_buff_send, q_prims_buff_recv)
+ #endif
+
+ end subroutine s_finalize_mpi_proxy_module
+
 end module m_mpi_proxy
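The added s_finalize_mpi_proxy_module mirrors s_initialize_mpi_proxy_module by releasing the send/receive halo buffers that the initializer allocates under MFC_MPI. The driver that invokes these routines is outside this diff, so the calling order below is an assumption, sketched for orientation only:

    ! Assumed calling sequence; the actual driver code is not part of this diff.
    call s_initialize_mpi_proxy_module()    ! allocates q_prims_buff_send / q_prims_buff_recv
    ! ... broadcast user inputs, exchange halos via s_mpi_sendrecv_variables_buffers ...
    call s_finalize_mpi_proxy_module()      ! deallocates both buffers (MFC_MPI builds only)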