@@ -48,7 +48,7 @@ contains
4848
4949 !> Computation of parameters, allocation procedures, and/or
5050 !! any other tasks needed to properly setup the module
51- subroutine s_initialize_mpi_proxy_module
51+ impure subroutine s_initialize_mpi_proxy_module
5252
5353#ifdef MFC_MPI
5454
@@ -144,7 +144,7 @@ contains
144144 !! these are not available to the remaining processors. This
145145 !! subroutine is then in charge of broadcasting the required
146146 !! information.
147- subroutine s_mpi_bcast_user_inputs
147+ impure subroutine s_mpi_bcast_user_inputs
148148
149149#ifdef MFC_MPI
150150 integer :: i !< Generic loop iterator
@@ -206,7 +206,7 @@ contains
206206 !! as well as recomputing some of the global parameters so
207207 !! that they reflect the configuration of sub-domain that
208208 !! is overseen by the local processor.
209- subroutine s_mpi_decompose_computational_domain
209+ impure subroutine s_mpi_decompose_computational_domain
210210
211211#ifdef MFC_MPI
212212
@@ -646,7 +646,7 @@ contains
646646 !! cell-boundary locations is communicated.
647647 !! @param pbc_loc Processor boundary condition (PBC) location
648648 !! @param sweep_coord Coordinate direction normal to the processor boundary
649- subroutine s_mpi_sendrecv_grid_vars_buffer_regions(pbc_loc, sweep_coord)
649+ impure subroutine s_mpi_sendrecv_grid_vars_buffer_regions(pbc_loc, sweep_coord)
650650
651651 character(LEN=3), intent(in) :: pbc_loc
652652 character, intent(in) :: sweep_coord
@@ -846,7 +846,7 @@ contains
846846 !! @param pbc_loc Processor boundary condition (PBC) location
847847 !! @param sweep_coord Coordinate direction normal to the processor boundary
848848 !! @param q_particle Projection of the lagrangian particles in the Eulerian framework
849- subroutine s_mpi_sendrecv_cons_vars_buffer_regions(q_cons_vf, pbc_loc, &
849+ impure subroutine s_mpi_sendrecv_cons_vars_buffer_regions(q_cons_vf, pbc_loc, &
850850 sweep_coord, q_particle)
851851
852852 type(scalar_field), &
@@ -1496,7 +1496,7 @@ contains
14961496 !! @param spatial_extents Spatial extents for each processor's sub-domain. First dimension
14971497 !! corresponds to the minimum and maximum values, respectively, while
14981498 !! the second dimension corresponds to the processor rank.
1499- subroutine s_mpi_gather_spatial_extents (spatial_extents )
1499+ impure subroutine s_mpi_gather_spatial_extents (spatial_extents )
15001500
15011501 real (wp), dimension (1 :, 0 :), intent (INOUT ) :: spatial_extents
15021502
@@ -1615,7 +1615,7 @@ contains
16151615 !! puts back together the grid of the entire computational
16161616 !! domain on the rank 0 processor. This is only done for 1D
16171617 !! simulations.
1618- subroutine s_mpi_defragment_1d_grid_variable
1618+ impure subroutine s_mpi_defragment_1d_grid_variable
16191619
16201620#ifdef MFC_MPI
16211621
@@ -1651,7 +1651,7 @@ contains
16511651 !! First dimension of array corresponds to the former's minimum and
16521652 !! maximum values, respectively, while second dimension corresponds
16531653 !! to each processor's rank.
1654- subroutine s_mpi_gather_data_extents (q_sf , data_extents )
1654+ impure subroutine s_mpi_gather_data_extents (q_sf , data_extents )
16551655
16561656 real (wp), dimension (:, :, :), intent (in ) :: q_sf
16571657
@@ -1681,7 +1681,7 @@ contains
16811681 !! This is only done for 1D simulations.
16821682 !! @param q_sf Flow variable defined on a single computational sub-domain
16831683 !! @param q_root_sf Flow variable defined on the entire computational domain
1684- subroutine s_mpi_defragment_1d_flow_variable (q_sf , q_root_sf )
1684+ impure subroutine s_mpi_defragment_1d_flow_variable (q_sf , q_root_sf )
16851685
16861686 real (wp), &
16871687 dimension (0 :m), &
@@ -1705,7 +1705,7 @@ contains
17051705 end subroutine s_mpi_defragment_1d_flow_variable
17061706
17071707 !> Deallocation procedures for the module
1708- subroutine s_finalize_mpi_proxy_module
1708+ impure subroutine s_finalize_mpi_proxy_module
17091709
17101710#ifdef MFC_MPI
17111711
0 commit comments