@@ -54,6 +54,8 @@ module mpas_block_decomp
5454 subroutine mpas_block_decomp_cells_for_proc (dminfo , partial_global_graph_info , local_cell_list , block_id , block_start , &
5555 block_count , numBlocks , explicitProcDecomp , blockFilePrefix , procFilePrefix )!{{{
5656
57+ use mpas_timer, only : mpas_timer_start, mpas_timer_stop
58+
5759 implicit none
5860
5961 type (dm_info), intent (inout ) :: dminfo !< Input: domain information
@@ -84,6 +86,8 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l
8486
8587 no_blocks = .false.
8688
89+ call mpas_timer_start(' mpas_block_decomp_cells_for_proc' )
90+
8791 if (numBlocks == 0) then
8892 dminfo % total_blocks = dminfo % nProcs
8993 else
@@ -321,6 +325,8 @@ subroutine mpas_block_decomp_cells_for_proc(dminfo, partial_global_graph_info, l
321325 end if
322326 end if
323327
328+ call mpas_timer_stop(' mpas_block_decomp_cells_for_proc' )
329+
324330 end subroutine mpas_block_decomp_cells_for_proc!}}}
325331
326332!***********************************************************************
@@ -341,6 +347,7 @@ end subroutine mpas_block_decomp_cells_for_proc!}}}
341347#ifdef MPAS_SCOTCH
342348 subroutine mpas_block_decomp_scotch(dminfo, partial_global_graph_info, blockFilePrefix, blockFilename)!{{{
343349
350+ use mpas_timer, only : mpas_timer_start, mpas_timer_stop
344351#ifdef MPAS_USE_MPI_F08
345352 use mpi_f08, only : MPI_Comm, MPI_INTEGER, MPI_Comm_dup, MPI_Comm_free, MPI_Gather, MPI_Gatherv
346353#else
@@ -386,6 +393,8 @@ subroutine mpas_block_decomp_scotch(dminfo, partial_global_graph_info, blockFile
386393 allocate(global_block_id_arr(partial_global_graph_info % nVerticesTotal))
387394 allocate(local_block_id_arr(partial_global_graph_info % nVertices))
388395
396+ call mpas_timer_start(' scotch_total' )
397+
389398 ! Count the number of edges (including to ghost cells) in the portion of graph
390399 ! owned by the current rank. Each edge is counted twice, once for each vertex,
391400 ! with the exception of edges to ghost vertices, which are counted only once.
@@ -451,10 +460,13 @@ subroutine mpas_block_decomp_scotch(dminfo, partial_global_graph_info, blockFile
451460 ! Initialize the strategy data structure
452461 call scotch_stratinit (stradat)
453462
463+ call mpas_timer_start(' scotch_graph_partitioning' )
454464 ! Partition the distributed graph and save the result in local_block_id_arr
455465 npart = dminfo % nProcs
456466 call scotch_dgraphpart (scotchdgraph, npart, stradat, local_block_id_arr)
457467
468+ call mpas_timer_stop(' scotch_graph_partitioning' )
469+
458470 ! After the paritioning above, each processor would not necessarily have information about all of the
459471 ! vertices it owns. To obtain this information, Scotch provides a convenience function to redistribute the graph
460472 ! to all processors, so that each processor has information about all of the vertices it owns.
@@ -533,6 +545,8 @@ subroutine mpas_block_decomp_scotch(dminfo, partial_global_graph_info, blockFile
533545
534546 call MPI_Comm_free(localcomm, mpi_ierr)
535547
548+ call mpas_timer_stop(' scotch_total' )
549+
536550 end subroutine mpas_block_decomp_scotch
537551#endif
538552