/*
 * Tue Nov 29 2022 Thomas Naughton <[email protected]>
 *
 * Loops over MPI_Alltoall() 'MAX_NLOOP' times.
 *
 * Usage: mpirun -np $nprocs ./a2a_test2_loop_v4 [N]
 *
 * Optional positional argument:
 *   arg1 - positive integer for number of loops
 *
 * If no args are provided, the program uses default values.
 *
 * Note: Initial SPC code bits adapted from 'ompi/examples/spc_example.c'
 *
 * TJN: Modified to use only one counter (OMPI_SPC_TIME_ALLTOALL);
 *      we also calculate the diff per-rank at the App level and show
 *      this info each run (at all ranks).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <mpi.h>

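/*
 * NOTE: ompi_spc_value_diff() is not part of the standard MPI API; it is
 * assumed here to be exported by the SPC-instrumented Open MPI build that
 * this example accompanies. The prototype below is inferred from the call
 * site and may need adjusting to match the actual header from that patch.
 */
extern int ompi_spc_value_diff(const char *counter_name,
                               long long past_value,
                               long long *new_value,
                               long long *diff);
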
int MAX_NLOOP = 100;   /* default loop count; overridden by argv[1] */

int main (int argc, char **argv)
{
    int rank, size;
    int *inbuf = NULL;
    int *outbuf = NULL;
    int i;
    int nloop;

    int rc;
    int provided, num, name_len, desc_len, verbosity, bind, var_class, readonly, continuous, atomic, count;
    char name[256], description[256];
    MPI_Datatype datatype;
    MPI_T_enum enumtype;
    long long value;
    int found = 0;
    int num_elem = 1024;
    long long _time_alltoall_past_value = 0;

    if (argc > 1) {
        MAX_NLOOP = atoi(argv[1]);
        if (MAX_NLOOP <= 0) {
            fprintf(stderr, "Error: loop count must be a positive integer\n");
            return 1;
        }
    }

    MPI_Init (&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Counter name to be read by all ranks */
    /* (See also: ompi_spc_counters_t for list) */
    char *counter_name = "runtime_spc_OMPI_SPC_TIME_ALLTOALL";
    MPI_T_pvar_handle handle;
    MPI_T_pvar_session session;

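    /* The MPI_T tools interface has its own lifecycle: it is initialized
     * here (independently of MPI_Init) and torn down with MPI_T_finalize().
     * Reading a performance variable requires a session plus a handle
     * bound to that variable. */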
    MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);

    /* Determine the MPI_T pvar index for the OMPI_SPC_TIME_ALLTOALL SPC */
    MPI_T_pvar_get_num(&num);

    rc = MPI_T_pvar_session_create(&session);

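    /* Pvar indices are implementation-specific, so scan every exposed
     * pvar and match the SPC counter by name. */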
    for(i = 0; i < num; i++) {
        name_len = desc_len = 256;
        rc = PMPI_T_pvar_get_info(i, name, &name_len, &verbosity,
                &var_class, &datatype, &enumtype, description, &desc_len, &bind,
                &readonly, &continuous, &atomic);
        if( MPI_SUCCESS != rc )
            continue;

        if(strcmp(name, counter_name) == 0) {
            /* Create the MPI_T session/handle for the counter and start it */
            rc = MPI_T_pvar_handle_alloc(session, i, NULL, &handle, &count);
            rc = MPI_T_pvar_start(session, handle);
            found = 1;
            //printf("[%d] =====================================\n", rank);
            //printf("[%d] %s -> %s\n", rank, name, description);
            //printf("[%d] =====================================\n", rank);
            //fflush(stdout);
            break;
        }
    }

    /* Make sure we found the counter */
    if(found == 0) {
        fprintf(stderr, "ERROR: Couldn't find the appropriate SPC counter in the MPI_T pvars.\n");
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

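    /* In MPI_Alltoall each rank exchanges num_elem ints with every rank
     * (itself included), so both buffers hold size * num_elem elements. */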
    inbuf = (int *) malloc ( size * num_elem * sizeof(int) );
    if (NULL == inbuf) {
        fprintf(stderr, "Error: malloc failed (inbuf)\n");
        goto cleanup;
    }

    outbuf = (int *) malloc ( size * num_elem * sizeof(int) );
    if (NULL == outbuf) {
        fprintf(stderr, "Error: malloc failed (outbuf)\n");
        goto cleanup;
    }

    for (i=0; i < size * num_elem; i++) {
        inbuf[i] = 100 + rank;
        outbuf[i] = 0;
    }

    /* Synchronize all ranks before entering the timed loop */
    MPI_Barrier(MPI_COMM_WORLD);

    for (nloop=0; nloop < MAX_NLOOP; nloop++) {
        long long tmp_max;          /* max counter value across all ranks */
        int global_rc;              /* sum of per-rank alltoall return codes */
        long long new_value = 0;    /* current SPC counter sample */
        long long diff = 0;         /* delta vs. previous iteration's sample */

        MPI_Barrier(MPI_COMM_WORLD);
        fflush(NULL);

        rc = MPI_Alltoall(inbuf, num_elem, MPI_INT, outbuf, num_elem, MPI_INT, MPI_COMM_WORLD);

        /* Check if alltoall had any problems (global_rc is known to all ranks) */
        MPI_Allreduce( &rc, &global_rc, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
        if (global_rc != 0) {
            if (rank == 0) {
                fprintf(stderr, "Error: Alltoall failed! (rc=%d)\n", global_rc);
            }
            goto cleanup;   /* every rank bails out together, avoiding deadlock */
        }

        /* Read the raw SPC value and find the maximum across ranks */
        MPI_T_pvar_read(session, handle, &value);
        MPI_Allreduce(&value, &tmp_max, 1, MPI_LONG_LONG, MPI_MAX, MPI_COMM_WORLD);

        /* Sample the counter again via the SPC helper, which also returns
         * the per-rank delta against the previous sample */
        rc = ompi_spc_value_diff("OMPI_SPC_TIME_ALLTOALL",
                                 _time_alltoall_past_value,
                                 &new_value,
                                 &diff);

        MPI_Barrier(MPI_COMM_WORLD);

        /* Print every iteration for short runs, otherwise every 10th,
         * then sleep briefly so the output can drain */
        if ((MAX_NLOOP <= 20) || ( !(nloop % 10) )) {
            //int usecs = 0;
            int usecs = 250000; /* 0.25 sec */
            //int usecs = 100000; /* 0.1 sec */
            //int usecs = 2000000; /* 2 sec */

            printf("%12s: Rank: %5d Size: %5d Loop: %8d %s: %lld max: %lld prev_value: %lld new_value: %lld diff: %lld -- SLEEP: %dus\n",
                    "a2a_looper", rank, size, nloop, counter_name, value, tmp_max, _time_alltoall_past_value, new_value, diff, usecs);
            usleep(usecs);
        }

        _time_alltoall_past_value = new_value;   /* remember sample for next diff */

        fflush(NULL);
        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);

#if 0
    printf("[%d] ==========================\n", rank);
    fflush(NULL);

    rc = MPI_T_pvar_read(session, handle, &value);
    printf("TJN: [%d] Value Read: %lld (%s)\n", rank, value, counter_name);
    fflush(stdout);

    MPI_Barrier(MPI_COMM_WORLD);
#endif

    /* Stop the counter and free the handle */
    rc = MPI_T_pvar_stop(session, handle);
    rc = MPI_T_pvar_handle_free(session, &handle);

    /* Free the MPI_T session */
    rc = MPI_T_pvar_session_free(&session);

cleanup:
    if (NULL != inbuf)
        free(inbuf);

    if (NULL != outbuf)
        free(outbuf);

    MPI_T_finalize();
    MPI_Finalize();

    return (0);
}