1111#define WIDTH 1024
1212#define HEIGHT 768
1313
14+ // Ex 2.2 (2 of 2), Vary this value
15+ // Ex 2.3 (2 of 2), Set this to 1000
1416#define MAX_ITERATIONS 18 //number of iterations
1517
1618//C parameters (modify these to change the zoom and position of the mandelbrot)
@@ -27,6 +29,8 @@ static rgb rand_banding[MAX_ITERATIONS + 1]; //random colour banding
2729static int local_histogram [HEIGHT ][MAX_ITERATIONS + 1 ]; //only required for exercise 2.2.3, HEIGHT is the maximum possible number of threads that could be initialised as it is the maximum size of the number of work units (width of parallel loop)
2830static rgb rgb_output [HEIGHT ][WIDTH ]; //output data
2931
32+ // Ex 2.2 (1 of 2), Update tf to HISTOGRAM_ESCAPE_VELOCITY
33+ // Ex 2.3 (1 of 2), Update tf to HISTOGRAM_NORMALISED_ITERATION_COUNT
3034const TRANSFER_FUNCTION tf = RANDOM_NORMALISED_ITERATION_COUNT ;
3135const HISTOGRAM_METHOD hist_method = OMP_ATOMIC ;
3236
@@ -69,6 +73,9 @@ int main(int argc, char *argv[])
6973 }
7074
7175 //STAGE 1) calculate the escape time for each pixel
76+ // Ex 2.1, Again the outer loop is being parallelised
77+ // The if clause here can prevent OpenMP from parallelising the loop at runtime, based on the value of hist_method
78+ // Ex 2.3.1-2.3.4, Vary the scheduling method and chunk size to observe the impact; see lecture notes for full details of each schedule type
7279#pragma omp parallel for private(i, x, c_r, c_i, n_r, n_i, o_r, o_i, mu) if(hist_method != OMP_SERIAL) schedule(dynamic, 1)
7380 for (y = 0 ; y < HEIGHT ; y ++ )
7481 for (x = 0 ; x < WIDTH ; x ++ )
@@ -106,28 +113,29 @@ int main(int argc, char *argv[])
106113
107114 if ((tf == HISTOGRAM_ESCAPE_VELOCITY ) || (tf == HISTOGRAM_NORMALISED_ITERATION_COUNT )){
108115 switch (hist_method ){
109- //Exercise 2.2
116+ //ex 2.2
110117 case (OMP_SERIAL ) : {
111118 histogram [i ]++ ;
112119 break ;
113120 }
114- //Exercise 2.2.1
121+ // ex 2.2.1, A critical section executes in serial, so that no two threads execute the block simultaneously
115122 case (OMP_CRITICAL ) : {
116123#pragma omp critical
117124 {
118125 histogram [i ]++ ;
119126 }
120127 break ;
121128 }
122- //Exercise 2.2.2
129+ // ex 2.2.2 (1 of 2), Each thread produces a local histogram, rather than contributing to the global histogram
123130 case (OMP_MASTER ) : {
124131 local_histogram [y ][i ]++ ;
125132 if (i == 0 ){
126133 printf ("WTF\n" );
127134 }
128135 break ;
129136 }
130- //Exercise 2.2.3
137+ // ex 2.2.3, The atomic construct uses hardware-specific functionality to perform parallel operations free of race conditions
138+ // In general, atomics are faster than critical sections, however they have more limited functionality
131139 case (OMP_ATOMIC ) : {
132140#pragma omp atomic
133141 histogram [i ]++ ;
@@ -139,7 +147,9 @@ int main(int argc, char *argv[])
139147
140148 }
141149
142- //Exercise 2.2.2 serial code for summing local histograms (performed by master only)
150+ // Exercise 2.2.2 serial code for summing local histograms (performed by master only)
151+ // ex 2.2.2 (2 of 2), The main thread then sums the local histograms in serial into the global histogram
152+ // As this is not part of a parallel block #pragma omp master is not required
143153 if (hist_method == OMP_MASTER ){
144154 for (y = 0 ; y < HEIGHT ; y ++ )
145155 for (i = 0 ; i < MAX_ITERATIONS ; i ++ )
0 commit comments