@@ -216,26 +216,26 @@ extern "C" CompMatr createCompMatr(int numQubits) {
     qindex numRows = powerOf2(numQubits);
     qindex numElems = numRows * numRows;

+    // attempt to allocate 1D memory
     qcomp* cpuMem = cpu_allocArray(numElems); // nullptr if failed
     qcomp* gpuMem = nullptr;
     if (getQuESTEnv().isGpuAccelerated)
         gpuMem = gpu_allocArray(numElems); // nullptr if failed

-    // initialise all CompMatr fields inline because most are const
-    CompMatr out = {
-        .numQubits = numQubits,
-        .numRows = numRows,
+    // prepare output CompMatr (avoiding C++20 designated initialiser)
+    CompMatr out;
+    out.numQubits = numQubits;
+    out.numRows = numRows;

-        // allocate flags in the heap so that struct copies are mutable
-        .isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(), // nullptr if failed
-        .isApproxHermitian = util_allocEpsilonSensitiveHeapFlag(),
+    // attempt to allocate (uninitialised) flags in the heap so that struct copies are mutable
+    out.isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(); // nullptr if failed
+    out.isApproxHermitian = util_allocEpsilonSensitiveHeapFlag();
+    out.wasGpuSynced = cpu_allocHeapFlag(); // nullptr if failed

-        .wasGpuSynced = cpu_allocHeapFlag(), // nullptr if failed
-
-        .cpuElems = cpu_allocAndInitMatrixWrapper(cpuMem, numRows), // nullptr if failed
-        .cpuElemsFlat = cpuMem,
-        .gpuElemsFlat = gpuMem
-    };
+    // attempt to allocate 2D alias for 1D CPU memory
+    out.cpuElems = cpu_allocAndInitMatrixWrapper(cpuMem, numRows); // nullptr if failed
+    out.cpuElemsFlat = cpuMem;
+    out.gpuElemsFlat = gpuMem;

     validateMatrixAllocs(out, __func__);
     setInitialHeapFlags(out);
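
Note on the change above: designated initialisers in aggregate initialisation only became standard in C++20, which is presumably why the inline form was dropped in favour of default-construction plus member assignment (the latter also presumes the assigned fields are non-const, consistent with the old "because most are const" comment disappearing). A minimal sketch of the two styles, using a hypothetical stand-in struct rather than QuEST's real CompMatr:

```cpp
// hypothetical stand-in; QuEST's real CompMatr has more fields
struct Matr {
    int numQubits;
    long long numRows;
};

Matr makeMatr(int numQubits) {
    // old style: designated initialisers, rejected by pre-C++20 compilers
    // Matr out = { .numQubits = numQubits, .numRows = 1LL << numQubits };

    // new style: default-construct then assign; valid in every C++ standard,
    // provided the fields are assignable (i.e. not const)
    Matr out;
    out.numQubits = numQubits;
    out.numRows = 1LL << numQubits;
    return out;
}
```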
@@ -250,24 +250,21 @@ extern "C" DiagMatr createDiagMatr(int numQubits) {
     // validation ensures this never overflows
     qindex numElems = powerOf2(numQubits);

-    // initialise all CompMatr fields inline because most are const
-    DiagMatr out = {
-        .numQubits = numQubits,
-        .numElems = numElems,
-
-        // allocate flags in the heap so that struct copies are mutable
-        .isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(), // nullptr if failed
-        .isApproxHermitian = util_allocEpsilonSensitiveHeapFlag(),
-        .isApproxNonZero = util_allocEpsilonSensitiveHeapFlag(),
-        .isStrictlyNonNegative = cpu_allocHeapFlag(), // nullptr if failed
-        .wasGpuSynced = cpu_allocHeapFlag(),
+    // prepare output DiagMatr (avoiding C++20 designated initialiser)
+    DiagMatr out;
+    out.numQubits = numQubits;
+    out.numElems = numElems;

-        // 1D CPU memory
-        .cpuElems = cpu_allocArray(numElems), // nullptr if failed
+    // attempt to allocate (uninitialised) flags in the heap so that struct copies are mutable
+    out.isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(); // nullptr if failed
+    out.isApproxHermitian = util_allocEpsilonSensitiveHeapFlag();
+    out.isApproxNonZero = util_allocEpsilonSensitiveHeapFlag();
+    out.isStrictlyNonNegative = cpu_allocHeapFlag(); // nullptr if failed
+    out.wasGpuSynced = cpu_allocHeapFlag();

-        // 1D GPU memory
-        .gpuElems = (getQuESTEnv().isGpuAccelerated)? gpu_allocArray(numElems) : nullptr // nullptr if failed or not needed
-    };
+    // attempt to allocate 1D memory (nullptr if failed or not allocated)
+    out.cpuElems = cpu_allocArray(numElems);
+    out.gpuElems = (getQuESTEnv().isGpuAccelerated)? gpu_allocArray(numElems) : nullptr;

     validateMatrixAllocs(out, __func__);
     setInitialHeapFlags(out);
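
The heap-allocated flags above merit a remark: because each flag lives behind a pointer, every by-value copy of the struct shares one mutable flag, so marking a copy as (say) GPU-synced is visible through the original even though the structs themselves are passed around by value. A minimal sketch of the pattern, with `allocHeapFlag` as a hypothetical stand-in for `cpu_allocHeapFlag`:

```cpp
#include <cstdlib>

// hypothetical stand-in for cpu_allocHeapFlag(): nullptr on failure, and the
// pointee is deliberately left uninitialised, as in the diff above
static int* allocHeapFlag() {
    return (int*) std::malloc(sizeof(int));
}

struct Synced {
    int* wasGpuSynced; // shared by every shallow copy of the struct
};

int main() {
    Synced a;
    a.wasGpuSynced = allocHeapFlag();
    if (a.wasGpuSynced == nullptr)
        return 1;            // analogous to validateMatrixAllocs() failing
    *a.wasGpuSynced = 0;

    Synced b = a;            // by-value copy: the pointer is copied, not the int
    *b.wasGpuSynced = 1;     // marking b as synced...
    bool shared = (*a.wasGpuSynced == 1); // ...is visible through a

    std::free(a.wasGpuSynced);
    return shared? 0 : 1;
}
```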
@@ -289,30 +286,27 @@ FullStateDiagMatr validateAndCreateCustomFullStateDiagMatr(int numQubits, int us
     qindex numElems = powerOf2(numQubits);
     qindex numElemsPerNode = numElems / (useDistrib? env.numNodes : 1); // divides evenly

-    FullStateDiagMatr out = {
-
-        .numQubits = numQubits,
-        .numElems = numElems,
-
-        // data deployment configuration; disable distrib if deployed to 1 node
-        .isGpuAccelerated = useGpuAccel,
-        .isMultithreaded = useMultithread,
-        .isDistributed = useDistrib && (env.numNodes > 1),
-        .numElemsPerNode = numElemsPerNode,
-
-        // allocate flags in the heap so that struct copies are mutable
-        .isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(), // nullptr if failed
-        .isApproxHermitian = util_allocEpsilonSensitiveHeapFlag(),
-        .isApproxNonZero = util_allocEpsilonSensitiveHeapFlag(),
-        .isStrictlyNonNegative = cpu_allocHeapFlag(), // nullptr if failed
-        .wasGpuSynced = cpu_allocHeapFlag(),
-
-        // 1D CPU memory
-        .cpuElems = cpu_allocArray(numElemsPerNode), // nullptr if failed
-
-        // 1D GPU memory
-        .gpuElems = (useGpuAccel)? gpu_allocArray(numElemsPerNode) : nullptr, // nullptr if failed or not needed
-    };
+    // prepare output FullStateDiagMatr (avoiding C++20 designated initialiser)
+    FullStateDiagMatr out;
+    out.numQubits = numQubits;
+    out.numElems = numElems;
+
+    // bind deployments, disabling distribution if using a single MPI node
+    out.isGpuAccelerated = useGpuAccel;
+    out.isMultithreaded = useMultithread;
+    out.isDistributed = useDistrib && (env.numNodes > 1);
+    out.numElemsPerNode = numElemsPerNode;
+
+    // attempt to allocate (uninitialised) flags in the heap so that struct copies are mutable
+    out.isApproxUnitary = util_allocEpsilonSensitiveHeapFlag(); // nullptr if failed
+    out.isApproxHermitian = util_allocEpsilonSensitiveHeapFlag();
+    out.isApproxNonZero = util_allocEpsilonSensitiveHeapFlag();
+    out.isStrictlyNonNegative = cpu_allocHeapFlag(); // nullptr if failed
+    out.wasGpuSynced = cpu_allocHeapFlag();
+
+    // attempt to allocate 1D memory (nullptr if failed or not allocated)
+    out.cpuElems = cpu_allocArray(numElemsPerNode);
+    out.gpuElems = (useGpuAccel)? gpu_allocArray(numElemsPerNode) : nullptr;

     validateMatrixAllocs(out, __func__);
     setInitialHeapFlags(out);
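
Last, the deployment arithmetic: the 2^numQubits elements are divided between nodes only when distribution is requested, while isDistributed is additionally cleared on a single node, where the division by one leaves every element local anyway. A standalone sketch of that calculation (helper name hypothetical; validation is assumed to have already guaranteed an even split):

```cpp
#include <cassert>

typedef long long qindex; // stand-in for QuEST's qindex

// hypothetical helper mirroring the numElemsPerNode expression above
qindex getNumElemsPerNode(int numQubits, bool useDistrib, int numNodes) {
    qindex numElems = (qindex) 1 << numQubits;
    return numElems / (useDistrib? numNodes : 1);
}

int main() {
    assert(getNumElemsPerNode(10, true,  4) == 256);  // split over 4 nodes
    assert(getNumElemsPerNode(10, true,  1) == 1024); // 1 node: split is moot
    assert(getNumElemsPerNode(10, false, 4) == 1024); // distribution disabled
    return 0;
}
```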