@@ -269,19 +269,22 @@ LogicalResult LoadNdOp::verify() {
269269 // result in SIMT mode. In the latter case, the tensor descriptor must be
270270 // evenly distributed, with each lane holding an equally sized fragment of
271271 // the result. Only subgroup size 8 or 16 is supported.
272- if (valueTy.getRank () == 1 && valueTy.getNumElements () < tdescTy.getNumElements ()) {
272+ if (valueTy.getRank () == 1 &&
273+ valueTy.getNumElements () < tdescTy.getNumElements ()) {
273274 // SIMT mode doesn't need LayoutAttr.
274275 if (tdescTy.getLayoutAttr ())
275- return emitOpError () << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
276+ return emitOpError ()
277+ << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
276278
277279 int tdescElems = tdescTy.getNumElements () * tdescTy.getArrayLength ();
278280 int valueElems = valueTy.getNumElements ();
279281
280- int lanes = tdescElems % valueElems == 0 ? tdescElems / valueElems: -1 ;
282+ int lanes = tdescElems % valueElems == 0 ? tdescElems / valueElems : -1 ;
281283 if (lanes != 16 && lanes != 8 ) {
282- return emitOpError () << " Result shape " << makeString (getShapeOf (valueTy))
283- << " is not a valid distribution for tensor descriptor "
284- << tdescTy;
284+ return emitOpError ()
285+ << " Result shape " << makeString (getShapeOf (valueTy))
286+ << " is not a valid distribution for tensor descriptor "
287+ << tdescTy;
285288 }
286289 return success ();
287290 }
@@ -325,7 +328,8 @@ LogicalResult LoadNdOp::verify() {
325328
326329 if (tdescShape != valueShape) {
327330 return emitOpError () << " Result shape " << makeString (valueShape)
328- << " is not consistent with tensor descriptor " << tdescTy;
331+ << " is not consistent with tensor descriptor "
332+ << tdescTy;
329333 }
330334
331335 return success ();
@@ -362,29 +366,31 @@ LogicalResult StoreNdOp::verify() {
362366 // Similar to LoadNdOp, handling a 1D vector as the value can be complex. It
363367 // may represent the input of a 1D block store in SIMD mode or a fragment of
364368 // a block store input in SIMT mode. In the latter case, the tensor descriptor
365- // must be evenly distributed, with each lane holding an equally sized fragment of
366- // the input. Only subgroup size 8 or 16 is supported.
369+ // must be evenly distributed, with each lane holding an equally sized
370+ // fragment of the input. Only subgroup size 8 or 16 is supported.
367371 if (valTy.getRank () == 1 && valTy.getNumElements () < dstTy.getNumElements ()) {
368372 // SIMT mode doesn't need LayoutAttr.
369373 if (dstTy.getLayoutAttr ())
370- return emitOpError () << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
374+ return emitOpError ()
375+ << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
371376
372377 int tdescElems = dstTy.getNumElements () * dstTy.getArrayLength ();
373378 int valueElems = valueShape[0 ];
374379
375- int lanes = tdescElems % valueElems == 0 ? tdescElems / valueElems: -1 ;
380+ int lanes = tdescElems % valueElems == 0 ? tdescElems / valueElems : -1 ;
376381 if (lanes != 16 && lanes != 8 ) {
377- return emitOpError () << " Value shape " << makeString ( getShapeOf (valTy))
378- << " is not a valid distribution for tensor descriptor "
379- << dstTy;
382+ return emitOpError ()
383+ << " Value shape " << makeString ( getShapeOf (valTy))
384+ << " is not a valid distribution for tensor descriptor " << dstTy;
380385 }
381386 return success ();
382387 }
383388
384389 // SIMD code should have the same shape as the tensor descriptor.
385390 if (tdescShape != valueShape) {
386391 return emitOpError () << " Value shape " << makeString (valueShape)
387- << " is not consistent with tensor descriptor " << dstTy;
392+ << " is not consistent with tensor descriptor "
393+ << dstTy;
388394 }
389395
390396 return success ();
@@ -539,12 +545,14 @@ LogicalResult LoadGatherOp::verify() {
539545 if (valueTy.getRank () == 1 && valueTy.getNumElements () != tdescShape[0 ]) {
540546 auto chunkSize = tdescTy.getChunkSize ();
541547 if (valueTy.getNumElements () != chunkSize) {
542- return emitOpError () << " Result shape " << makeString (valueShape)
543- << " is not a valid distribution for tensor descriptor "
544- << tdescTy;
548+ return emitOpError ()
549+ << " Result shape " << makeString (valueShape)
550+ << " is not a valid distribution for tensor descriptor "
551+ << tdescTy;
545552 } else { // valid SIMT code doesn't need LayoutAttr and TransposeAttr.
546553 if (tdescTy.getLayoutAttr ())
547- return emitOpError () << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
554+ return emitOpError ()
555+ << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
548556 if (getTransposeAttr ())
549557 return emitOpError () << " doesn't need TransposeAttr for SIMT code" ;
550558 }
@@ -598,12 +606,14 @@ LogicalResult StoreScatterOp::verify() {
598606 if (valueTy.getRank () == 1 && valueTy.getNumElements () != tdescShape[0 ]) {
599607 auto chunkSize = tdescTy.getChunkSize ();
600608 if (valueTy.getNumElements () != chunkSize) {
601- return emitOpError () << " Value shape " << makeString (valueShape)
602- << " is not a valid distribution for tensor descriptor "
603- << tdescTy;
609+ return emitOpError ()
610+ << " Value shape " << makeString (valueShape)
611+ << " is not a valid distribution for tensor descriptor "
612+ << tdescTy;
604613 } else { // valid SIMT code doesn't need LayoutAttr and TransposeAttr.
605614 if (tdescTy.getLayoutAttr ())
606- return emitOpError () << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
615+ return emitOpError ()
616+ << " TensorDesc doesn't need LayoutAttr for SIMT code" ;
607617 if (getTransposeAttr ())
608618 return emitOpError () << " doesn't need TransposeAttr for SIMT code" ;
609619 }