@@ -99,7 +99,7 @@ static Value getOffsetForBitwidth(Location loc, OpFoldResult srcIdx,
       affine::makeComposedFoldedAffineApply(builder, loc, offsetExpr, {srcIdx});
   Value bitOffset = getValueOrCreateConstantIndexOp(builder, loc, offsetVal);
   IntegerType dstType = builder.getIntegerType(targetBits);
-  return builder.create<arith::IndexCastOp>(loc, dstType, bitOffset);
+  return arith::IndexCastOp::create(builder, loc, dstType, bitOffset);
 }
 
 /// When writing a subbyte size, masked bitwise operations are used to only
@@ -112,14 +112,14 @@ static Value getSubByteWriteMask(Location loc, OpFoldResult linearizedIndices,
   auto dstIntegerType = builder.getIntegerType(dstBits);
   auto maskRightAlignedAttr =
       builder.getIntegerAttr(dstIntegerType, (1 << srcBits) - 1);
-  Value maskRightAligned = builder.create<arith::ConstantOp>(
-      loc, dstIntegerType, maskRightAlignedAttr);
+  Value maskRightAligned = arith::ConstantOp::create(
+      builder, loc, dstIntegerType, maskRightAlignedAttr);
   Value writeMaskInverse =
-      builder.create<arith::ShLIOp>(loc, maskRightAligned, bitwidthOffset);
+      arith::ShLIOp::create(builder, loc, maskRightAligned, bitwidthOffset);
   auto flipValAttr = builder.getIntegerAttr(dstIntegerType, -1);
   Value flipVal =
-      builder.create<arith::ConstantOp>(loc, dstIntegerType, flipValAttr);
-  return builder.create<arith::XOrIOp>(loc, writeMaskInverse, flipVal);
+      arith::ConstantOp::create(builder, loc, dstIntegerType, flipValAttr);
+  return arith::XOrIOp::create(builder, loc, writeMaskInverse, flipVal);
 }
 
 /// Returns the scaled linearized index based on the `srcBits` and `dstBits`
@@ -141,7 +141,7 @@ getLinearizedSrcIndices(OpBuilder &builder, Location loc, int64_t srcBits,
                         const SmallVector<OpFoldResult> &indices,
                         Value memref) {
   auto stridedMetadata =
-      builder.create<memref::ExtractStridedMetadataOp>(loc, memref);
+      memref::ExtractStridedMetadataOp::create(builder, loc, memref);
   OpFoldResult linearizedIndices;
   std::tie(std::ignore, linearizedIndices) =
       memref::getLinearizedMemRefOffsetAndSize(
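getLinearizedSrcIndices flattens the multi-dimensional indices against the memref's strides before any srcBits/dstBits scaling. A plain-C++ model of that arithmetic (the helper's exact semantics are assumed here; shown for a 2x4 row-major memref):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Assumed model of linearization: offset + sum(indices[i] * strides[i]).
    long linearize(const std::vector<long> &indices,
                   const std::vector<long> &strides, long offset) {
      long linear = offset;
      for (std::size_t i = 0; i < indices.size(); ++i)
        linear += indices[i] * strides[i];
      return linear;
    }

    int main() {
      // 2x4 row-major memref: strides {4, 1}, zero base offset.
      long element = linearize({1, 2}, {4, 1}, 0); // -> 6
      // i4-in-i8 emulation packs scale = 8/4 = 2 elements per byte.
      const int scale = 2;
      std::printf("element %ld -> byte %ld, slot %ld\n", element,
                  element / scale, element % scale);
    }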
@@ -298,24 +298,24 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
     // Special case 0-rank memref loads.
     Value bitsLoad;
     if (convertedType.getRank() == 0) {
-      bitsLoad = rewriter.create<memref::LoadOp>(loc, adaptor.getMemref(),
-                                                 ValueRange{});
+      bitsLoad = memref::LoadOp::create(rewriter, loc, adaptor.getMemref(),
+                                        ValueRange{});
     } else {
       // Linearize the indices of the original load instruction. Do not account
       // for the scaling yet. This will be accounted for later.
       OpFoldResult linearizedIndices = getLinearizedSrcIndices(
           rewriter, loc, srcBits, adaptor.getIndices(), op.getMemRef());
 
-      Value newLoad = rewriter.create<memref::LoadOp>(
-          loc, adaptor.getMemref(),
+      Value newLoad = memref::LoadOp::create(
+          rewriter, loc, adaptor.getMemref(),
           getIndicesForLoadOrStore(rewriter, loc, linearizedIndices, srcBits,
                                    dstBits));
 
       // Get the offset and shift the bits to the rightmost.
       // Note, currently only the big-endian is supported.
       Value bitwidthOffset = getOffsetForBitwidth(loc, linearizedIndices,
                                                   srcBits, dstBits, rewriter);
-      bitsLoad = rewriter.create<arith::ShRSIOp>(loc, newLoad, bitwidthOffset);
+      bitsLoad = arith::ShRSIOp::create(rewriter, loc, newLoad, bitwidthOffset);
     }
 
     // Get the corresponding bits. If the arith computation bitwidth equals
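Outside of IR, the load path amounts to: load the wider container element, then shift the addressed subbyte field down to the least significant bits (the arith.shrsi above). The sketch below assumes the offset rule bitOffset = (index % scale) * srcBits, which lives in getOffsetForBitwidth rather than in this hunk; the masking that completes the load appears in the next hunk:

    #include <cstdint>
    #include <cstdio>

    // Assumed C++ model of the emulated i4 load from an i8 container.
    int emulatedLoadI4(const uint8_t *mem, long idx) {
      const int srcBits = 4, dstBits = 8, scale = dstBits / srcBits;
      uint8_t container = mem[idx / scale];         // memref.load of the byte
      int bitOffset = (int)(idx % scale) * srcBits; // getOffsetForBitwidth
      int shifted = (int8_t)container >> bitOffset; // arith.shrsi
      return shifted & ((1 << srcBits) - 1);        // andi, see next hunk
    }

    int main() {
      const uint8_t mem[2] = {0x21, 0x43}; // four packed i4 values: 1,2,3,4
      for (long i = 0; i < 4; ++i)
        std::printf("%d ", emulatedLoadI4(mem, i));
      std::printf("\n"); // prints: 1 2 3 4
    }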
@@ -331,17 +331,17 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
             : IntegerType::get(rewriter.getContext(),
                                resultTy.getIntOrFloatBitWidth());
     if (conversionTy == convertedElementType) {
-      auto mask = rewriter.create<arith::ConstantOp>(
-          loc, convertedElementType,
+      auto mask = arith::ConstantOp::create(
+          rewriter, loc, convertedElementType,
           rewriter.getIntegerAttr(convertedElementType, (1 << srcBits) - 1));
 
-      result = rewriter.create<arith::AndIOp>(loc, bitsLoad, mask);
+      result = arith::AndIOp::create(rewriter, loc, bitsLoad, mask);
     } else {
-      result = rewriter.create<arith::TruncIOp>(loc, conversionTy, bitsLoad);
+      result = arith::TruncIOp::create(rewriter, loc, conversionTy, bitsLoad);
     }
 
     if (conversionTy != resultTy) {
-      result = rewriter.create<arith::BitcastOp>(loc, resultTy, result);
+      result = arith::BitcastOp::create(rewriter, loc, resultTy, result);
     }
 
     rewriter.replaceOp(op, result);
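The andi here is not redundant: arith.shrsi sign-extends, so when the conversion type is the loaded container type itself, the mask (1 << srcBits) - 1 is what discards the extension bits; when the conversion type is narrower, arith.trunci drops them instead, and the final arith.bitcast recovers non-integer result types. A small demonstration of the sign-extension hazard:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int srcBits = 4;
      // One byte packing two i4 slots: low = 0x6, high = 0xF.
      int8_t loaded = (int8_t)0xF6; // i.e. -10
      int shifted = loaded >> 4;    // arithmetic shift sign-extends: not 0x0F
      // The andi against (1 << srcBits) - 1 clears the extension bits.
      unsigned result = (unsigned)shifted & ((1u << srcBits) - 1);
      std::printf("%x -> %x\n", (unsigned)(uint8_t)shifted, result); // ff -> f
    }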
@@ -428,20 +428,20 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
     // Pad the input value with 0s on the left.
     Value input = adaptor.getValue();
     if (!input.getType().isInteger()) {
-      input = rewriter.create<arith::BitcastOp>(
-          loc,
+      input = arith::BitcastOp::create(
+          rewriter, loc,
           IntegerType::get(rewriter.getContext(),
                            input.getType().getIntOrFloatBitWidth()),
           input);
     }
     Value extendedInput =
-        rewriter.create<arith::ExtUIOp>(loc, dstIntegerType, input);
+        arith::ExtUIOp::create(rewriter, loc, dstIntegerType, input);
 
     // Special case 0-rank memref stores. No need for masking.
     if (convertedType.getRank() == 0) {
-      rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::assign,
-                                           extendedInput, adaptor.getMemref(),
-                                           ValueRange{});
+      memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::assign,
+                                  extendedInput, adaptor.getMemref(),
+                                  ValueRange{});
       rewriter.eraseOp(op);
       return success();
     }
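The zero-extension (arith.extui) in this prologue matters: the extended value is later shifted and OR'ed into the container, so high bits introduced by a signed extension would be written over the neighboring element. A worked example under the same assumed i4-in-i8 packing:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t container = 0x70; // the high i4 slot already holds 0x7
      uint8_t i4val = 0xD;      // i4 payload (-3) destined for the low slot
      uint8_t zext = i4val;                                // extui: 0x0D
      uint8_t sext = (uint8_t)((int8_t)(i4val << 4) >> 4); // would be 0xFD
      // Store to the low slot (bit offset 0) after the 0xF0 clear mask:
      std::printf("%02x vs %02x\n",
                  (unsigned)((container & 0xF0u) | zext),  // 7d (correct)
                  (unsigned)((container & 0xF0u) | sext)); // fd (corrupted)
    }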
@@ -456,16 +456,14 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
                                           dstBits, bitwidthOffset, rewriter);
     // Align the value to write with the destination bits
     Value alignedVal =
-        rewriter.create<arith::ShLIOp>(loc, extendedInput, bitwidthOffset);
+        arith::ShLIOp::create(rewriter, loc, extendedInput, bitwidthOffset);
 
     // Clear destination bits
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::andi,
-                                         writeMask, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::andi,
+                                writeMask, adaptor.getMemref(), storeIndices);
     // Write srcs bits to destination
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::ori,
-                                         alignedVal, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::ori,
+                                alignedVal, adaptor.getMemref(), storeIndices);
     rewriter.eraseOp(op);
     return success();
   }
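Non-atomically, the two RMW ops above mean container = (container & writeMask) | alignedVal. A plain-C++ model with std::atomic, mirroring the andi-then-ori pair; note that, like this sketch, the pattern is only atomic per step, so a reader between the two operations can observe the cleared slot:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Assumed model of the emulated i4 store into an i8 container.
    void emulatedStoreI4(std::atomic<uint8_t> &container, int bitOffset,
                         uint8_t i4val) {
      uint8_t writeMask = (uint8_t)~(0xFu << bitOffset); // zeros at the slot
      uint8_t alignedVal = (uint8_t)((i4val & 0xFu) << bitOffset);
      container.fetch_and(writeMask); // atomic_rmw andi: clear dest bits
      container.fetch_or(alignedVal); // atomic_rmw ori: write src bits
    }

    int main() {
      std::atomic<uint8_t> byte{0x21};
      emulatedStoreI4(byte, 4, 0x7);                // overwrite the high slot
      std::printf("%02x\n", (unsigned)byte.load()); // prints: 71
    }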
@@ -525,8 +523,8 @@ struct ConvertMemRefSubview final : OpConversionPattern<memref::SubViewOp> {
     }
 
     // Transform the offsets, sizes and strides according to the emulation.
-    auto stridedMetadata = rewriter.create<memref::ExtractStridedMetadataOp>(
-        loc, subViewOp.getViewSource());
+    auto stridedMetadata = memref::ExtractStridedMetadataOp::create(
+        rewriter, loc, subViewOp.getViewSource());
 
     OpFoldResult linearizedIndices;
     auto strides = stridedMetadata.getConstifiedMixedStrides();