
Commit 73f7e86

Fix formatting issues
1 parent: 1811994


2 files changed, +18 -18 lines changed


mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h

Lines changed: 4 additions & 4 deletions
@@ -9,9 +9,9 @@
 #ifndef MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZABLEOPINTERFACE_H_
 #define MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZABLEOPINTERFACE_H_
 
-#include "mlir/Interfaces/FunctionInterfaces.h"
 #include "mlir/IR/Operation.h"
 #include "mlir/IR/PatternMatch.h"
+#include "mlir/Interfaces/FunctionInterfaces.h"
 #include "mlir/Support/LLVM.h"
 #include "llvm/ADT/DenseMapInfoVariant.h"
 #include "llvm/ADT/SetVector.h"
@@ -261,9 +261,9 @@ struct BufferizationOptions {
   using AnalysisStateInitFn = std::function<void(AnalysisState &)>;
   /// Tensor -> MemRef type converter.
   /// Parameters: Value, memory space, func op, bufferization options
-  using FunctionArgTypeConverterFn =
-      std::function<BaseMemRefType(TensorType, Attribute memorySpace,
-                                   FunctionOpInterface, const BufferizationOptions &)>;
+  using FunctionArgTypeConverterFn = std::function<BaseMemRefType(
+      TensorType, Attribute memorySpace, FunctionOpInterface,
+      const BufferizationOptions &)>;
   /// Tensor -> MemRef type converter.
   /// Parameters: Value, memory space, bufferization options
   using UnknownTypeConverterFn = std::function<BaseMemRefType(
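
For context, BufferizationOptions::FunctionArgTypeConverterFn (whose declaration is re-wrapped above) is the callback type through which callers of One-Shot Bufferize choose the memref type that a tensor-typed function argument is bufferized to. Below is a minimal sketch of a converter matching that signature; the body (identity layout for ranked tensors, unranked memref otherwise) and the variable name are illustrative assumptions, not code from this commit.

// Hypothetical example, assuming the mlir and mlir::bufferization namespaces.
// Maps ranked tensor arguments to identity-layout memrefs in the given memory
// space, and unranked tensors to unranked memrefs.
BufferizationOptions::FunctionArgTypeConverterFn exampleArgTypeConverter =
    [](TensorType tensorType, Attribute memorySpace, FunctionOpInterface,
       const BufferizationOptions &) -> BaseMemRefType {
  if (auto rankedType = dyn_cast<RankedTensorType>(tensorType))
    return MemRefType::get(rankedType.getShape(), rankedType.getElementType(),
                           MemRefLayoutAttrInterface(), memorySpace);
  return UnrankedMemRefType::get(tensorType.getElementType(), memorySpace);
};

How such a converter is registered on a BufferizationOptions instance is not part of this diff, so no particular member or setter is assumed here.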

mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp

Lines changed: 14 additions & 14 deletions
@@ -88,7 +88,7 @@ getOrCreateFuncAnalysisState(OneShotAnalysisState &state) {
 
 /// Return the unique ReturnOp that terminates `funcOp`.
 /// Return nullptr if there is no such unique ReturnOp.
-static Operation* getAssumedUniqueReturnOp(FunctionOpInterface funcOp) {
+static Operation *getAssumedUniqueReturnOp(FunctionOpInterface funcOp) {
   Operation *returnOp = nullptr;
   for (Block &b : funcOp.getFunctionBody()) {
     auto candidateOp = b.getTerminator();
@@ -127,7 +127,8 @@ static void annotateEquivalentReturnBbArg(OpOperand &returnVal,
 /// Store function BlockArguments that are equivalent to/aliasing a returned
 /// value in FuncAnalysisState.
 static LogicalResult
-aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &state,
+aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp,
+                             OneShotAnalysisState &state,
                              FuncAnalysisState &funcState) {
   if (funcOp.getFunctionBody().empty()) {
     // No function body available. Conservatively assume that every tensor
@@ -168,8 +169,8 @@ aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &s
   return success();
 }
 
-static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx, bool isRead,
-                                  bool isWritten) {
+static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx,
+                                  bool isRead, bool isWritten) {
   OpBuilder b(funcOp.getContext());
   Attribute accessType;
   if (isRead && isWritten) {
@@ -189,10 +190,10 @@ static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx, bool
 /// function with unknown ops, we conservatively assume that such ops bufferize
 /// to a read + write.
 static LogicalResult
-funcOpBbArgReadWriteAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &state,
+funcOpBbArgReadWriteAnalysis(FunctionOpInterface funcOp,
+                             OneShotAnalysisState &state,
                              FuncAnalysisState &funcState) {
-  for (int64_t idx = 0, e = funcOp.getNumArguments(); idx < e;
-       ++idx) {
+  for (int64_t idx = 0, e = funcOp.getNumArguments(); idx < e; ++idx) {
     // Skip non-tensor arguments.
     if (!isa<TensorType>(funcOp.getArgumentTypes()[idx]))
       continue;
@@ -277,10 +278,8 @@ static void equivalenceAnalysis(FunctionOpInterface funcOp,
 
 /// Return "true" if the given function signature has tensor semantics.
 static bool hasTensorSignature(FunctionOpInterface funcOp) {
-  return llvm::any_of(funcOp.getArgumentTypes(),
-                      llvm::IsaPred<TensorType>) ||
-         llvm::any_of(funcOp.getResultTypes(),
-                      llvm::IsaPred<TensorType>);
+  return llvm::any_of(funcOp.getArgumentTypes(), llvm::IsaPred<TensorType>) ||
+         llvm::any_of(funcOp.getResultTypes(), llvm::IsaPred<TensorType>);
 }
 
 /// Store all functions of the `moduleOp` in `orderedFuncOps`, sorted by
@@ -310,7 +309,8 @@ getFuncOpsOrderedByCalls(ModuleOp moduleOp,
     numberCallOpsContainedInFuncOp[funcOp] = 0;
     return funcOp.walk([&](CallOpInterface callOp) -> WalkResult {
       FunctionOpInterface calledFunction = getCalledFunction(callOp);
-      assert(calledFunction && "could not retrieved called FunctionOpInterface");
+      assert(calledFunction &&
+             "could not retrieved called FunctionOpInterface");
      // If the called function does not have any tensors in its signature, then
      // it is not necessary to bufferize the callee before the caller.
      if (!hasTensorSignature(calledFunction))
@@ -364,8 +364,8 @@ static void foldMemRefCasts(FunctionOpInterface funcOp) {
     }
   }
 
-  auto newFuncType = FunctionType::get(
-      funcOp.getContext(), funcOp.getArgumentTypes(), resultTypes);
+  auto newFuncType = FunctionType::get(funcOp.getContext(),
+                                       funcOp.getArgumentTypes(), resultTypes);
   funcOp.setType(newFuncType);
 }
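
For reference, the helper whose signature is reformatted in the first hunk of this file, getAssumedUniqueReturnOp, is documented to return the unique ReturnOp that terminates funcOp, or nullptr if there is no such unique op. A rough sketch of that contract follows; it assumes func::ReturnOp terminators and is not the exact body from this file.

// Hypothetical sketch: scan every block of `funcOp` and keep its terminator
// only if it is a func::ReturnOp; bail out with nullptr once a second return
// is seen, so the result is either the unique return op or nullptr.
static Operation *getUniqueReturnOpSketch(FunctionOpInterface funcOp) {
  Operation *returnOp = nullptr;
  for (Block &b : funcOp.getFunctionBody()) {
    Operation *candidateOp = b.getTerminator();
    if (!isa<func::ReturnOp>(candidateOp))
      continue;
    if (returnOp)
      return nullptr; // More than one return: not unique.
    returnOp = candidateOp;
  }
  return returnOp;
}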
