Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
b9ae621
[MLIR][SparseTensor] Loop ordering strategy infrastructure (flag)
gmalasan Aug 21, 2025
97cec4e
[MLIR][SparseTensor] Fixed up the rest of the boilerplate code, strat…
gmalasan Aug 21, 2025
3f3661a
[MLIR][SparseTensor] Fixed PR feedback about style
gmalasan Aug 26, 2025
17943dc
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 26, 2025
6f3012f
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 26, 2025
e9a0671
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 30, 2025
3a5d7e2
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 1, 2025
d0b96d7
[MLIR][SparseTensor] Comment style fixes
gmalasan Sep 8, 2025
ccc784d
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 8, 2025
fde6039
[MLIR][SparseTensor] Missed comment style fix in Passes.h
gmalasan Sep 9, 2025
e80fee7
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 9, 2025
79ee468
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 12, 2025
cc7af1b
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 22, 2025
b6f5c6b
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 23, 2025
3832d9d
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 23, 2025
fbfa3b6
Apply clang-format to C++ files for loop ordering infrastructure
gmalasan Sep 24, 2025
8630cc7
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 24, 2025
db6ed6d
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 24, 2025
57c4088
Fix clang-format issues in SparseTensorPasses.cpp
gmalasan Sep 27, 2025
7e416fc
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 27, 2025
c108117
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 27, 2025
b330e49
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 30, 2025
850737c
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
aartbik Sep 30, 2025
a914bc8
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
aartbik Oct 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,15 @@ enum class SparseEmitStrategy {
kDebugInterface, // generate only place-holder for sparse iteration
};

namespace sparse_tensor {

/// Selects the heuristic used to order loops when scheduling the iteration
/// graph of a sparse kernel. New ordering strategies should be added as
/// additional enumerators here (and wired into the pass option).
enum class LoopOrderingStrategy : unsigned {
kDefault, ///< Default strategy (preserves the current scheduling behavior).
};

} // namespace sparse_tensor

#define GEN_PASS_DECL
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"

Expand All @@ -72,10 +81,13 @@ std::unique_ptr<Pass> createSparseAssembler(bool directOut);
//===----------------------------------------------------------------------===//

void populateSparseReinterpretMap(RewritePatternSet &patterns,
ReinterpretMapScope scope);
ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy = sparse_tensor::LoopOrderingStrategy::kDefault);

std::unique_ptr<Pass> createSparseReinterpretMapPass();
std::unique_ptr<Pass> createSparseReinterpretMapPass(ReinterpretMapScope scope);
std::unique_ptr<Pass> createSparseReinterpretMapPass(ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy);

//===----------------------------------------------------------------------===//
// The PreSparsificationRewriting pass.
Expand Down
5 changes: 5 additions & 0 deletions mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,11 @@ def SparseReinterpretMap : Pass<"sparse-reinterpret-map", "ModuleOp"> {
clEnumValN(mlir::ReinterpretMapScope::kExceptGeneric,
"except-generic",
"Run on operations expect linalg.generic (e.g., foreach)"))}]>,
Option<"loopOrderingStrategy", "loop-ordering-strategy", "mlir::sparse_tensor::LoopOrderingStrategy",
"mlir::sparse_tensor::LoopOrderingStrategy::kDefault",
"Set the loop ordering strategy for sparse tensor dialect", [{llvm::cl::values(
clEnumValN(mlir::sparse_tensor::LoopOrderingStrategy::kDefault, "default",
"Default strategy (current behavior)"))}]>,
];
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -408,7 +408,9 @@ struct GenericOpReinterpretMap
};

struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
using OpRewritePattern::OpRewritePattern;
GenericOpScheduler(MLIRContext *context, sparse_tensor::LoopOrderingStrategy strategy)
: OpRewritePattern<linalg::GenericOp>(context), strategy(strategy) {}

LogicalResult matchAndRewrite(linalg::GenericOp linalgOp,
PatternRewriter &rewriter) const override {
if (linalgOp.getNumDpsInits() != 1 || !linalgOp.hasPureTensorSemantics() ||
Expand All @@ -421,7 +423,8 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
if (linalgOp->hasAttr(sorted))
return failure();

auto scheduler = IterationGraphSorter::fromGenericOp(linalgOp);
// Pass strategy to IterationGraphSorter
auto scheduler = IterationGraphSorter::fromGenericOp(linalgOp, strategy);
bool isAdmissible = false;
AffineMap order;
// A const list of all masks that we used for iteration graph
Expand Down Expand Up @@ -583,6 +586,9 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
// TODO: convert more than one?
return failure();
}

private:
sparse_tensor::LoopOrderingStrategy strategy;
};

//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -788,11 +794,12 @@ struct ForeachOpDemapper
} // namespace

void mlir::populateSparseReinterpretMap(RewritePatternSet &patterns,
ReinterpretMapScope scope) {
ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy) {
if (scope == ReinterpretMapScope::kAll ||
scope == ReinterpretMapScope::kGenericOnly) {
patterns.add<GenericOpReinterpretMap, GenericOpScheduler>(
patterns.getContext());
patterns.add<GenericOpReinterpretMap>(patterns.getContext());
patterns.add<GenericOpScheduler>(patterns.getContext(), strategy);
}
if (scope == ReinterpretMapScope::kAll ||
scope == ReinterpretMapScope::kExceptGeneric) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,13 @@ struct SparseReinterpretMap
SparseReinterpretMap(const SparseReinterpretMap &pass) = default;
SparseReinterpretMap(const SparseReinterpretMapOptions &options) {
scope = options.scope;
loopOrderingStrategy = options.loopOrderingStrategy;
}

void runOnOperation() override {
auto *ctx = &getContext();
RewritePatternSet patterns(ctx);
populateSparseReinterpretMap(patterns, scope);
populateSparseReinterpretMap(patterns, scope, loopOrderingStrategy);
(void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};
Expand Down Expand Up @@ -438,6 +439,15 @@ mlir::createSparseReinterpretMapPass(ReinterpretMapScope scope) {
return std::make_unique<SparseReinterpretMap>(options);
}

/// Overload of createSparseReinterpretMapPass that additionally lets the
/// caller choose the loop ordering strategy applied while scheduling sparse
/// kernels. Both settings are forwarded to the pass through
/// SparseReinterpretMapOptions.
std::unique_ptr<Pass>
mlir::createSparseReinterpretMapPass(ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy) {
SparseReinterpretMapOptions options;
options.scope = scope;
options.loopOrderingStrategy = strategy;
return std::make_unique<SparseReinterpretMap>(options);
}

/// Factory for the PreSparsificationRewriting pass (no configurable options).
std::unique_ptr<Pass> mlir::createPreSparsificationRewritePass() {
return std::make_unique<PreSparsificationRewritePass>();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,15 @@ AffineMap IterationGraphSorter::topoSort() {
// We always prefer a parallel loop over a reduction loop because putting
// a reduction loop early might make the loop sequence inadmissible.
auto &it = !parIt.empty() ? parIt : redIt;
auto src = it.back();

// Select loop based on strategy
unsigned src;
switch (strategy) {
case sparse_tensor::LoopOrderingStrategy::kDefault:
src = it.back();
break;
}

loopOrder.push_back(src);
it.pop_back();
// Update in-degree, and push 0-degree node into worklist.
Expand All @@ -123,7 +131,8 @@ AffineMap IterationGraphSorter::topoSort() {
}

IterationGraphSorter
IterationGraphSorter::fromGenericOp(linalg::GenericOp genericOp) {
IterationGraphSorter::fromGenericOp(linalg::GenericOp genericOp,
sparse_tensor::LoopOrderingStrategy strategy) {
// Must be a demapped sparse kernel.
assert(!hasAnyNonIdentityOperandsOrResults(genericOp) &&
hasAnySparseOperandOrResult(genericOp) &&
Expand All @@ -140,14 +149,15 @@ IterationGraphSorter::fromGenericOp(linalg::GenericOp genericOp) {
genericOp.getIteratorTypesArray();

return IterationGraphSorter(std::move(ins), std::move(loopMap), out, outMap,
std::move(iterTypes));
std::move(iterTypes), strategy);
}

IterationGraphSorter::IterationGraphSorter(
SmallVector<Value> &&ins, SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl, SmallVector<utils::IteratorType> &&iterTypes)
AffineMap loop2OutLvl, SmallVector<utils::IteratorType> &&iterTypes,
sparse_tensor::LoopOrderingStrategy strategy)
: ins(std::move(ins)), loop2InsLvl(std::move(loop2InsLvl)), out(out),
loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypes)) {
loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypes)), strategy(strategy) {
// One map per tensor.
assert(loop2InsLvl.size() == ins.size());
// All the affine maps have the same number of dimensions (loops).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_

#include "mlir/IR/AffineMap.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"

namespace mlir {

Expand Down Expand Up @@ -42,8 +43,9 @@ enum class SortMask : unsigned {
class IterationGraphSorter {
public:
/// Factory method that construct an iteration graph sorter
/// for the given linalg.generic operation.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);
/// for the given linalg.generic operation with a specific strategy.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp,
sparse_tensor::LoopOrderingStrategy strategy);

/// Returns a permutation that represents the scheduled loop order.
/// Note that the returned AffineMap could be null if the kernel
Expand All @@ -58,7 +60,8 @@ class IterationGraphSorter {
IterationGraphSorter(SmallVector<Value> &&ins,
SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl,
SmallVector<utils::IteratorType> &&iterTypes);
SmallVector<utils::IteratorType> &&iterTypes,
sparse_tensor::LoopOrderingStrategy strategy = sparse_tensor::LoopOrderingStrategy::kDefault);

// Adds all the constraints in the given loop to level map.
void addConstraints(Value t, AffineMap loop2LvlMap);
Expand All @@ -84,6 +87,9 @@ class IterationGraphSorter {

// InDegree used for topo sort.
std::vector<unsigned> inDegree;

// Loop ordering strategy.
sparse_tensor::LoopOrderingStrategy strategy;
};

} // namespace sparse_tensor
Expand Down