Skip to content

Commit f06fba6

Browse files
NFC: Optimize includes in header files to limit header size
- Adds a utility script to analyze header file sizes after preprocessing. - Optimize includes in some of the largest headers in order to reduce size using pruning of included files or forward declarations where relevant. GitOrigin-RevId: 8f30fbbba54a4cfae6e4904352f2c04bc2677727
1 parent 70582e0 commit f06fba6

File tree

101 files changed

+771
-598
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

101 files changed

+771
-598
lines changed

mlir-tensorrt/common/include/mlir-tensorrt-common/Dialect/LinalgExt/Transforms/ToLoopsOpInterfaceImpl.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,16 @@
2626
#define MLIR_TENSORRT_COMMON_DIALECT_LINALGEXT_TRANSFORMS_TOLOOPSOPINTERFACEIMPL
2727

2828
#include "mlir-tensorrt-common/Interfaces/ToLoopsOpInterface.h"
29-
#include "mlir/IR/DialectRegistry.h"
3029

31-
namespace mlir::linalg {
30+
namespace mlir {
31+
class DialectRegistry;
32+
namespace linalg {
3233
class LinalgOp;
3334
}
34-
35-
namespace mlir::scf {
35+
namespace scf {
3636
class ForOp;
3737
}
38+
} // namespace mlir
3839

3940
namespace mlir::linalg_ext {
4041

mlir-tensorrt/common/include/mlir-tensorrt-common/Interfaces/BufferizationScopeInterface.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
//===- BufferizationScopeInterface.h ---------------------------*- C++ -*-===//
22
//
3-
// SPDX-FileCopyrightText: Copyright 2025 NVIDIA CORPORATION & AFFILIATES.
3+
// SPDX-FileCopyrightText: Copyright 2025-2026 NVIDIA CORPORATION & AFFILIATES.
44
// All rights reserved.
55
// SPDX-License-Identifier: Apache-2.0
66
//
@@ -24,7 +24,7 @@
2424
#ifndef MLIR_TENSORRT_COMMON_INTERFACES_BUFFERIZATIONSCOPEINTERFACE_H
2525
#define MLIR_TENSORRT_COMMON_INTERFACES_BUFFERIZATIONSCOPEINTERFACE_H
2626

27-
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
27+
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h" // IWYU pragma: keep
2828

2929
#include "mlir-tensorrt-common/Interfaces/BufferizationScopeInterface.h.inc"
3030

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
#ifndef MLIR_TENSORRT_COMMON_INTERFACES_TENSORKINDATTRINTERFACE
2+
#define MLIR_TENSORRT_COMMON_INTERFACES_TENSORKINDATTRINTERFACE
3+
4+
#include "mlir/Analysis/DataFlowFramework.h"
5+
#include "mlir/Support/LLVM.h"
6+
namespace mlir {
7+
8+
enum class TensorKind { Unknown = 0, Device, Host, Both };
9+
10+
/// Represents a lattice point. The lattice picture looks like this:
11+
///
12+
/// ```
13+
/// "both" (top)
14+
/// / \ (host and device states are at same level)
15+
/// "host" "device"
16+
/// \ /
17+
/// unknown
18+
/// |
19+
/// uninitialized (bottom)
20+
///
21+
/// ```
22+
struct TensorKindInfo {
23+
/// Create the type info in an uninitialized state.
24+
TensorKindInfo();
25+
TensorKindInfo(TensorKind initialKind);
26+
27+
/// Print a description of this lattice point to the stream.
28+
void print(raw_ostream &os) const;
29+
30+
/// Set the value and return whether there was a change.
31+
ChangeResult setKind(TensorKind kind);
32+
33+
/// Return whether this lattice point has an uninitialized value.
34+
bool isUninitialized() const;
35+
36+
/// Return whether this lattice point is unknown.
37+
bool isUnknown() const;
38+
39+
static TensorKindInfo join(const TensorKindInfo &lhs,
40+
const TensorKindInfo &rhs);
41+
static TensorKindInfo meet(const TensorKindInfo &lhs,
42+
const TensorKindInfo &rhs);
43+
44+
TensorKind getKind() const;
45+
46+
/// Return true if the kind has `Host` value.
47+
bool isHostOnly() const;
48+
49+
/// Returns true if the kind is `host` or `both`.
50+
bool isHostVisible() const;
51+
52+
/// Return true if this is a device tensor and not 'both'.
53+
bool isDeviceOnly() const;
54+
55+
/// Returns true if this is a tensor visible on the device (device or both).
56+
bool isDeviceVisible() const;
57+
58+
/// Return true if the kind is both a host tensor and a device tensor.
59+
bool isBothHostAndDevice() const;
60+
61+
bool operator<(const TensorKindInfo &other) const;
62+
63+
bool operator==(const TensorKindInfo &other) const;
64+
bool operator!=(const TensorKindInfo &other) const;
65+
66+
std::optional<TensorKind> kind;
67+
};
68+
69+
llvm::StringRef stringifyTensorKind(TensorKind kind);
70+
71+
/// Return the name of the function arg attr UnitAttr
72+
/// that should be used to mark an argument as a shape tensor.
73+
StringRef getHostTensorArgAttrName();
74+
75+
} // namespace mlir
76+
77+
#include "mlir-tensorrt-common/Interfaces/TensorKindAttrInterface.h.inc"
78+
79+
#endif // MLIR_TENSORRT_COMMON_INTERFACES_TENSORKINDATTRINTERFACE
Lines changed: 2 additions & 111 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
//===- TensorKindOpInterface.h ----------------------------------*- C++ -*-===//
22
//
3-
// SPDX-FileCopyrightText: Copyright 2024 NVIDIA CORPORATION & AFFILIATES.
3+
// SPDX-FileCopyrightText: Copyright 2024-2026 NVIDIA CORPORATION & AFFILIATES.
44
// All rights reserved.
55
// SPDX-License-Identifier: Apache-2.0
66
//
@@ -26,125 +26,17 @@
2626
#ifndef MLIR_TENSORRT_INTERFACE_TENSORKINDOPINTERFACE_H
2727
#define MLIR_TENSORRT_INTERFACE_TENSORKINDOPINTERFACE_H
2828

29+
#include "mlir-tensorrt-common/Interfaces/TensorKindAttrInterface.h"
2930
#include "mlir/Analysis/DataFlow/SparseAnalysis.h"
30-
#include "mlir/Analysis/DataFlowFramework.h"
31-
#include "mlir/IR/BuiltinTypes.h"
32-
#include "mlir/IR/OpDefinition.h"
33-
#include "mlir/Support/LLVM.h"
3431

3532
namespace mlir {
3633

37-
enum class TensorKind { Unknown = 0, Device, Host, Both };
38-
3934
llvm::StringRef stringifyTensorKind(TensorKind kind);
4035

4136
/// Return the name of the function arg attr UnitAttr
4237
/// that should be used to mark an argument as a shape tensor.
4338
StringRef getHostTensorArgAttrName();
4439

45-
/// Represents a lattice point. The lattice picture looks like this:
46-
///
47-
/// ```
48-
/// "both" (top)
49-
/// / \ (host and device states are at same level)
50-
/// "host" "device"
51-
/// \ /
52-
/// unknown
53-
/// |
54-
/// uninitialized (bottom)
55-
///
56-
/// ```
57-
struct TensorKindInfo {
58-
/// Create the type info in an uninitialized state.
59-
TensorKindInfo() : kind(std::nullopt) {}
60-
TensorKindInfo(TensorKind initialKind) : kind(initialKind) {}
61-
62-
/// Print a description of this lattice point to the stream.
63-
void print(raw_ostream &os) const;
64-
65-
/// Set the value and return whether there was a change.
66-
ChangeResult setKind(TensorKind kind) {
67-
auto newKind = kind;
68-
ChangeResult result = isUninitialized() || newKind != *this->kind
69-
? ChangeResult::Change
70-
: ChangeResult::NoChange;
71-
this->kind = newKind;
72-
return result;
73-
}
74-
75-
/// Return whether this lattice point has an uninitialied value.
76-
bool isUninitialized() const { return !kind.has_value(); }
77-
78-
/// Return whether this lattice point is unknown.
79-
bool isUnknown() const { return *kind == TensorKind::Unknown; }
80-
81-
static TensorKindInfo join(const TensorKindInfo &lhs,
82-
const TensorKindInfo &rhs);
83-
static TensorKindInfo meet(const TensorKindInfo &lhs,
84-
const TensorKindInfo &rhs);
85-
86-
TensorKind getKind() const {
87-
assert(!isUninitialized());
88-
return *kind;
89-
}
90-
91-
/// Return true if the kind has `Host` value.
92-
bool isHostOnly() const {
93-
assert(!isUninitialized() && "expected initialized value");
94-
return getKind() == TensorKind::Host;
95-
}
96-
97-
/// Returns true if the kind is `host` or `both`.
98-
bool isHostVisible() const {
99-
assert(!isUninitialized() && "expected initialized value");
100-
return getKind() == TensorKind::Host || getKind() == TensorKind::Both;
101-
}
102-
103-
/// Return true if this is an device tensor and not 'both'.
104-
bool isDeviceOnly() const {
105-
assert(!isUninitialized() && "expected initialized value");
106-
return getKind() == TensorKind::Device;
107-
}
108-
109-
/// Returns true if this is a tensor visible on the device (device or both).
110-
bool isDeviceVisible() const {
111-
assert(!isUninitialized() && "expected initialized value");
112-
return getKind() == TensorKind::Device || getKind() == TensorKind::Both;
113-
}
114-
115-
/// Return true if the kind is both a host tensor and an device tensor.
116-
bool isBothHostAndDevice() const {
117-
assert(!isUninitialized() && "expected initialized value");
118-
return getKind() == TensorKind::Both;
119-
}
120-
121-
bool operator<(const TensorKindInfo &other) const {
122-
// Uninitialized value is always less than other.
123-
if (isUninitialized())
124-
return true;
125-
// Uninitialized other can't be greater than anything.
126-
if (other.isUninitialized())
127-
return false;
128-
// If we are unknown, we are less than any host/device/both.
129-
if (isUnknown())
130-
return static_cast<int32_t>(getKind()) <
131-
static_cast<int32_t>(other.getKind());
132-
// If we are known, we are less than 'both'.
133-
if ((isHostOnly() || isDeviceOnly()) && other.isBothHostAndDevice())
134-
return true;
135-
return false;
136-
}
137-
138-
bool operator==(const TensorKindInfo &other) const {
139-
return other.kind == this->kind;
140-
}
141-
bool operator!=(const TensorKindInfo &other) const {
142-
return !(*this == other);
143-
}
144-
145-
std::optional<TensorKind> kind;
146-
};
147-
14840
/// Wraps the `TensorKindInfo` into a lattice class. This is required because
14941
/// upstream `dataflow::Lattice` has some issues that make it incompatible with
15042
/// backward analysis. TODO: fix the static switch for the `meet` operation
@@ -198,7 +90,6 @@ bool isHostTensorCandidate(Type type);
19890
} // namespace mlir
19991

20092
// Include the generated interface declarations.
201-
#include "mlir-tensorrt-common/Interfaces/TensorKindAttrInterface.h.inc"
20293
#include "mlir-tensorrt-common/Interfaces/TensorKindOpInterface.h.inc"
20394

20495
#endif // MLIR_TENSORRT_INTERFACE_TENSORKINDOPINTERFACE_H

mlir-tensorrt/common/include/mlir-tensorrt-common/Interfaces/ToLoopsOpInterface.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
#ifndef MLIR_TENSORRT_COMMON_INTERFACES_TOLOOPSOPINTERFACE
2525
#define MLIR_TENSORRT_COMMON_INTERFACES_TOLOOPSOPINTERFACE
2626

27-
#include "mlir/IR/OpDefinition.h"
27+
#include "mlir/IR/OpDefinition.h" // IWYU pragma: keep
2828

2929
namespace mlir {
3030
class RewriterBase;

mlir-tensorrt/common/include/mlir-tensorrt-common/Support/ADTExtras.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,7 @@
2727
#include "llvm/ADT/SmallVector.h"
2828
#include "llvm/ADT/StringRef.h"
2929
#include "llvm/ADT/iterator_range.h"
30-
#include "llvm/Support/Debug.h"
31-
#include "llvm/Support/FormatProviders.h"
30+
#include "llvm/Support/FormatProviders.h" // IWYU pragma: keep
3231
#include "llvm/Support/raw_ostream.h"
3332
#include <array>
3433
#include <type_traits>

mlir-tensorrt/common/include/mlir-tensorrt-common/Utils/DataFlowUtils.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
//===- DataFlowUtils.h ------------------------------------------*- C++ -*-===//
1+
//===- DataFlowUtils.h ---------------------------------------------------===//
22
//
3-
// SPDX-FileCopyrightText: Copyright 2025 NVIDIA CORPORATION & AFFILIATES.
3+
// SPDX-FileCopyrightText: Copyright 2025-2026 NVIDIA CORPORATION & AFFILIATES.
44
// All rights reserved.
55
// SPDX-License-Identifier: Apache-2.0
66
//
@@ -25,9 +25,6 @@
2525
#define MLIR_TENSORRT_COMMON_UTILS_DATAFLOWUTILS
2626

2727
#include "mlir/Analysis/DataFlow/DeadCodeAnalysis.h"
28-
#include "mlir/Analysis/DataFlowFramework.h"
29-
#include "mlir/IR/Operation.h"
30-
#include "mlir/IR/PatternMatch.h"
3128

3229
namespace mlir {
3330

mlir-tensorrt/executor/include/mlir-executor/Utils/ModuleLikePass.h renamed to mlir-tensorrt/common/include/mlir-tensorrt-common/Utils/ModuleLikePass.h

Lines changed: 9 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,18 @@
1-
//===- ModulePass.h ---------------------------------------------*- C++ -*-===//
1+
//===- ModuleLikePass.h --------------------------------------------------===//
22
//
3-
// Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3+
// SPDX-FileCopyrightText: Copyright 2026 NVIDIA CORPORATION & AFFILIATES.
4+
// All rights reserved.
5+
// SPDX-License-Identifier: Apache-2.0
46
//
57
//===----------------------------------------------------------------------===//
68
///
79
/// Defines a pass with static scheduling filter that only allows it to be run
810
/// on "module-like" operations.
911
///
1012
//===----------------------------------------------------------------------===//
11-
#ifndef MLIR_EXECUTOR_UTILS_MODULEPASS
12-
#define MLIR_EXECUTOR_UTILS_MODULEPASS
13+
#ifndef MLIR_TENSORRT_COMMON_UTILS_MODULELIKEPASS
14+
#define MLIR_TENSORRT_COMMON_UTILS_MODULELIKEPASS
1315

14-
#include "mlir/IR/OpDefinition.h"
15-
#include "mlir/IR/SymbolTable.h"
1616
#include "mlir/Pass/Pass.h"
1717

1818
namespace mlir {
@@ -41,21 +41,12 @@ class ModuleLikePass : public OperationPass<> {
4141
/// Indicate if the current pass can be scheduled on the given operation type.
4242
/// For an InterfacePass, this checks if the operation implements the given
4343
/// interface.
44-
bool canScheduleOn(RegisteredOperationName opName) const final {
45-
return opName.hasTrait<OpTrait::IsIsolatedFromAbove>() &&
46-
opName.hasTrait<OpTrait::SymbolTable>();
47-
}
44+
bool canScheduleOn(RegisteredOperationName opName) const final;
4845

4946
/// Used in pass `runOnOperation` implementation to ensure that the op meets
5047
/// requirements that can't be checked in 'canScheduleOn'.
51-
static LogicalResult checkIsModuleLike(Operation *op) {
52-
if (op->getNumRegions() != 1 || !op->getRegion(0).hasOneBlock())
53-
return emitError(op->getLoc())
54-
<< "expected a module-like operation with a single region "
55-
"containing a single block";
56-
return success();
57-
}
48+
static LogicalResult checkIsModuleLike(Operation *op);
5849
};
5950
} // namespace mlir
6051

61-
#endif // MLIR_EXECUTOR_UTILS_MODULEPASS
52+
#endif // MLIR_TENSORRT_COMMON_UTILS_MODULELIKEPASS

mlir-tensorrt/common/include/mlir-tensorrt-common/Utils/ModuleUtils.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
//===- ModuleUtils.h --------------------------------------------*- C++ -*-===//
1+
//===- ModuleUtils.h -----------------------------------------------------===//
22
//
3-
// SPDX-FileCopyrightText: Copyright 2025 NVIDIA CORPORATION & AFFILIATES.
3+
// SPDX-FileCopyrightText: Copyright 2025-2026 NVIDIA CORPORATION & AFFILIATES.
44
// All rights reserved.
55
// SPDX-License-Identifier: Apache-2.0
66
//

mlir-tensorrt/common/include/mlir-tensorrt-common/Utils/PDLUtils.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
//===- PDLUtils.h -----------------------------------------------*- C++ -*-===//
1+
//===- PDLUtils.h --------------------------------------------------------===//
22
//
3-
// SPDX-FileCopyrightText: Copyright 2024 NVIDIA CORPORATION & AFFILIATES.
3+
// SPDX-FileCopyrightText: Copyright 2024-2026 NVIDIA CORPORATION & AFFILIATES.
44
// All rights reserved.
55
// SPDX-License-Identifier: Apache-2.0
66
//
@@ -24,7 +24,6 @@
2424
#ifndef MLIR_TENSORRT_COMMON_UTILS_PDLUTILS
2525
#define MLIR_TENSORRT_COMMON_UTILS_PDLUTILS
2626

27-
#include "mlir/IR/PatternMatch.h"
2827
#include "llvm/ADT/TypeSwitch.h"
2928

3029
namespace mlir {

0 commit comments

Comments
 (0)