Commit 8748350

Integrates/llvm 20250310: Bump to llvm/llvm-project@967ab7e (#20207)
Bump to llvm@[967ab7e08e62a35cc65f34e21fbeb00abf3eb83f](iree-org/llvm-project@967ab7e).

Carry one revert commit [c190c8d](iree-org/llvm-project@c190c8d) for llvm/llvm-project#129850, and add commit [d80a859](iree-org/llvm-project@d80a859) to fix a Bazel test error.

PR llvm/llvm-project#129850 attempted to use TableGen to define OneShotBufferizePassOption for controlling the bufferization of all operations. However, it appears that the TableGen implementation does not yet fully support all the options available in the manually defined OneShotBufferizationOptions struct.

For all uses of the vector transformation options, follow Jakub's suggestion below and pick up the default values instead of setting them manually:

```c++
VectorTransformsOptions defaultOptions;
...
vector::populateVectorTransposeLoweringPatterns(
    patterns, defaultOptions.vectorTransposeLowering);
```

---------

Signed-off-by: Bangtian Liu <[email protected]>
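For reference, here is a minimal self-contained sketch of that suggestion, assuming the post-bump MLIR vector entry points shown in the diffs below; the helper name `buildVectorLoweringPatterns` and the include paths are illustrative, not IREE code:

```c++
// Minimal sketch, not IREE's code: pick up the default lowering strategies from
// VectorTransformsOptions and pass the individual enum members to the populate*
// entry points, which no longer take the whole options struct.
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical helper used only for illustration.
static void buildVectorLoweringPatterns(RewritePatternSet &patterns) {
  // Default-constructed options carry the default strategy for each lowering.
  vector::VectorTransformsOptions defaultOptions;
  vector::populateVectorContractLoweringPatterns(
      patterns, defaultOptions.vectorContractLowering);
  vector::populateVectorTransposeLoweringPatterns(
      patterns, defaultOptions.vectorTransposeLowering);
}
```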
1 parent 63ed28b commit 8748350

26 files changed (+89, -62 lines)

compiler/plugins/input/TOSA/InputConversion/test/apply_pdl_patterns_tosa.mlir

Lines changed: 6 additions & 4 deletions
```diff
@@ -41,16 +41,18 @@
 // CHECK: tosa.negate %[[RESULT]]

 func.func @mlp_invocation(%lhs: tensor<2x4xf32>, %rhs : tensor<4x8xf32>) -> tensor<2x8xf32> {
-  %lhs_shape = tosa.const_shape {value = dense<[1, 2, 4]> : tensor<3xindex>} : () -> !tosa.shape<3>
-  %rhs_shape = tosa.const_shape {value = dense<[1, 4, 8]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  %lhs_shape = tosa.const_shape {values = dense<[1, 2, 4]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  %rhs_shape = tosa.const_shape {values = dense<[1, 4, 8]> : tensor<3xindex>} : () -> !tosa.shape<3>
   %lhs_3D = tosa.reshape %lhs, %lhs_shape : (tensor<2x4xf32>, !tosa.shape<3>) -> tensor<1x2x4xf32>
   %rhs_3D = tosa.reshape %rhs, %rhs_shape : (tensor<4x8xf32>, !tosa.shape<3>) -> tensor<1x4x8xf32>
-  %0 = tosa.matmul %lhs_3D, %rhs_3D : (tensor<1x2x4xf32>, tensor<1x4x8xf32>) -> tensor<1x2x8xf32>
+  %azp0 = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %bzp0 = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %0 = tosa.matmul %lhs_3D, %rhs_3D, %azp0, %bzp0 : (tensor<1x2x4xf32>, tensor<1x4x8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x2x8xf32>
   %1 = tosa.clamp %0 {
       min_val = 0.0 : f32, max_val = 3.4028235e+38 : f32}
       : (tensor<1x2x8xf32>) -> tensor<1x2x8xf32>
   %2 = tosa.negate %1 : (tensor<1x2x8xf32>) -> tensor<1x2x8xf32>
-  %result_shape = tosa.const_shape {value = dense<[2, 8]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %result_shape = tosa.const_shape {values = dense<[2, 8]> : tensor<2xindex>} : () -> !tosa.shape<2>
   %3 = tosa.reshape %2, %result_shape : (tensor<1x2x8xf32>, !tosa.shape<2>) -> tensor<2x8xf32>
   return %3 : tensor<2x8xf32>
 }
```

compiler/plugins/input/TOSA/InputConversion/test/convert_i48_to_i64.mlir

Lines changed: 2 additions & 2 deletions
```diff
@@ -23,9 +23,9 @@ func.func @test_other_types_not_converted(%arg0: tensor<2x2xi32>) -> tensor<2x2x
 // CHECK-LABEL: @test_attrs_converted
 func.func @test_attrs_converted() -> (i48, tensor<2xi48>) {
   // CHECK: %[[ARITH_C:.+]] = arith.constant 0 : i64
-  // CHECK: %[[TOSA_C:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi64>}> : () -> tensor<2xi64>
+  // CHECK: %[[TOSA_C:.+]] = "tosa.const"() <{values = dense<0> : tensor<2xi64>}> : () -> tensor<2xi64>
   // CHECK: return %[[ARITH_C]], %[[TOSA_C]] : i64, tensor<2xi64>
   %0 = "arith.constant"() {value = 0 : i48} : () -> i48
-  %1 = "tosa.const"() <{value = dense<0> : tensor<2xi48>}> : () -> tensor<2xi48>
+  %1 = "tosa.const"() <{values = dense<0> : tensor<2xi48>}> : () -> tensor<2xi48>
   return %0, %1 : i48, tensor<2xi48>
 }
```

compiler/plugins/input/TOSA/InputConversion/test/tosa.pdl.mlir

Lines changed: 5 additions & 1 deletion
```diff
@@ -48,15 +48,19 @@ pdl.pattern @mlp : benefit(1) {
   %lhs = pdl.operand : %lhs_type
   %rhs_type = pdl.type
   %rhs = pdl.operand : %rhs_type
+  %zp_type = pdl.type
+  %azp0 = pdl.operand : %zp_type
+  %bzp0 = pdl.operand : %zp_type
   %matmul_type = pdl.type
   %min_fp = pdl.attribute = 0.0 : f32
   %max_fp = pdl.attribute
-  %matmul = pdl.operation "tosa.matmul"(%lhs, %rhs : !pdl.value, !pdl.value)
+  %matmul = pdl.operation "tosa.matmul"(%lhs, %rhs, %azp0, %bzp0 : !pdl.value, !pdl.value, !pdl.value, !pdl.value)
       -> (%matmul_type : !pdl.type)
   %element_type = pdl.type : f32
   pdl.apply_native_constraint "checkTensorElementType"(%lhs_type, %element_type : !pdl.type, !pdl.type)
   pdl.apply_native_constraint "checkTensorElementType"(%rhs_type, %element_type : !pdl.type, !pdl.type)
   pdl.apply_native_constraint "checkTensorElementType"(%matmul_type, %element_type : !pdl.type, !pdl.type)
+  pdl.apply_native_constraint "checkTensorElementType"(%zp_type, %element_type: !pdl.type, !pdl.type)

   %matmul_result = pdl.result 0 of %matmul
   %relu_type = pdl.type
```

compiler/plugins/target/CUDA/CUDATarget.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -290,7 +290,7 @@ static LogicalResult linkObjects(Location loc, llvm::Module &module,
   // Ensure consistent target information.
   const llvm::Triple &targetTriple = targetMachine.getTargetTriple();
   module.setDataLayout(targetMachine.createDataLayout());
-  module.setTargetTriple(targetTriple.str());
+  module.setTargetTriple(targetTriple);

   auto specializationCallback = [&](llvm::Module &userModule) {
     // TODO(thomasraoux): inject __nvvm_reflect-style functions/globals for
```

compiler/plugins/target/LLVMCPU/LLVMCPUTarget.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -494,7 +494,7 @@ class LLVMCPUTargetBackend final : public TargetBackend {

     // Specialize the module to our target machine.
     llvmModule->setDataLayout(targetMachine->createDataLayout());
-    llvmModule->setTargetTriple(targetMachine->getTargetTriple().str());
+    llvmModule->setTargetTriple(targetMachine->getTargetTriple());

     // Dump just the codegen bitcode before linking and optimization.
     if (!options.dumpIntermediatesPath.empty()) {
```
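The two target changes above follow the same LLVM API update: `Module::setTargetTriple` now accepts an `llvm::Triple` directly instead of a triple string. A minimal standalone sketch of the new call, assuming the post-bump API as shown in the diffs (the module name and triple value here are illustrative only):

```c++
// Minimal sketch, assuming the post-bump LLVM API where Module::setTargetTriple
// takes an llvm::Triple rather than a std::string.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/TargetParser/Triple.h"

int main() {
  llvm::LLVMContext context;
  llvm::Module module("example", context);          // module name is illustrative
  llvm::Triple triple("x86_64-unknown-linux-gnu");  // triple value is illustrative
  module.setTargetTriple(triple);  // was: module.setTargetTriple(triple.str());
  return 0;
}
```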

compiler/src/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp

Lines changed: 3 additions & 2 deletions
```diff
@@ -974,16 +974,17 @@ void ConvertToLLVMPass::runOnOperation() {
   vector::populateVectorInterleaveLoweringPatterns(patterns);
   // TODO: doubtful that the "default" does what one want here, it is likely
   // better to use outerproduct.
+  vector::VectorTransformsOptions defaultOptions;
   vector::populateVectorContractLoweringPatterns(
-      patterns, vector::VectorTransformsOptions());
+      patterns, defaultOptions.vectorContractLowering);
   vector::populateVectorMaskMaterializationPatterns(
       patterns, /*force32BitVectorIndices=*/false);
   vector::populateVectorMaskOpLoweringPatterns(patterns);
   vector::populateVectorShapeCastLoweringPatterns(patterns);
   // TODO: doubtful that the "default" does what one want here, it is likely
   // better to use shuffle.
   vector::populateVectorTransposeLoweringPatterns(
-      patterns, vector::VectorTransformsOptions());
+      patterns, defaultOptions.vectorTransposeLowering);
   populateConvertArmNeon2dToIntrPatterns(patterns);
   if (failed(applyPatternsGreedily(getOperation(), std::move(patterns)))) {
     return signalPassFailure();
```

compiler/src/iree/compiler/Codegen/LLVMCPU/LLVMCPUVectorTransposeLowering.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -68,8 +68,8 @@ void LLVMCPUVectorTransposeLoweringPass::runOnOperation() {

   RewritePatternSet patterns(ctx);
   vector::populateVectorToVectorCanonicalizationPatterns(patterns);
-  vector::populateVectorTransposeLoweringPatterns(patterns,
-                                                  vectorTransformOptions);
+  vector::populateVectorTransposeLoweringPatterns(
+      patterns, vectorTransformOptions.vectorTransposeLowering);
   vector::populateVectorTransposeNarrowTypeRewritePatterns(
       patterns, kNarrowTypeEmulationBenefit);
```

compiler/src/iree/compiler/Codegen/LLVMCPU/LLVMCPUVirtualVectorLowering.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -70,7 +70,7 @@ void LLVMCPUVirtualVectorLoweringPass::runOnOperation() {
   vector::populateVectorToVectorCanonicalizationPatterns(patterns);
   vector::populateVectorGatherLoweringPatterns(patterns);
   vector::populateVectorContractLoweringPatterns(
-      patterns, vectorTransformOptions,
+      patterns, vectorTransformOptions.vectorContractLowering,
       /*benefit=*/1,
       /*disableOuterProductLowering=*/false);
   // This pattern will transform vector loads whose elements are used in a
```

compiler/src/iree/compiler/Codegen/LLVMCPU/test/select_x86_64_lowering_strategy.mlir

Lines changed: 1 addition & 1 deletion
```diff
@@ -1904,7 +1904,7 @@ module {

 // CHECK: #translation = #iree_codegen.translation_info<pipeline = CPUDoubleTilingExpert, {enable_loop_peeling}>
 // CHECK-LABEL: @test_mod_vectorizing_strategy_peeling
-// CHECK-SAME: attributes {hal.executable.target = #executable_target_system_elf_x86_64_, translation_info = #translation}
+// CHECK-SAME: attributes {hal.executable.target = #executable_target_system_elf_x86_64, translation_info = #translation}

 // -----

```

compiler/src/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp

Lines changed: 5 additions & 4 deletions
```diff
@@ -93,6 +93,9 @@ struct ConvertToNVVMPass final
     // Run Vector -> Vector transformations ahead of conversion to LLVM.
     {
       RewritePatternSet patterns(&getContext());
+      auto options =
+          vector::VectorTransformsOptions().setVectorTransformsOptions(
+              vector::VectorContractLowering::OuterProduct);
       populateVectorToSCFConversionPatterns(
           patterns, VectorTransferToSCFOptions().enableFullUnroll());
       populateDropSharedMemoryDeallocOpPatterns(patterns);
@@ -101,9 +104,7 @@
       vector::populateVectorToVectorCanonicalizationPatterns(patterns);
       vector::populateVectorBroadcastLoweringPatterns(patterns);
       vector::populateVectorContractLoweringPatterns(
-          patterns,
-          vector::VectorTransformsOptions().setVectorTransformsOptions(
-              vector::VectorContractLowering::OuterProduct));
+          patterns, options.vectorContractLowering);
       vector::populateVectorMaskOpLoweringPatterns(patterns);
       // We currently always use 64 bit indices, thus ensure the bit width of
       // the mask compare is consistent.
@@ -113,7 +114,7 @@
       // TODO: doubtful that the "default" does what one want here, it is likely
       // better to use something else.
       vector::populateVectorTransposeLoweringPatterns(
-          patterns, vector::VectorTransformsOptions());
+          patterns, options.vectorTransposeLowering);
       vector::populateVectorTransferLoweringPatterns(patterns);
       arith::populateExpandBFloat16Patterns(patterns);
       if (failed(applyPatternsGreedily(m, std::move(patterns)))) {
```
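Where a non-default strategy is wanted, as in the NVVM pass above, the options object can still be configured once and its individual members passed to each entry point. A hedged sketch under the same assumptions as before (helper name and include paths are illustrative, not IREE code):

```c++
// Minimal sketch, not IREE's code: select OuterProduct contract lowering once on
// VectorTransformsOptions, then hand the individual members to each populate* call.
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical helper used only for illustration.
static void buildGPUVectorLoweringPatterns(RewritePatternSet &patterns) {
  auto options = vector::VectorTransformsOptions().setVectorTransformsOptions(
      vector::VectorContractLowering::OuterProduct);
  vector::populateVectorContractLoweringPatterns(
      patterns, options.vectorContractLowering);
  // Transpose lowering keeps its default strategy.
  vector::populateVectorTransposeLoweringPatterns(
      patterns, options.vectorTransposeLowering);
}
```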
