diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 958d0d085fce1..cef250232daf5 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -429,10 +429,10 @@ void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
   target.addLegalDialect<::mlir::NVVM::NVVMDialect>();
   target.addIllegalDialect<gpu::GPUDialect>();
   target.addIllegalOp<LLVM::CopySignOp, LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op,
-                      LLVM::FAbsOp, LLVM::FCeilOp, LLVM::FFloorOp, LLVM::FMAOp,
-                      LLVM::FRemOp, LLVM::LogOp, LLVM::Log10Op, LLVM::Log2Op,
-                      LLVM::PowOp, LLVM::RoundEvenOp, LLVM::RoundOp,
-                      LLVM::SinOp, LLVM::SqrtOp>();
+                      LLVM::FAbsOp, LLVM::FCeilOp, LLVM::FFloorOp, LLVM::FRemOp,
+                      LLVM::LogOp, LLVM::Log10Op, LLVM::Log2Op, LLVM::PowOp,
+                      LLVM::RoundEvenOp, LLVM::RoundOp, LLVM::SinOp,
+                      LLVM::SqrtOp>();
 
   // TODO: Remove once we support replacing non-root ops.
   target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp>();
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 6d4555e815b66..ef06af3ad3163 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -1027,7 +1027,7 @@ module attributes {transform.with_named_sequence} {
       legal_ops = ["func.func", "gpu.module", "gpu.yield"],
       illegal_dialects = ["gpu"],
       illegal_ops = ["llvm.copysign", "llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
-                     "llvm.ffloor", "llvm.fma", "llvm.frem", "llvm.log", "llvm.log10", "llvm.log2", "llvm.pow",
+                     "llvm.ffloor", "llvm.frem", "llvm.log", "llvm.log10", "llvm.log2", "llvm.pow",
                      "llvm.roundeven", "llvm.round", "llvm.sin", "llvm.sqrt"],
       partial_conversion
     } : !transform.any_op
diff --git a/mlir/test/Integration/GPU/CUDA/dump-ptx.mlir b/mlir/test/Integration/GPU/CUDA/dump-ptx.mlir
index 0cc5d8645bb36..27ec1ec435fef 100644
--- a/mlir/test/Integration/GPU/CUDA/dump-ptx.mlir
+++ b/mlir/test/Integration/GPU/CUDA/dump-ptx.mlir
@@ -2,7 +2,7 @@
 // RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline -debug-only=serialize-to-isa \
 // RUN: 2>&1 | FileCheck %s
 
-// CHECK: Generated by LLVM NVPTX Back-End
+// CHECK-LABEL: Generated by LLVM NVPTX Back-End
 // CHECK: .visible .func kernel_a()
 // CHECK: ret;
 gpu.module @bar {
@@ -11,3 +11,15 @@ gpu.module @bar {
     llvm.return
   }
 }
+
+// CHECK-LABEL: Generated by LLVM NVPTX Back-End
+// CHECK: .visible .func ({{.+}}) fma(
+// CHECK: fma.rn.f32
+
+gpu.module @foo {
+  llvm.func @fma(%arg0: f32, %arg1: f32) -> f32
+      attributes { gpu.kernel } {
+    %res = llvm.intr.fma (%arg0, %arg1, %arg1) : (f32, f32, f32) -> f32
+    llvm.return %res : f32
+  }
+}