diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 61b73f546b5da..159a6afd4a917 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -540,6 +540,8 @@ void mlir::populateLibDeviceConversionPatterns(
                                   "__nv_cosh");
   populateOpPatterns<math::ErfOp>(converter, patterns, benefit, "__nv_erff",
                                   "__nv_erf");
+  populateOpPatterns<math::ErfcOp>(converter, patterns, benefit, "__nv_erfcf",
+                                   "__nv_erfc");
   populateOpPatterns<math::ExpOp>(converter, patterns, benefit, "__nv_expf",
                                   "__nv_exp", "__nv_fast_expf");
   populateOpPatterns<math::Exp2Op>(converter, patterns, benefit, "__nv_exp2f",
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 7b5b11ec02724..14594cd6badb1 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -1095,3 +1095,16 @@ gpu.module @test_module_54 {
     return %0, %1, %2, %3, %4, %5 : i1, i1, i1, i1, i1, i1
   }
 }
+
+gpu.module @test_module_55 {
+  // CHECK: llvm.func @__nv_erfcf(f32) -> f32
+  // CHECK: llvm.func @__nv_erfc(f64) -> f64
+  // CHECK-LABEL: func @gpu_erfc
+  func.func @gpu_erfc(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+    %result32 = math.erfc %arg_f32 : f32
+    // CHECK: llvm.call @__nv_erfcf(%{{.*}}) : (f32) -> f32
+    %result64 = math.erfc %arg_f64 : f64
+    // CHECK: llvm.call @__nv_erfc(%{{.*}}) : (f64) -> f64
+    func.return %result32, %result64 : f32, f64
+  }
+}