-
Notifications
You must be signed in to change notification settings - Fork 15.3k
[flang][cuda] Add cuf.register_kernel operation #112268
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
The operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available.
|
@llvm/pr-subscribers-flang-fir-hlfir Author: Valentin Clement (バレンタイン クレメン) (clementval) ChangesThe operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available. Full diff: https://github.com/llvm/llvm-project/pull/112268.diff 5 Files Affected:
diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
index f643674f1d5d6b..98d1ef529738c7 100644
--- a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
+++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
@@ -288,4 +288,23 @@ def cuf_KernelOp : cuf_Op<"kernel", [AttrSizedOperandSegments,
let hasVerifier = 1;
}
+def cuf_RegisterKernelOp : cuf_Op<"register_kernel", []> {
+ let summary = "Register a CUDA kernel";
+
+ let arguments = (ins
+ SymbolRefAttr:$name
+ );
+
+ let assemblyFormat = [{
+ $name attr-dict
+ }];
+
+ let hasVerifier = 1;
+
+ let extraClassDeclaration = [{
+ mlir::StringAttr getKernelName();
+ mlir::StringAttr getKernelModuleName();
+ }];
+}
+
#endif // FORTRAN_DIALECT_CUF_CUF_OPS
diff --git a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
index 7fb2dcf4af115c..9e3bbd1f9cbee9 100644
--- a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
+++ b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
@@ -15,6 +15,7 @@
#include "flang/Optimizer/Dialect/CUF/CUFDialect.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIRType.h"
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
@@ -253,6 +254,42 @@ llvm::LogicalResult cuf::KernelOp::verify() {
return mlir::success();
}
+//===----------------------------------------------------------------------===//
+// RegisterKernelOp
+//===----------------------------------------------------------------------===//
+
+mlir::StringAttr cuf::RegisterKernelOp::getKernelModuleName() {
+ return getName().getRootReference();
+}
+
+mlir::StringAttr cuf::RegisterKernelOp::getKernelName() {
+ return getName().getLeafReference();
+}
+
+mlir::LogicalResult cuf::RegisterKernelOp::verify() {
+ if (getKernelName() == getKernelModuleName())
+ return emitOpError("expect a module and a kernel name");
+
+ auto mod = getOperation()->getParentOfType<mlir::ModuleOp>();
+ if (!mod)
+ return emitOpError("expect to be in a module");
+
+ mlir::SymbolTable symTab(mod);
+ auto gpuMod = symTab.lookup<mlir::gpu::GPUModuleOp>(getKernelModuleName());
+ if (!gpuMod)
+ return emitOpError("gpu module not found");
+
+ mlir::SymbolTable gpuSymTab(gpuMod);
+ auto func = gpuSymTab.lookup<mlir::gpu::GPUFuncOp>(getKernelName());
+ if (!func)
+ return emitOpError("device function not found");
+
+ if (!func.isKernel())
+ return emitOpError("only kernel gpu.func can be registered");
+
+ return mlir::success();
+}
+
// Tablegen operators
#define GET_OP_CLASSES
diff --git a/flang/test/Fir/CUDA/cuda-register-func.fir b/flang/test/Fir/CUDA/cuda-register-func.fir
new file mode 100644
index 00000000000000..a428f68eb3bf42
--- /dev/null
+++ b/flang/test/Fir/CUDA/cuda-register-func.fir
@@ -0,0 +1,20 @@
+// RUN: fir-opt %s | FileCheck %s
+
+module attributes {gpu.container_module} {
+ gpu.module @cuda_device_mod {
+ gpu.func @_QPsub_device1() kernel {
+ gpu.return
+ }
+ gpu.func @_QPsub_device2(%arg0: !fir.ref<f32>) kernel {
+ gpu.return
+ }
+ }
+ llvm.func internal @__cudaFortranConstructor() {
+ cuf.register_kernel @cuda_device_mod::@_QPsub_device1
+ cuf.register_kernel @cuda_device_mod::@_QPsub_device2
+ llvm.return
+ }
+}
+
+// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device1
+// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device2
diff --git a/flang/test/Fir/cuf-invalid.fir b/flang/test/Fir/cuf-invalid.fir
index e9aeaa281e2a85..a5747b8ee4a3b3 100644
--- a/flang/test/Fir/cuf-invalid.fir
+++ b/flang/test/Fir/cuf-invalid.fir
@@ -125,3 +125,53 @@ func.func @_QPsub1(%arg0: !fir.ref<!fir.array<?xf32>> {cuf.data_attr = #cuf.cuda
cuf.data_transfer %20#0 to %11#0, %19 : !fir.shape<1> {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>
return
}
+
+// -----
+
+module attributes {gpu.container_module} {
+ gpu.module @cuda_device_mod {
+ gpu.func @_QPsub_device1() {
+ gpu.return
+ }
+ }
+ llvm.func internal @__cudaFortranConstructor() {
+ // expected-error@+1{{'cuf.register_kernel' op only kernel gpu.func can be registered}}
+ cuf.register_kernel @cuda_device_mod::@_QPsub_device1
+ llvm.return
+ }
+}
+
+// -----
+
+module attributes {gpu.container_module} {
+ gpu.module @cuda_device_mod {
+ gpu.func @_QPsub_device1() {
+ gpu.return
+ }
+ }
+ llvm.func internal @__cudaFortranConstructor() {
+ // expected-error@+1{{'cuf.register_kernel' op device function not found}}
+ cuf.register_kernel @cuda_device_mod::@_QPsub_device2
+ llvm.return
+ }
+}
+
+// -----
+
+module attributes {gpu.container_module} {
+ llvm.func internal @__cudaFortranConstructor() {
+ // expected-error@+1{{'cuf.register_kernel' op gpu module not found}}
+ cuf.register_kernel @cuda_device_mod::@_QPsub_device1
+ llvm.return
+ }
+}
+
+// -----
+
+module attributes {gpu.container_module} {
+ llvm.func internal @__cudaFortranConstructor() {
+ // expected-error@+1{{'cuf.register_kernel' op expect a module and a kernel name}}
+ cuf.register_kernel @_QPsub_device1
+ llvm.return
+ }
+}
diff --git a/flang/tools/fir-opt/fir-opt.cpp b/flang/tools/fir-opt/fir-opt.cpp
index f75fba27c68f08..84a74770cf0303 100644
--- a/flang/tools/fir-opt/fir-opt.cpp
+++ b/flang/tools/fir-opt/fir-opt.cpp
@@ -42,6 +42,7 @@ int main(int argc, char **argv) {
#endif
DialectRegistry registry;
fir::support::registerDialects(registry);
+ registry.insert<mlir::gpu::GPUDialect>();
fir::support::addFIRExtensions(registry);
return failed(MlirOptMain(argc, argv, "FIR modular optimizer driver\n",
registry));
|
Renaud-K
left a comment
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM!
|
LLVM Buildbot has detected a new failure on builder Full details are available at: https://lab.llvm.org/buildbot/#/builders/89/builds/8418 Here is the relevant piece of the build log for the reference |
This reverts commit cbe76a2.
|
LLVM Buildbot has detected a new failure on builder Full details are available at: https://lab.llvm.org/buildbot/#/builders/130/builds/4881 Here is the relevant piece of the build log for the reference |
|
LLVM Buildbot has detected a new failure on builder Full details are available at: https://lab.llvm.org/buildbot/#/builders/80/builds/4930 Here is the relevant piece of the build log for the reference |
The operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available. Reland of #112268 with correct shared library build support.
The operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available.
The operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available. Reland of llvm#112268 with correct shared library build support.
The operation will be used in the CUF constructor to register the kernel functions. This allows the registration to be delayed until codegen, when the gpu.binary will be available.