; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck --check-prefixes=CHECK %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck --check-prefixes=CHECK %s

target datalayout = "p:64:64:64"

%async.task = type { i64 }
%async.actor = type { i64 }
%async.fp = type <{ i32, i32 }>

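; An async context: the first field points back to the caller's context, the
; second field is the resume function used to return to the caller.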
%async.ctxt = type { i8*, void (i8*, %async.task*, %async.actor*)* }

; The async callee.
@my_other_async_function_fp = external global <{ i32, i32 }>
declare void @my_other_async_function(i8* %async.ctxt)

; Function that implements the dispatch to the callee function.
define swiftcc void @my_async_function.my_other_async_function_fp.apply(i8* %fnPtr, i8* %async.ctxt, %async.task* %task, %async.actor* %actor) {
  %callee = bitcast i8* %fnPtr to void(i8*, %async.task*, %async.actor*)*
  tail call swiftcc void %callee(i8* %async.ctxt, %async.task* %task, %async.actor* %actor)
  ret void
}

declare void @some_user(i64)
declare void @some_may_write(i64*)

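; Projection function that recovers the caller's context from a callee
; context by loading the context's first field.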
define i8* @resume_context_projection(i8* %ctxt) {
entry:
  %resume_ctxt_addr = bitcast i8* %ctxt to i8**
  %resume_ctxt = load i8*, i8** %resume_ctxt_addr, align 8
  ret i8* %resume_ctxt
}
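; Async function pointer record for @unreachable: a function-relative pointer
; to the coroutine paired with the initial size of its async context.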
@unreachable_fp = constant <{ i32, i32 }>
  <{ i32 trunc ( ; Relative pointer to async function
     i64 sub (
       i64 ptrtoint (void (i8*, %async.task*, %async.actor*)* @unreachable to i64),
       i64 ptrtoint (i32* getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @unreachable_fp, i32 0, i32 1) to i64)
     )
     to i32),
     i32 128 ; Initial async context size without space for frame
}>
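; A coroutine that never returns to its caller: the code after the suspend
; point ends in 'unreachable'. Splitting must still lower the suspend and
; emit a resume function whose body ends in 'unreachable'.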
define swiftcc void @unreachable(i8* %async.ctxt, %async.task* %task, %async.actor* %actor) {
entry:
  %tmp = alloca { i64, i64 }, align 8
  %proj.1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 0
  %proj.2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 1

  %id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
          i8* bitcast (<{i32, i32}>* @unreachable_fp to i8*))
  %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
  store i64 0, i64* %proj.1, align 8
  store i64 1, i64* %proj.2, align 8
  call void @some_may_write(i64* %proj.1)

  ; Begin lowering: apply %my_other_async_function(%args...)
  ; Set up the callee context.
  %arg0 = bitcast %async.task* %task to i8*
  %arg1 = bitcast <{ i32, i32}>* @my_other_async_function_fp to i8*
  %callee_context = call i8* @llvm.coro.async.context.alloc(i8* %arg0, i8* %arg1)
  %callee_context.0 = bitcast i8* %callee_context to %async.ctxt*
  ; Store the return continuation.
  %callee_context.return_to_caller.addr = getelementptr inbounds %async.ctxt, %async.ctxt* %callee_context.0, i32 0, i32 1
  %return_to_caller.addr = bitcast void(i8*, %async.task*, %async.actor*)** %callee_context.return_to_caller.addr to i8**
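  ; llvm.coro.async.resume returns the address of the resume function of this
  ; coroutine; storing it in the callee's context lets the callee return here.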
  %resume.func_ptr = call i8* @llvm.coro.async.resume()
  store i8* %resume.func_ptr, i8** %return_to_caller.addr

  ; Store the caller's context into the callee's context.
  %callee_context.caller_context.addr = getelementptr inbounds %async.ctxt, %async.ctxt* %callee_context.0, i32 0, i32 0
  store i8* %async.ctxt, i8** %callee_context.caller_context.addr
  %resume_proj_fun = bitcast i8*(i8*)* @resume_context_projection to i8*
  %callee = bitcast void(i8*, %async.task*, %async.actor*)* @asyncSuspend to i8*
  %res = call {i8*, i8*, i8*} (i8*, i8*, ...) @llvm.coro.suspend.async(
           i8* %resume.func_ptr,
           i8* %resume_proj_fun,
           void (i8*, i8*, %async.task*, %async.actor*)* @my_async_function.my_other_async_function_fp.apply,
           i8* %callee, i8* %callee_context, %async.task* %task, %async.actor* %actor)
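  ; The suspend returns the values the resume function was called with;
  ; element 1 carries the task pointer.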

  call void @llvm.coro.async.context.dealloc(i8* %callee_context)
  %continuation_task_arg = extractvalue {i8*, i8*, i8*} %res, 1
  %task.2 = bitcast i8* %continuation_task_arg to %async.task*
  %val = load i64, i64* %proj.1
  call void @some_user(i64 %val)
  %val.2 = load i64, i64* %proj.2
  call void @some_user(i64 %val.2)
  unreachable
}
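; After splitting, the ramp function must no longer contain the suspend
; intrinsic, and the resume part keeps its 'unreachable' terminator.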
; CHECK: define swiftcc void @unreachable
; CHECK-NOT: @llvm.coro.suspend.async
; CHECK: return

; CHECK: define internal swiftcc void @unreachable.resume.0
; CHECK: unreachable

declare i8* @llvm.coro.prepare.async(i8*)
declare token @llvm.coro.id.async(i32, i32, i32, i8*)
declare i8* @llvm.coro.begin(token, i8*)
declare {i8*, i8*, i8*} @llvm.coro.suspend.async(i8*, i8*, ...)
declare i8* @llvm.coro.async.context.alloc(i8*, i8*)
declare void @llvm.coro.async.context.dealloc(i8*)
declare swiftcc void @asyncReturn(i8*, %async.task*, %async.actor*)
declare swiftcc void @asyncSuspend(i8*, %async.task*, %async.actor*)
declare i8* @llvm.coro.async.resume()