 package kotlinx.coroutines.experimental.scheduling

+import kotlinx.atomicfu.*
 import kotlinx.coroutines.experimental.*
-import java.io.Closeable
-import java.util.concurrent.TimeUnit
-import kotlin.coroutines.experimental.CoroutineContext
+import java.io.*
+import java.util.concurrent.*
+import kotlin.coroutines.experimental.*

-class ExperimentalCoroutineDispatcher(threads: Int = Runtime.getRuntime().availableProcessors()) : CoroutineDispatcher(), Delay, Closeable {
+class ExperimentalCoroutineDispatcher(corePoolSize: Int = Runtime.getRuntime().availableProcessors(), maxPoolSize: Int = MAX_POOL_SIZE) : CoroutineDispatcher(), Delay, Closeable {

-    private val coroutineScheduler = CoroutineScheduler(threads)
+    private val coroutineScheduler = CoroutineScheduler(corePoolSize, maxPoolSize)

-    override fun dispatch(context: CoroutineContext, block: Runnable) {
-        coroutineScheduler.dispatch(block)
-    }
+    /**
+     * TODO: yield doesn't work as expected
+     */
+    override fun dispatch(context: CoroutineContext, block: Runnable): Unit = coroutineScheduler.dispatch(block)

-    override fun scheduleResumeAfterDelay(time: Long, unit: TimeUnit, continuation: CancellableContinuation<Unit>) =
+    override fun scheduleResumeAfterDelay(time: Long, unit: TimeUnit, continuation: CancellableContinuation<Unit>): Unit =
         DefaultExecutor.scheduleResumeAfterDelay(time, unit, continuation)

     override fun close() = coroutineScheduler.close()
+
     override fun toString(): String {
         return "${super.toString()}[scheduler = $coroutineScheduler]"
     }

+    /**
+     * Creates a new coroutine execution context with limited parallelism to execute tasks which may potentially block.
+     * The resulting [CoroutineDispatcher] doesn't own any resources (threads) and piggybacks on the original [ExperimentalCoroutineDispatcher],
+     * executing tasks in this context and giving the original dispatcher a hint to adjust its behaviour.
+     *
+     * @param parallelism parallelism level, indicating how many threads can execute tasks in the given context in parallel.
+     */
+    fun blocking(parallelism: Int = BLOCKING_DEFAULT_PARALLELISM): CoroutineDispatcher {
+        require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
+        return LimitingBlockingDispatcher(parallelism, TaskMode.PROBABLY_BLOCKING, this)
+    }
+
+    internal fun dispatchBlocking(block: Runnable, context: TaskMode, fair: Boolean): Unit = coroutineScheduler.dispatch(block, context, fair)
+}
+
+private class LimitingBlockingDispatcher(val parallelism: Int, val taskContext: TaskMode, val dispatcher: ExperimentalCoroutineDispatcher) : CoroutineDispatcher(), Delay {
+
+    private val queue = ConcurrentLinkedQueue<Runnable>()
+    private val inFlightTasks = atomic(0)
+
+    override fun dispatch(context: CoroutineContext, block: Runnable) = dispatch(block, false)
+
+    private fun dispatch(block: Runnable, fair: Boolean) {
+        var taskToSchedule = wrap(block)
+        while (true) {
+            // Commit an in-flight task slot
+            val inFlight = inFlightTasks.incrementAndGet()
+
+            // Fast path: if the parallelism limit is not reached, dispatch the task and return
+            if (inFlight <= parallelism) {
+                dispatcher.dispatchBlocking(taskToSchedule, taskContext, fair)
+                return
+            }
+
+            // The parallelism limit is reached, add the task to the queue
+            queue.add(taskToSchedule)
+
+            /*
+             * We haven't actually scheduled anything, so roll back the committed in-flight task slot:
+             * if the number of in-flight tasks is still above the limit, do nothing;
+             * if the number of in-flight tasks is less than the parallelism level, then it's a race with a thread
+             * which finished a task from the current context, and we should resubmit the first task from the queue
+             * to avoid starvation.
+             *
+             * Race example #1 (TN is the N-th thread, R is the current number of in-flight tasks), execution is sequential:
+             *
+             * T1: submit task, start execution, R == 1
+             * T2: commit slot for next task, R == 2
+             * T1: finish T1, R == 1
+             * T2: submit next task to local queue, decrement R, R == 0
+             * Without retries, the task from T2 would be stuck in the local queue
+             */
+            if (inFlightTasks.decrementAndGet() >= parallelism) {
+                return
+            }
+
+            taskToSchedule = queue.poll() ?: return
+        }
+    }
+
+    override fun toString(): String {
+        return "${super.toString()}[dispatcher = $dispatcher]"
+    }
+
+    private fun wrap(block: Runnable): Runnable {
+        return block as? WrappedTask ?: WrappedTask(block)
+    }
+
+    /**
+     * Tries to dispatch tasks which were blocked due to reaching the parallelism limit, if there are any.
+     *
+     * Implementation note: blocking tasks are scheduled in a fair manner (to the local queue tail) to avoid
+     * starvation of non-blocking continuations.
+     * E.g. for
+     * ```
+     * foo()
+     * blocking()
+     * bar()
+     * ```
+     * it's more profitable to execute `bar` at the end of `blocking` than the pending blocking task.
+     */
+    private fun afterTask() {
+        var next = queue.poll()
+        // If we have pending tasks in the current blocking context, dispatch the first one
+        if (next != null) {
+            dispatcher.dispatchBlocking(next, taskContext, true)
+            return
+        }
+        inFlightTasks.decrementAndGet()
+
+        /*
+         * Poll the queue again and resubmit a task if required, otherwise tasks may be stuck in the local queue.
+         * Race example #2 (TN is the N-th thread, R is the current number of in-flight tasks), execution is sequential:
+         * T1: submit task, start execution, R == 1
+         * T2: commit slot for next task, R == 2
+         * T1: finish T1, poll queue (it's still empty), R == 2
+         * T2: submit next task to the local queue, decrement R, R == 1
+         * T1: decrement R, finish. R == 0
+         *
+         * Without this second poll, the task from T2 would be stuck in the local queue
+         */
+        next = queue.poll() ?: return
+        dispatch(next, true)
+    }
+
+    private inner class WrappedTask(val runnable: Runnable) : Runnable {
+        override fun run() {
+            try {
+                runnable.run()
+            } finally {
+                afterTask()
+            }
+        }
+    }
+
+    override fun scheduleResumeAfterDelay(time: Long, unit: TimeUnit, continuation: CancellableContinuation<Unit>) = dispatcher.scheduleResumeAfterDelay(time, unit, continuation)
 }
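For context, here is a minimal usage sketch of the dispatcher and its new `blocking` view. It assumes the experimental-era top-level builders (`runBlocking`, `launch`) and that `ExperimentalCoroutineDispatcher` and `blocking` are accessible from the call site; the `fib` helper and the concrete numbers are hypothetical, chosen only to contrast CPU-bound work on the regular view with potentially blocking work capped at four concurrent tasks.

import kotlinx.coroutines.experimental.*
import kotlinx.coroutines.experimental.scheduling.*

// Hypothetical CPU-bound helper, not part of this change
private fun fib(n: Int): Int = if (n < 2) n else fib(n - 1) + fib(n - 2)

fun main(args: Array<String>) = runBlocking {
    // A single scheduler backs both views; blocking(4) creates no threads of its own,
    // it only caps how many potentially blocking tasks may run in parallel.
    val dispatcher = ExperimentalCoroutineDispatcher()
    val blockingView = dispatcher.blocking(parallelism = 4)

    val cpuBound = launch(dispatcher) {
        // CPU-bound work stays on the regular dispatcher view
        println("fib(30) = ${fib(30)}")
    }

    val blockingJobs = List(8) { i ->
        launch(blockingView) {
            // At most 4 of these sleep concurrently; the rest wait in the
            // LimitingBlockingDispatcher queue until afterTask() frees a slot
            Thread.sleep(100)
            println("blocking task #$i done on ${Thread.currentThread().name}")
        }
    }

    cpuBound.join()
    for (job in blockingJobs) job.join()
    dispatcher.close()
}

Note that closing the original dispatcher also shuts down the threads behind the `blocking` view, since `LimitingBlockingDispatcher` owns no threads of its own and only piggybacks on the shared scheduler.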