|
4 | 4 |
|
5 | 5 | package kotlinx.coroutines
|
6 | 6 |
|
| 7 | +import kotlinx.atomicfu.* |
7 | 8 | import kotlinx.coroutines.channels.*
|
8 | 9 | import kotlinx.coroutines.internal.*
|
9 | 10 | import kotlin.coroutines.*
|
@@ -73,38 +74,75 @@ private class MultiWorkerDispatcher(
|
73 | 74 | workersCount: Int
|
74 | 75 | ) : CloseableCoroutineDispatcher() {
|
    // Overflow queue: tasks for which no idle worker was available at dispatch time.
    private val tasksQueue = Channel<Runnable>(Channel.UNLIMITED)
    // Idle workers park by publishing their private one-slot channel here;
    // dispatch() hands a task straight to such a channel instead of going through tasksQueue.
    private val availableWorkers = Channel<Channel<Runnable>>(Channel.UNLIMITED)
    // Lazily starts up to `workersCount` worker threads, each running workerRunLoop().
    private val workerPool = OnDemandAllocatingPool(workersCount) {
        Worker.start(name = "$name-$it").apply {
            executeAfter { workerRunLoop() }
        }
    }

    /**
     * (number of tasks - number of workers) * 2 + (1 if closed)
     *
     * Packed state: the lowest bit is the "closed" flag, the remaining bits hold
     * twice the difference between scheduled tasks and waiting workers, so the
     * balance and the closed flag can be read and updated in one atomic operation.
     */
    private val tasksAndWorkersCounter = atomic(0L)

    // Lowest bit set means close() has been called.
    private inline fun Long.isClosed() = this and 1L == 1L
    // >= 2 means at least one task is queued beyond the number of waiting workers.
    private inline fun Long.hasTasks() = this >= 2
    // Negative means at least one worker is waiting (or about to wait) without a task.
    private inline fun Long.hasWorkers() = this < 0

82 | 93 | private fun workerRunLoop() = runBlocking {
|
83 |
| - // NB: we leverage tail-call optimization in this loop, do not replace it with |
84 |
| - // .receive() without proper evaluation |
85 |
| - for (task in tasksQueue) { |
86 |
| - /** |
87 |
| - * Any unhandled exception here will pass through worker's boundary and will be properly reported. |
88 |
| - */ |
89 |
| - task.run() |
| 94 | + val privateChannel = Channel<Runnable>(1) |
| 95 | + while (true) { |
| 96 | + val state = tasksAndWorkersCounter.getAndUpdate { |
| 97 | + if (it.isClosed() && !it.hasTasks()) return@runBlocking |
| 98 | + it - 2 |
| 99 | + } |
| 100 | + if (state.hasTasks()) { |
| 101 | + // we promised to process a task, and there are some |
| 102 | + tasksQueue.receive().run() |
| 103 | + } else { |
| 104 | + availableWorkers.send(privateChannel) |
| 105 | + val task = privateChannel.receiveCatching().getOrNull()?.run() |
| 106 | + } |
90 | 107 | }
|
91 | 108 | }
|
92 | 109 |
|
93 |
| - override fun dispatch(context: CoroutineContext, block: Runnable) { |
94 |
| - fun throwClosed(block: Runnable) { |
95 |
| - throw IllegalStateException("Dispatcher $name was closed, attempted to schedule: $block") |
| 110 | + private fun obtainWorker(): Channel<Runnable> { |
| 111 | + // spin loop until a worker that promised to be here actually arrives. |
| 112 | + while (true) { |
| 113 | + val result = availableWorkers.tryReceive() |
| 114 | + return result.getOrNull() ?: continue |
96 | 115 | }
|
| 116 | + } |
97 | 117 |
|
98 |
| - if (!workerPool.allocate()) throwClosed(block) // Do not even try to send to avoid race |
99 |
| - |
100 |
| - tasksQueue.trySend(block).onClosed { |
101 |
| - throwClosed(block) |
| 118 | + override fun dispatch(context: CoroutineContext, block: Runnable) { |
| 119 | + val state = tasksAndWorkersCounter.getAndUpdate { |
| 120 | + if (it.isClosed()) |
| 121 | + throw IllegalStateException("Dispatcher $name was closed, attempted to schedule: $block") |
| 122 | + it + 2 |
| 123 | + } |
| 124 | + if (state.hasWorkers()) { |
| 125 | + // there are workers that have nothing to do, let's grab one of them |
| 126 | + obtainWorker().trySend(block) |
| 127 | + } else { |
| 128 | + workerPool.allocate() |
| 129 | + // no workers are available, we must queue the task |
| 130 | + tasksQueue.trySend(block) |
102 | 131 | }
|
103 | 132 | }
|
104 | 133 |
|
105 | 134 | override fun close() {
|
106 |
| - val workers = workerPool.close() |
107 |
| - tasksQueue.close() |
| 135 | + tasksAndWorkersCounter.getAndUpdate { if (it.isClosed()) it else it or 1L } |
| 136 | + val workers = workerPool.close() // no new workers will be created |
| 137 | + loop@while (true) { |
| 138 | + // check if there are workers that await tasks in their personal channels, we need to wake them up |
| 139 | + val state = tasksAndWorkersCounter.getAndUpdate { |
| 140 | + if (it.hasWorkers()) it + 2 else it |
| 141 | + } |
| 142 | + if (!state.hasWorkers()) |
| 143 | + break |
| 144 | + obtainWorker().close() |
| 145 | + } |
108 | 146 | /*
|
109 | 147 | * Here we cannot avoid waiting on `.result`, otherwise it will lead
|
110 | 148 | * to a native memory leak, including a pthread handle.
|
|
0 commit comments