@@ -334,7 +334,7 @@ internal class CoroutineScheduler(
        globalCpuQueue.close()
        // Finish processing tasks from globalQueue and/or from this worker's local queue
        while (true) {
-           val task = currentWorker?.findTask()
+           val task = currentWorker?.findTask(true)
                ?: globalCpuQueue.removeFirstOrNull()
                ?: globalBlockingQueue.removeFirstOrNull()
                ?: break
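
Shutdown drains every task source before the pool terminates: the elvis chain prefers the terminating worker's local queue, then falls through the global CPU and blocking queues, and breaks only when all three are empty. Passing `findTask(true)` forces the local-queue scan here regardless of the new `mayHaveLocalTasks` hint introduced below, so shutdown presumably never trusts the approximate flag. A minimal standalone sketch of that drain order, using `java.util.ArrayDeque` stand-ins rather than the scheduler's lock-free queues:

```kotlin
import java.util.ArrayDeque

// Stand-ins for the worker-local and the two global queues; the real scheduler
// uses its own lock-free structures, so this only mirrors the drain order.
fun drainOnShutdown(local: ArrayDeque<Runnable>, cpu: ArrayDeque<Runnable>, blocking: ArrayDeque<Runnable>) {
    while (true) {
        // Same elvis chain as the diff: local work first, then both global
        // queues, stopping only when every source reports empty.
        val task = local.poll()
            ?: cpu.poll()
            ?: blocking.poll()
            ?: break
        task.run()
    }
}
```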
@@ -469,10 +469,10 @@ internal class CoroutineScheduler(
         */
        if (worker.state === WorkerState.TERMINATED) return task
        // Do not add CPU tasks in local queue if we are not able to execute it
-       // TODO discuss: maybe add it to the local queue and offload back in the global queue iff permit wasn't acquired?
        if (task.mode == TaskMode.NON_BLOCKING && worker.isBlocking) {
            return task
        }
+       worker.mayHaveLocalTasks = true
        return worker.localQueue.add(task, fair = fair)
    }

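This write is the producer half of the new `mayHaveLocalTasks` hint declared below: any thread that puts a task into a worker's local queue raises the flag, and the owner clears it once a scan comes up empty. A reduced sketch of the pattern with hypothetical names (`WorkerSketch`, a single `ConcurrentLinkedQueue`); the real field is a plain `@JvmField var`, and stale reads are tolerated because workers rescan all queues before parking:

```kotlin
import java.util.concurrent.ConcurrentLinkedQueue

// Hypothetical single-queue reduction of the dirty-flag idea.
class WorkerSketch {
    private val localQueue = ConcurrentLinkedQueue<Runnable>()
    @Volatile
    var mayHaveLocalTasks = false // producers raise it, the owner clears it

    fun submit(task: Runnable) {
        mayHaveLocalTasks = true // raise before enqueueing, as in submitToLocalQueue
        localQueue.add(task)
    }

    fun poll(): Runnable? {
        if (!mayHaveLocalTasks) return null // cheap skip: no scan of a known-empty queue
        val task = localQueue.poll()
        // A submit() racing with this clear can leave the flag down while a task
        // sits in the queue; that is tolerated because workers rescan before parking.
        if (task == null) mayHaveLocalTasks = false
        return task
    }
}
```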
@@ -658,42 +658,47 @@ internal class CoroutineScheduler(
        }

        override fun run() = runWorker()
+       @JvmField
+       var mayHaveLocalTasks = false

        private fun runWorker() {
            var rescanned = false
            while (!isTerminated && state != WorkerState.TERMINATED) {
-               val task = findTask()
+               val task = findTask(mayHaveLocalTasks)
                // Task found. Execute and repeat
                if (task != null) {
                    rescanned = false
                    minDelayUntilStealableTaskNs = 0L
                    executeTask(task)
                    continue
+               } else {
+                   mayHaveLocalTasks = false
                }
                /*
                 * No tasks were found:
                 * 1) Either at least one of the workers has stealable task in its FIFO-buffer with a stealing deadline.
                 *    Then its deadline is stored in [minDelayUntilStealableTask]
                 *
                 * Then just park for that duration (ditto re-scanning).
-                * While it could potentially lead to short (up to WORK_STEALING_TIME_RESOLUTION_NS ns) starvations,
+                * While it could potentially lead to short (up to WORK_STEALING_TIME_RESOLUTION_NS ns) starvations,
                 * excess unparks and managing "one unpark per signalling" invariant become unfeasible, instead we are going to resolve
                 * it with "spinning via scans" mechanism.
                 * NB: this short potential parking does not interfere with `tryUnpark`
                 */
                if (minDelayUntilStealableTaskNs != 0L) {
                    if (!rescanned) {
                        rescanned = true
-                       continue
                    } else {
+                       rescanned = false
                        tryReleaseCpu(WorkerState.PARKING)
                        interrupted()
                        LockSupport.parkNanos(minDelayUntilStealableTaskNs)
                        minDelayUntilStealableTaskNs = 0L
                    }
+                   continue
                }
                /*
-                * 2) No tasks available, time to park and, potentially, shut down the thread.
+                * 2) Or no tasks available, time to park and, potentially, shut down the thread.
                 * Add itself to the stack of parked workers, re-scans all the queues
                 * to avoid missing wake-up (requestCpuWorker) and either starts executing discovered tasks or parks itself awaiting for new tasks.
                 */
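
The restructured branch makes the two miss paths symmetric: a first miss with a known stealing deadline triggers one immediate rescan, the second parks for exactly `minDelayUntilStealableTaskNs`, and the `continue` is hoisted so both paths take it. A compilable sketch of that rescan-then-park shape, with a hypothetical `findWork` stub standing in for the scheduler's task search:

```kotlin
import java.util.concurrent.locks.LockSupport

// Hypothetical stub: returns a task, or null together with a park-deadline hint in nanoseconds.
fun runLoop(findWork: () -> Pair<Runnable?, Long>) {
    var rescanned = false
    while (!Thread.currentThread().isInterrupted) {
        val (task, delayNs) = findWork()
        if (task != null) {
            rescanned = false
            task.run()
            continue
        }
        if (delayNs != 0L) {
            if (!rescanned) {
                rescanned = true // first miss: one more immediate scan ("spinning via scans")
            } else {
                rescanned = false // second miss: park until the stealable task matures
                LockSupport.parkNanos(delayNs)
            }
            continue // hoisted out of both branches, as in the diff
        }
        break // no work and no deadline: the real worker proceeds to tryPark()
    }
}
```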
@@ -704,20 +709,24 @@ internal class CoroutineScheduler(

        // Counterpart to "tryUnpark"
        private fun tryPark() {
-           parkingState.value = PARKING_ALLOWED
+           if (!inStack()) {
+               parkingState.value = PARKING_ALLOWED
+           }
            if (parkedWorkersStackPush(this)) {
                return
            } else {
                assert { localQueue.size == 0 }
-               tryReleaseCpu(WorkerState.PARKING)
-               interrupted() // Cleanup interruptions
-               // Failed to get a parking permit, bailout
-               if (!parkingState.compareAndSet(PARKING_ALLOWED, PARKED)) {
-                   return
-               }
-               while (inStack()) { // Prevent spurious wakeups
+               // Failed to get a parking permit => we are not in the stack
+               while (inStack()) {
                    if (isTerminated || state == WorkerState.TERMINATED) break
-                   park()
+                   if (parkingState.value != PARKED && !parkingState.compareAndSet(PARKING_ALLOWED, PARKED)) {
+                       return
+                   }
+                   tryReleaseCpu(WorkerState.PARKING)
+                   interrupted() // Cleanup interruptions
+                   if (inStack()) {
+                       park()
+                   }
                }
            }
        }
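
The permit transition `PARKING_ALLOWED -> PARKED` now happens inside the `inStack()` loop, so a worker claims its parking permit only while it is still registered on the parked-workers stack; if an unparker got to the state first, the CAS fails and `tryPark` returns instead of losing the wakeup. A reduced sketch of the handshake, assuming a hypothetical `CLAIMED` state for the unparking side (the real `tryUnpark` is only referenced, not shown, in this diff):

```kotlin
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.locks.LockSupport

const val PARKING_ALLOWED = 0
const val PARKED = 1
const val CLAIMED = 2 // hypothetical: an unparker has taken the permit

class PermitSketch {
    private val parkingState = AtomicInteger(PARKING_ALLOWED)

    // Mirrors the diff's "if (!inStack()) parkingState.value = PARKING_ALLOWED".
    fun resetPermit() = parkingState.set(PARKING_ALLOWED)

    fun tryPark(inStack: () -> Boolean) {
        while (inStack()) {
            // Claim the permit only while still registered; if an unparker
            // claimed it first, bail out instead of missing the wakeup.
            if (parkingState.get() != PARKED && !parkingState.compareAndSet(PARKING_ALLOWED, PARKED)) return
            if (inStack()) LockSupport.park() // re-check narrows the lost-wakeup window
        }
    }

    fun tryUnpark(worker: Thread) {
        // Counterpart: claim the permit unconditionally, then unpark only a worker
        // that already reached PARKED; one claimed permit yields at most one unpark.
        if (parkingState.getAndSet(CLAIMED) == PARKED) LockSupport.unpark(worker)
    }
}
```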
@@ -848,22 +857,30 @@ internal class CoroutineScheduler(
            }
        }

-       fun findTask(): Task? {
-           if (tryAcquireCpuPermit()) return findAnyTask()
+       fun findTask(scanLocalQueue: Boolean): Task? {
+           if (tryAcquireCpuPermit()) return findAnyTask(scanLocalQueue)
            // If we can't acquire a CPU permit -- attempt to find blocking task
-           val task = localQueue.poll() ?: globalBlockingQueue.removeFirstOrNull()
+           val task = if (scanLocalQueue) {
+               localQueue.poll() ?: globalBlockingQueue.removeFirstOrNull()
+           } else {
+               globalBlockingQueue.removeFirstOrNull()
+           }
            return task ?: trySteal(blockingOnly = true)
        }

-       private fun findAnyTask(): Task? {
+       private fun findAnyTask(scanLocalQueue: Boolean): Task? {
            /*
             * Anti-starvation mechanism: probabilistically poll either local
             * or global queue to ensure progress for both external and internal tasks.
             */
-           val globalFirst = nextInt(2 * corePoolSize) == 0
-           if (globalFirst) pollGlobalQueues()?.let { return it }
-           localQueue.poll()?.let { return it }
-           if (!globalFirst) pollGlobalQueues()?.let { return it }
+           if (scanLocalQueue) {
+               val globalFirst = nextInt(2 * corePoolSize) == 0
+               if (globalFirst) pollGlobalQueues()?.let { return it }
+               localQueue.poll()?.let { return it }
+               if (!globalFirst) pollGlobalQueues()?.let { return it }
+           } else {
+               pollGlobalQueues()?.let { return it }
+           }
            return trySteal(blockingOnly = false)
        }

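The anti-starvation polling survives only on the `scanLocalQueue` path: with probability 1 / (2 * corePoolSize) the global queues are checked before the local one, so externally submitted tasks keep making progress even when a worker continuously produces local work. With `corePoolSize = 4`, for instance, a worker goes global-first about once per eight polls. A standalone sketch of the biased ordering over stand-in queues:

```kotlin
import java.util.ArrayDeque
import java.util.concurrent.ThreadLocalRandom

// Stand-in queues; the scheduler's own queues are lock-free, this only shows the ordering.
fun pollBiased(local: ArrayDeque<Runnable>, global: ArrayDeque<Runnable>, corePoolSize: Int): Runnable? {
    // Global-first with probability 1 / (2 * corePoolSize): rare enough to preserve
    // locality, frequent enough that external tasks cannot be starved.
    val globalFirst = ThreadLocalRandom.current().nextInt(2 * corePoolSize) == 0
    if (globalFirst) global.poll()?.let { return it }
    local.poll()?.let { return it }
    if (!globalFirst) global.poll()?.let { return it }
    return null
}
```
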
@@ -887,7 +904,7 @@ internal class CoroutineScheduler(

            var currentIndex = nextInt(created)
            var minDelay = Long.MAX_VALUE
-           repeat(created) {
+           repeat(workers.length()) {
                ++currentIndex
                if (currentIndex > created) currentIndex = 1
                val worker = workers[currentIndex]
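
Bounding the victim scan by `repeat(workers.length())` instead of `repeat(created)` means the circular walk covers every slot of the workers array even when `created` is a racy snapshot, so a concurrently registered worker can no longer be skipped. A sketch of such a fixed-bound circular scan, with a hypothetical `tryStealFrom` callback and an `AtomicReferenceArray` standing in for the scheduler's worker table (slot 0 left unused, as in the diff):

```kotlin
import java.util.concurrent.atomic.AtomicReferenceArray

// Hypothetical circular victim scan bounded by the array length rather than a racy counter.
fun <W : Any> scanVictims(
    workers: AtomicReferenceArray<W?>,
    startIndex: Int,
    tryStealFrom: (W) -> Boolean
): Boolean {
    var currentIndex = startIndex
    repeat(workers.length()) {
        ++currentIndex
        if (currentIndex >= workers.length()) currentIndex = 1 // wrap around; slot 0 stays unused
        val worker = workers.get(currentIndex) ?: return@repeat // skip slots not yet populated
        if (tryStealFrom(worker)) return true
    }
    return false
}
```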