
Commit 8f0eed4

eberman-quic authored and Ingo Molnar committed
freezer,sched: Use saved_state to reduce some spurious wakeups
After commit f5d39b0 ("freezer,sched: Rewrite core freezer logic"), tasks that transition directly from TASK_FREEZABLE to TASK_FROZEN are always woken up on the thaw path. Prior to that commit, tasks could ask the freezer to consider them "frozen enough" via freezer_do_not_count(). That commit replaced freezer_do_not_count() with a TASK_FREEZABLE state, which allows the freezer to mark a task as TASK_FROZEN immediately, without waking it up. This is efficient for the suspend path, but on the thaw path the task is always woken up, even if it didn't need the wakeup and simply goes back to its TASK_(UN)INTERRUPTIBLE state. Although these tasks are capable of handling the wakeup, the extra wakeup has an observable power/perf impact.

On Android we observed that many tasks wait in the TASK_FREEZABLE state (particularly because many of them are binder clients). We observed nearly 4x the number of tasks and a corresponding linear increase in latency and power consumption when thawing the system. The latency increased from ~15ms to ~50ms.

Avoid the spurious wakeups by saving the state of TASK_FREEZABLE tasks. If the task was running before entering the TASK_FROZEN state (__refrigerator()), or if the task received a wakeup for the saved state, then the task is woken on thaw. saved_state from PREEMPT_RT locks can be re-used because the freezer would not stomp on the rtlock wait flow: TASK_RTLOCK_WAIT isn't considered freezable.

Reported-by: Prakash Viswalingam <[email protected]>
Signed-off-by: Elliot Berman <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
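For context, a minimal sketch (not part of this commit) of the freezable wait pattern whose thaw-time wakeups this change avoids; the wait queue, condition, and function names here are hypothetical:

	/*
	 * Sketch only: a task parked like this is marked TASK_FROZEN in
	 * place on suspend, with no wakeup needed. Before this change,
	 * thaw always woke it up just so it could re-enter the wait.
	 */
	#include <linux/freezer.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical wait queue */
	static bool demo_cond;				/* hypothetical wake condition */

	static int demo_freezable_wait(void)
	{
		/* Sleeps in TASK_INTERRUPTIBLE | TASK_FREEZABLE until demo_cond. */
		return wait_event_freezable(demo_wq, READ_ONCE(demo_cond));
	}

With this commit, such a task only receives a thaw-time wakeup if demo_cond's wakeup actually arrived while it was frozen.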
1 parent fbaa6a1 commit 8f0eed4


2 files changed: +33 -31 lines changed


kernel/freezer.c

Lines changed: 19 additions & 22 deletions
@@ -71,7 +71,11 @@ bool __refrigerator(bool check_kthr_stop)
 	for (;;) {
 		bool freeze;
 
+		raw_spin_lock_irq(&current->pi_lock);
 		set_current_state(TASK_FROZEN);
+		/* unstale saved_state so that __thaw_task() will wake us up */
+		current->saved_state = TASK_RUNNING;
+		raw_spin_unlock_irq(&current->pi_lock);
 
 		spin_lock_irq(&freezer_lock);
 		freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
@@ -129,6 +133,7 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
 	WARN_ON_ONCE(debug_locks && p->lockdep_depth);
 #endif
 
+	p->saved_state = p->__state;
 	WRITE_ONCE(p->__state, TASK_FROZEN);
 	return TASK_FROZEN;
 }
@@ -170,42 +175,34 @@ bool freeze_task(struct task_struct *p)
 }
 
 /*
- * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical
- * state in p->jobctl. If either of them got a wakeup that was missed because
- * TASK_FROZEN, then their canonical state reflects that and the below will
- * refuse to restore the special state and instead issue the wakeup.
+ * Restore the saved_state before the task entered freezer. For typical task
+ * in the __refrigerator(), saved_state == TASK_RUNNING so nothing happens
+ * here. For tasks which were TASK_NORMAL | TASK_FREEZABLE, their initial state
+ * is restored unless they got an expected wakeup (see ttwu_state_match()).
+ * Returns 1 if the task state was restored.
  */
-static int __set_task_special(struct task_struct *p, void *arg)
+static int __restore_freezer_state(struct task_struct *p, void *arg)
 {
-	unsigned int state = 0;
+	unsigned int state = p->saved_state;
 
-	if (p->jobctl & JOBCTL_TRACED)
-		state = TASK_TRACED;
-
-	else if (p->jobctl & JOBCTL_STOPPED)
-		state = TASK_STOPPED;
-
-	if (state)
+	if (state != TASK_RUNNING) {
 		WRITE_ONCE(p->__state, state);
+		return 1;
+	}
 
-	return state;
+	return 0;
 }
 
 void __thaw_task(struct task_struct *p)
 {
-	unsigned long flags, flags2;
+	unsigned long flags;
 
	spin_lock_irqsave(&freezer_lock, flags);
 	if (WARN_ON_ONCE(freezing(p)))
 		goto unlock;
 
-	if (lock_task_sighand(p, &flags2)) {
-		/* TASK_FROZEN -> TASK_{STOPPED,TRACED} */
-		bool ret = task_call_func(p, __set_task_special, NULL);
-		unlock_task_sighand(p, &flags2);
-		if (ret)
-			goto unlock;
-	}
+	if (task_call_func(p, __restore_freezer_state, NULL))
+		goto unlock;
 
 	wake_up_state(p, TASK_FROZEN);
 unlock:
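Taken together, the freezer.c changes yield the following state flow; this is an informal summary, not code from the commit:

	/*
	 * freeze:  p->__state == TASK_INTERRUPTIBLE | TASK_FREEZABLE
	 *          __set_task_frozen():   p->saved_state = p->__state;
	 *                                 p->__state     = TASK_FROZEN;
	 *
	 * wakeup while frozen (e.g. the event the task waits for arrives):
	 *          ttwu_state_match():    p->saved_state = TASK_RUNNING;
	 *                                 (task stays frozen, waker sees success)
	 *
	 * thaw:    __thaw_task():
	 *          saved_state != TASK_RUNNING: p->__state = saved_state; no wakeup
	 *          saved_state == TASK_RUNNING: wake_up_state(p, TASK_FROZEN)
	 */

Note that __refrigerator() resets saved_state to TASK_RUNNING each loop iteration precisely so that kthreads frozen there still get woken on thaw.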

kernel/sched/core.c

Lines changed: 14 additions & 9 deletions
@@ -2242,8 +2242,8 @@ static __always_inline
 int task_state_match(struct task_struct *p, unsigned int state)
 {
 	/*
-	 * Serialize against current_save_and_set_rtlock_wait_state() and
-	 * current_restore_rtlock_saved_state().
+	 * Serialize against current_save_and_set_rtlock_wait_state(),
+	 * current_restore_rtlock_saved_state(), and __refrigerator().
 	 */
 	guard(raw_spinlock_irq)(&p->pi_lock);
 	return __task_state_match(p, state);
@@ -4015,13 +4015,17 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * The caller holds p::pi_lock if p != current or has preemption
  * disabled when p == current.
  *
- * The rules of PREEMPT_RT saved_state:
+ * The rules of saved_state:
  *
  * The related locking code always holds p::pi_lock when updating
  * p::saved_state, which means the code is fully serialized in both cases.
  *
- * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
- * bits set. This allows to distinguish all wakeup scenarios.
+ * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
+ * No other bits set. This allows to distinguish all wakeup scenarios.
+ *
+ * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
+ * allows us to prevent early wakeup of tasks before they can be run on
+ * asymmetric ISA architectures (eg ARMv9).
  */
 static __always_inline
 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
@@ -4037,10 +4041,11 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 
 	/*
 	 * Saved state preserves the task state across blocking on
-	 * an RT lock. If the state matches, set p::saved_state to
-	 * TASK_RUNNING, but do not wake the task because it waits
-	 * for a lock wakeup. Also indicate success because from
-	 * the regular waker's point of view this has succeeded.
+	 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
+	 * set p::saved_state to TASK_RUNNING, but do not wake the task
+	 * because it waits for a lock wakeup or __thaw_task(). Also
+	 * indicate success because from the regular waker's point of
+	 * view this has succeeded.
 	 *
 	 * After acquiring the lock the task will restore p::__state
 	 * from p::saved_state which ensures that the regular