@@ -64,21 +64,33 @@ int sysctl_oom_dump_tasks = 1;
64
64
*/
65
65
DEFINE_MUTEX (oom_lock );
66
66
67
/*
 * is_memcg_oom - was this OOM triggered by a memory cgroup limit?
 * @oc: the oom_control describing the current OOM situation
 *
 * Returns true for a memcg-constrained OOM (oc->memcg set by the charge
 * path), false for a global/system-wide OOM.  Memcg OOMs are not bound by
 * cpuset/mempolicy node restrictions, so callers use this to skip the
 * nodemask eligibility checks.
 */
static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}
71
+
67
72
#ifdef CONFIG_NUMA
68
73
/**
69
- * has_intersects_mems_allowed () - check task eligiblity for kill
74
+ * oom_cpuset_eligible () - check task eligibility for kill
70
75
* @start: task struct of which task to consider
71
76
* @mask: nodemask passed to page allocator for mempolicy ooms
72
77
*
73
78
* Task eligibility is determined by whether or not a candidate task, @tsk,
74
79
* shares the same mempolicy nodes as current if it is bound by such a policy
75
80
* and whether or not it has the same set of allowed cpuset nodes.
81
+ *
82
+ * This function is assuming oom-killer context and 'current' has triggered
83
+ * the oom-killer.
76
84
*/
77
- static bool has_intersects_mems_allowed (struct task_struct * start ,
78
- const nodemask_t * mask )
85
+ static bool oom_cpuset_eligible (struct task_struct * start ,
86
+ struct oom_control * oc )
79
87
{
80
88
struct task_struct * tsk ;
81
89
bool ret = false;
90
+ const nodemask_t * mask = oc -> nodemask ;
91
+
92
+ if (is_memcg_oom (oc ))
93
+ return true;
82
94
83
95
rcu_read_lock ();
84
96
for_each_thread (start , tsk ) {
@@ -105,8 +117,7 @@ static bool has_intersects_mems_allowed(struct task_struct *start,
105
117
return ret ;
106
118
}
107
119
#else
108
- static bool has_intersects_mems_allowed (struct task_struct * tsk ,
109
- const nodemask_t * mask )
120
/*
 * !CONFIG_NUMA stub: without NUMA there are no mempolicy/cpuset node
 * restrictions, so every task's memory is reachable and all tasks are
 * cpuset-eligible for OOM killing.
 */
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
@@ -146,24 +157,13 @@ static inline bool is_sysrq_oom(struct oom_control *oc)
146
157
return oc -> order == -1 ;
147
158
}
148
159
149
- static inline bool is_memcg_oom (struct oom_control * oc )
150
- {
151
- return oc -> memcg != NULL ;
152
- }
153
-
154
160
/* return true if the task is not adequate as candidate victim task. */
155
- static bool oom_unkillable_task (struct task_struct * p ,
156
- const nodemask_t * nodemask )
161
+ static bool oom_unkillable_task (struct task_struct * p )
157
162
{
158
163
if (is_global_init (p ))
159
164
return true;
160
165
if (p -> flags & PF_KTHREAD )
161
166
return true;
162
-
163
- /* p may not have freeable memory in nodemask */
164
- if (!has_intersects_mems_allowed (p , nodemask ))
165
- return true;
166
-
167
167
return false;
168
168
}
169
169
@@ -190,19 +190,17 @@ static bool is_dump_unreclaim_slabs(void)
190
190
* oom_badness - heuristic function to determine which candidate task to kill
191
191
* @p: task struct of which task we should calculate
192
192
* @totalpages: total present RAM allowed for page allocation
193
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
194
193
*
195
194
* The heuristic for determining which task to kill is made to be as simple and
196
195
* predictable as possible. The goal is to return the highest value for the
197
196
* task consuming the most memory to avoid subsequent oom failures.
198
197
*/
199
- unsigned long oom_badness (struct task_struct * p ,
200
- const nodemask_t * nodemask , unsigned long totalpages )
198
+ unsigned long oom_badness (struct task_struct * p , unsigned long totalpages )
201
199
{
202
200
long points ;
203
201
long adj ;
204
202
205
- if (oom_unkillable_task (p , nodemask ))
203
+ if (oom_unkillable_task (p ))
206
204
return 0 ;
207
205
208
206
p = find_lock_task_mm (p );
@@ -313,7 +311,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
313
311
struct oom_control * oc = arg ;
314
312
unsigned long points ;
315
313
316
- if (oom_unkillable_task (task , oc -> nodemask ))
314
+ if (oom_unkillable_task (task ))
315
+ goto next ;
316
+
317
+ /* p may not have freeable memory in nodemask */
318
+ if (!is_memcg_oom (oc ) && !oom_cpuset_eligible (task , oc ))
317
319
goto next ;
318
320
319
321
/*
@@ -337,7 +339,7 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
337
339
goto select ;
338
340
}
339
341
340
- points = oom_badness (task , oc -> nodemask , oc -> totalpages );
342
+ points = oom_badness (task , oc -> totalpages );
341
343
if (!points || points < oc -> chosen_points )
342
344
goto next ;
343
345
@@ -382,7 +384,11 @@ static int dump_task(struct task_struct *p, void *arg)
382
384
struct oom_control * oc = arg ;
383
385
struct task_struct * task ;
384
386
385
- if (oom_unkillable_task (p , oc -> nodemask ))
387
+ if (oom_unkillable_task (p ))
388
+ return 0 ;
389
+
390
+ /* p may not have freeable memory in nodemask */
391
+ if (!is_memcg_oom (oc ) && !oom_cpuset_eligible (p , oc ))
386
392
return 0 ;
387
393
388
394
task = find_lock_task_mm (p );
@@ -1079,7 +1085,8 @@ bool out_of_memory(struct oom_control *oc)
1079
1085
check_panic_on_oom (oc );
1080
1086
1081
1087
if (!is_memcg_oom (oc ) && sysctl_oom_kill_allocating_task &&
1082
- current -> mm && !oom_unkillable_task (current , oc -> nodemask ) &&
1088
+ current -> mm && !oom_unkillable_task (current ) &&
1089
+ oom_cpuset_eligible (current , oc ) &&
1083
1090
current -> signal -> oom_score_adj != OOM_SCORE_ADJ_MIN ) {
1084
1091
get_task_struct (current );
1085
1092
oc -> chosen = current ;
0 commit comments