@@ -101,6 +101,26 @@ static struct task_struct *find_lock_task_mm(struct task_struct *p)
 	return NULL;
 }
 
+/* return true if the task is not adequate as candidate victim task. */
+static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
+		const nodemask_t *nodemask)
+{
+	if (is_global_init(p))
+		return true;
+	if (p->flags & PF_KTHREAD)
+		return true;
+
+	/* When mem_cgroup_out_of_memory() and p is not member of the group */
+	if (mem && !task_in_mem_cgroup(p, mem))
+		return true;
+
+	/* p may not have freeable memory in nodemask */
+	if (!has_intersects_mems_allowed(p, nodemask))
+		return true;
+
+	return false;
+}
+
 /**
  * badness - calculate a numeric value for how bad this task has been
  * @p: task struct of which task we should calculate
@@ -295,12 +315,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
 	for_each_process(p) {
 		unsigned long points;
 
-		/* skip the init task and kthreads */
-		if (is_global_init(p) || (p->flags & PF_KTHREAD))
-			continue;
-		if (mem && !task_in_mem_cgroup(p, mem))
-			continue;
-		if (!has_intersects_mems_allowed(p, nodemask))
+		if (oom_unkillable_task(p, mem, nodemask))
 			continue;
 
 		/*
@@ -467,11 +482,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 
 		if (child->mm == p->mm)
 			continue;
-		if (child->flags & PF_KTHREAD)
-			continue;
-		if (mem && !task_in_mem_cgroup(child, mem))
-			continue;
-		if (!has_intersects_mems_allowed(child, nodemask))
+		if (oom_unkillable_task(p, mem, nodemask))
 			continue;
 
 		/* badness() returns 0 if the thread is unkillable */
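As a rough, non-kernel sketch of the refactoring pattern only (every name below -- struct candidate, is_ineligible(), the sample pool -- is hypothetical and not from the patch): the duplicated per-task filters are folded into a single boolean predicate that each selection loop consults before scoring a candidate, mirroring how select_bad_process() now calls oom_unkillable_task() ahead of badness().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a task considered by the selection loop. */
struct candidate {
	bool is_init;      /* analogous to is_global_init(p) */
	bool is_kthread;   /* analogous to p->flags & PF_KTHREAD */
	bool in_cgroup;    /* analogous to task_in_mem_cgroup(p, mem) */
	bool has_mem;      /* analogous to has_intersects_mems_allowed(p, nodemask) */
	int score;
};

/* One predicate replacing the checks duplicated at each call site. */
static bool is_ineligible(const struct candidate *c)
{
	if (c->is_init)
		return true;
	if (c->is_kthread)
		return true;
	if (!c->in_cgroup)
		return true;
	if (!c->has_mem)
		return true;
	return false;
}

int main(void)
{
	struct candidate pool[] = {
		{ .is_init = true,  .in_cgroup = true, .has_mem = true, .score = 10 },
		{ .in_cgroup = true, .has_mem = true, .score = 42 },
		{ .is_kthread = true, .in_cgroup = true, .has_mem = true, .score = 99 },
	};
	int best = -1;
	unsigned int i;

	/* Mirrors the shape of select_bad_process(): skip ineligible tasks, keep the worst score. */
	for (i = 0; i < sizeof(pool) / sizeof(pool[0]); i++) {
		if (is_ineligible(&pool[i]))
			continue;
		if (pool[i].score > best)
			best = pool[i].score;
	}
	printf("selected score: %d\n", best); /* prints 42: the only eligible candidate */
	return 0;
}

Keeping the eligibility rules in one predicate means the two call sites (select_bad_process() and the child scan in oom_kill_process()) cannot drift apart when a new rule is added.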