  /*
   *  linux/mm/oom_kill.c
   *
   *  Copyright (C)  1998,2000  Rik van Riel
   *	Thanks go out to Claus Fischer for some serious inspiration and
   *	for goading me into coding this file...
   *  Copyright (C)  2010  Google, Inc.
   *	Rewritten by David Rientjes
   *
   *  The routines in this file are used to kill a process when
   *  we're seriously out of memory. This gets called from __alloc_pages()
   *  in mm/page_alloc.c when we really run out of memory.
   *
   *  Since we won't call these routines often (on a well-configured
   *  machine) this file will double as a 'coding guide' and a signpost
   *  for newbie kernel hackers. It features several pointers to major
   *  kernel subsystems and hints as to where to find out what things do.
   */
  #include <linux/oom.h>
  #include <linux/mm.h>
  #include <linux/err.h>
  #include <linux/gfp.h>
  #include <linux/sched.h>
  #include <linux/swap.h>
  #include <linux/timex.h>
  #include <linux/jiffies.h>
  #include <linux/cpuset.h>
  #include <linux/export.h>
  #include <linux/notifier.h>
  #include <linux/memcontrol.h>
  #include <linux/mempolicy.h>
  #include <linux/security.h>
  #include <linux/ptrace.h>
  #include <linux/freezer.h>

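  /* OOM tunables, exposed to userspace via /proc/sys/vm/ */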
  int sysctl_panic_on_oom;
  int sysctl_oom_kill_allocating_task;
  int sysctl_oom_dump_tasks = 1;
  static DEFINE_SPINLOCK(zone_scan_lock);

  /*
   * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
   * @old_val: old oom_score_adj for compare
   * @new_val: new oom_score_adj for swap
   *
   * Sets the oom_score_adj value for current to @new_val iff its present value is
   * @old_val.  Usually used to reinstate a previous value to prevent racing with
   * userspace tuning the value in the interim.
   */
  void compare_swap_oom_score_adj(int old_val, int new_val)
  {
  	struct sighand_struct *sighand = current->sighand;
  
  	spin_lock_irq(&sighand->siglock);
  	if (current->signal->oom_score_adj == old_val)
  		current->signal->oom_score_adj = new_val;
  	spin_unlock_irq(&sighand->siglock);
  }
  /**
   * test_set_oom_score_adj() - set current's oom_score_adj and return old value
   * @new_val: new oom_score_adj value
   *
   * Sets the oom_score_adj value for current to @new_val with proper
   * synchronization and returns the old value.  Usually used to temporarily
   * set a value, save the old value in the caller, and then reinstate it later.
   */
  int test_set_oom_score_adj(int new_val)
  {
  	struct sighand_struct *sighand = current->sighand;
  	int old_val;
  
  	spin_lock_irq(&sighand->siglock);
  	old_val = current->signal->oom_score_adj;
  	current->signal->oom_score_adj = new_val;
  	spin_unlock_irq(&sighand->siglock);
  
  	return old_val;
  }
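  /*
   * Typical usage (sketch): temporarily make current the preferred OOM victim
   * and later restore the old value only if userspace did not change it in
   * the meantime:
   *
   *	int old = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
   *	... allocate or free memory on behalf of another process ...
   *	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old);
   */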
  #ifdef CONFIG_NUMA
  /**
   * has_intersects_mems_allowed() - check task eligibility for kill
   * @tsk: task struct of which task to consider
   * @mask: nodemask passed to page allocator for mempolicy ooms
   *
   * Task eligibility is determined by whether or not a candidate task, @tsk,
   * shares the same mempolicy nodes as current if it is bound by such a policy
   * and whether or not it has the same set of allowed cpuset nodes.
   */
  static bool has_intersects_mems_allowed(struct task_struct *tsk,
  					const nodemask_t *mask)
  {
  	struct task_struct *start = tsk;
  
  	do {
  		if (mask) {
  			/*
  			 * If this is a mempolicy constrained oom, tsk's
  			 * cpuset is irrelevant.  Only return true if its
  			 * mempolicy intersects current, otherwise it may be
  			 * needlessly killed.
  			 */
  			if (mempolicy_nodemask_intersects(tsk, mask))
  				return true;
  		} else {
  			/*
  			 * This is not a mempolicy constrained oom, so only
  			 * check the mems of tsk's cpuset.
  			 */
  			if (cpuset_mems_allowed_intersects(current, tsk))
  				return true;
  		}
  	} while_each_thread(start, tsk);
  	return false;
  }
  #else
  static bool has_intersects_mems_allowed(struct task_struct *tsk,
  					const nodemask_t *mask)
  {
  	return true;
  }
  #endif /* CONFIG_NUMA */

  /*
   * The process p may have detached its own ->mm while exiting or through
   * use_mm(), but one or more of its subthreads may still have a valid
   * pointer.  Return p, or any of its subthreads with a valid ->mm, with
   * task_lock() held.
   */
  struct task_struct *find_lock_task_mm(struct task_struct *p)
  {
  	struct task_struct *t = p;
  
  	do {
  		task_lock(t);
  		if (likely(t->mm))
  			return t;
  		task_unlock(t);
  	} while_each_thread(p, t);
  
  	return NULL;
  }
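  /*
   * Note: the task returned by find_lock_task_mm() is still task_lock()ed;
   * callers read ->mm and then drop the lock themselves, e.g. (sketch):
   *
   *	struct task_struct *t = find_lock_task_mm(p);
   *	if (t) {
   *		... use t->mm ...
   *		task_unlock(t);
   *	}
   */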
  /* return true if the task is not adequate as candidate victim task. */
  static bool oom_unkillable_task(struct task_struct *p,
  		const struct mem_cgroup *mem, const nodemask_t *nodemask)
  {
  	if (is_global_init(p))
  		return true;
  	if (p->flags & PF_KTHREAD)
  		return true;
  
  	/* Called from mem_cgroup_out_of_memory() and p is not a member of the group */
  	if (mem && !task_in_mem_cgroup(p, mem))
  		return true;
  
  	/* p may not have freeable memory in nodemask */
  	if (!has_intersects_mems_allowed(p, nodemask))
  		return true;
  
  	return false;
  }
  /**
   * oom_badness - heuristic function to determine which candidate task to kill
   * @p: task struct of the task whose badness score we calculate
   * @totalpages: total present RAM allowed for page allocation
   *
   * The heuristic for determining which task to kill is made to be as simple and
   * predictable as possible.  The goal is to return the highest value for the
   * task consuming the most memory to avoid subsequent oom failures.
   */
  unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
  		      const nodemask_t *nodemask, unsigned long totalpages)
  {
  	long points;
  
  	if (oom_unkillable_task(p, mem, nodemask))
  		return 0;
  
  	p = find_lock_task_mm(p);
  	if (!p)
  		return 0;
  	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
  		task_unlock(p);
  		return 0;
  	}
  	/*
  	 * The memory controller may have a limit of 0 bytes, so avoid a divide
  	 * by zero, if necessary.
  	 */
  	if (!totalpages)
  		totalpages = 1;
  
  	/*
  	 * The baseline for the badness score is the proportion of RAM that the
  	 * task's rss, page tables and swap space use.
  	 */
  	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
  	points += get_mm_counter(p->mm, MM_SWAPENTS);
  
  	points *= 1000;
  	points /= totalpages;
  	task_unlock(p);
  
  	/*
  	 * Root processes get 3% bonus, just like the __vm_enough_memory()
  	 * implementation used by LSMs.
  	 */
  	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
  		points -= 30;
  
  	/*
  	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
  	 * either completely disable oom killing or always prefer a certain
  	 * task.
  	 */
  	points += p->signal->oom_score_adj;
  
  	/*
  	 * Never return 0 for an eligible task that may be killed since it's
  	 * possible that no single user task uses more than 0.1% of memory and
  	 * no single admin task uses more than 3.0%.
  	 */
  	if (points <= 0)
  		return 1;
  	return (points < 1000) ? points : 1000;
  }
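  /*
   * Example (sketch): with totalpages = 1,000,000 pages, a task whose
   * rss + page tables + swap entries total 250,000 pages scores about 250;
   * a CAP_SYS_ADMIN task gets a 3% discount (-30), and oom_score_adj then
   * shifts the result by up to +/-1000.  Eligible tasks always score at
   * least 1 and at most 1000.
   */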
  
  /*
   * Determine the type of allocation constraint.
   */
  #ifdef CONFIG_NUMA
  static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  				gfp_t gfp_mask, nodemask_t *nodemask,
  				unsigned long *totalpages)
  {
  	struct zone *zone;
  	struct zoneref *z;
  	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  	bool cpuset_limited = false;
  	int nid;
  
  	/* Default to all available memory */
  	*totalpages = totalram_pages + total_swap_pages;
  
  	if (!zonelist)
  		return CONSTRAINT_NONE;
  	/*
  	 * This is only reached when __GFP_NOFAIL is used, so we should avoid
  	 * killing current; we would have to pick a task at random in this
  	 * case.  Ideally this would be CONSTRAINT_THISNODE, but there is no
  	 * way to handle that yet.
  	 */
  	if (gfp_mask & __GFP_THISNODE)
  		return CONSTRAINT_NONE;
  
  	/*
  	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
  	 * the page allocator means a mempolicy is in effect.  Cpuset policy
  	 * is enforced in get_page_from_freelist().
  	 */
  	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
  		*totalpages = total_swap_pages;
  		for_each_node_mask(nid, *nodemask)
  			*totalpages += node_spanned_pages(nid);
  		return CONSTRAINT_MEMORY_POLICY;
  	}
  
  	/* Check whether this allocation failure is caused by the cpuset's wall function */
  	for_each_zone_zonelist_nodemask(zone, z, zonelist,
  			high_zoneidx, nodemask)
  		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
  			cpuset_limited = true;
  
  	if (cpuset_limited) {
  		*totalpages = total_swap_pages;
  		for_each_node_mask(nid, cpuset_current_mems_allowed)
  			*totalpages += node_spanned_pages(nid);
  		return CONSTRAINT_CPUSET;
  	}
  	return CONSTRAINT_NONE;
  }
  #else
  static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  				gfp_t gfp_mask, nodemask_t *nodemask,
  				unsigned long *totalpages)
  {
  	*totalpages = totalram_pages + total_swap_pages;
  	return CONSTRAINT_NONE;
  }
  #endif
  
  /*
   * Simple selection loop. We choose the process with the highest
   * number of 'points'. We expect the caller will lock the tasklist.
   *
   * (not docbooked, we don't want this one cluttering up the manual)
   */
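  /*
   * Returns the chosen victim, NULL if no eligible task was found, or
   * ERR_PTR(-1UL) when the caller should abort because a task already has
   * access to memory reserves (TIF_MEMDIE) or is exiting and about to free
   * its memory anyway.
   */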
  static struct task_struct *select_bad_process(unsigned int *ppoints,
  		unsigned long totalpages, struct mem_cgroup *mem,
  		const nodemask_t *nodemask)
  {
  	struct task_struct *g, *p;
  	struct task_struct *chosen = NULL;
  	*ppoints = 0;
  
  	do_each_thread(g, p) {
  		unsigned int points;
  
  		if (p->exit_state)
  			continue;
  		if (oom_unkillable_task(p, mem, nodemask))
  			continue;
  
  		/*
  		 * This task already has access to memory reserves and is
  		 * being killed. Don't allow any other task access to the
  		 * memory reserve.
  		 *
  		 * Note: this may have a chance of deadlock if it gets
  		 * blocked waiting for another task which itself is waiting
  		 * for memory. Is there a better alternative?
  		 */
  		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
  			if (unlikely(frozen(p)))
  				thaw_process(p);
  			return ERR_PTR(-1UL);
  		}
  		if (!p->mm)
  			continue;
  
  		if (p->flags & PF_EXITING) {
  			/*
  			 * If p is the current task and is in the process of
  			 * releasing memory, we allow the "kill" to set
  			 * TIF_MEMDIE, which will allow it to gain access to
  			 * memory reserves.  Otherwise, it may stall forever.
  			 *
  			 * The loop isn't broken here, however, in case other
  			 * threads are found to have already been oom killed.
  			 */
  			if (p == current) {
  				chosen = p;
  				*ppoints = 1000;
  			} else {
  				/*
  				 * If this task is not being ptraced on exit,
  				 * then wait for it to finish before killing
  				 * some other task unnecessarily.
  				 */
  				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
  					return ERR_PTR(-1UL);
  			}
  		}
  
  		points = oom_badness(p, mem, nodemask, totalpages);
  		if (points > *ppoints) {
  			chosen = p;
  			*ppoints = points;
  		}
  	} while_each_thread(g, p);
  
  	return chosen;
  }
  
  /**
   * dump_tasks - dump current memory state of all system tasks
   * @mem: current's memory controller, if constrained
   * @nodemask: nodemask passed to page allocator for mempolicy ooms
   *
   * Dumps the current memory state of all eligible tasks.  Tasks not in the same
   * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
   * are not shown.
   * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
   * value, oom_score_adj value, and name.
   *
   * Call with tasklist_lock read-locked.
   */
  static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
  {
  	struct task_struct *p;
  	struct task_struct *task;
  
  	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
  	for_each_process(p) {
  		if (oom_unkillable_task(p, mem, nodemask))
  			continue;
  
  		task = find_lock_task_mm(p);
  		if (!task) {
  			/*
  			 * This is a kthread or all of p's threads have already
  			 * detached their mm's.  There's no need to report
  			 * them; they can't be oom killed anyway.
  			 */
  			continue;
  		}
  
  		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
  			task->pid, task_uid(task), task->tgid,
  			task->mm->total_vm, get_mm_rss(task->mm),
  			task_cpu(task), task->signal->oom_adj,
  			task->signal->oom_score_adj, task->comm);
  		task_unlock(task);
  	}
  }
  static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
  			struct mem_cgroup *mem, const nodemask_t *nodemask)
  {
  	task_lock(current);
  	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
  		"oom_adj=%d, oom_score_adj=%d\n",
  		current->comm, gfp_mask, order, current->signal->oom_adj,
  		current->signal->oom_score_adj);
  	cpuset_print_task_mems_allowed(current);
  	task_unlock(current);
  	dump_stack();
  	mem_cgroup_print_oom_info(mem, p);
  	show_mem(SHOW_MEM_FILTER_NODES);
  	if (sysctl_oom_dump_tasks)
  		dump_tasks(mem, nodemask);
  }
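  /* Convert a page count into kilobytes for printing */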
  #define K(x) ((x) << (PAGE_SHIFT-10))
  static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
  {
  	struct task_struct *q;
  	struct mm_struct *mm;
  	p = find_lock_task_mm(p);
  	if (!p)
  		return 1;
  
  	/* mm cannot be safely dereferenced after task_unlock(p) */
  	mm = p->mm;
  	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
  		task_pid_nr(p), p->comm, K(p->mm->total_vm),
  		K(get_mm_counter(p->mm, MM_ANONPAGES)),
  		K(get_mm_counter(p->mm, MM_FILEPAGES)));
  	task_unlock(p);
  
  	/*
  	 * Kill all user processes sharing p->mm in other thread groups, if any.
  	 * They don't get access to memory reserves or a higher scheduler
  	 * priority, though, to avoid depletion of all memory or task
  	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
  	 * task cannot exit because it requires the semaphore and it's contended
  	 * by another thread trying to allocate memory itself.  That thread will
  	 * now get access to memory reserves since it has a pending fatal
  	 * signal.
  	 */
  	for_each_process(q)
  		if (q->mm == mm && !same_thread_group(q, p) &&
  		    !(q->flags & PF_KTHREAD)) {
  			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
  				continue;
  
  			task_lock(q);	/* Protect ->comm from prctl() */
  			pr_err("Kill process %d (%s) sharing same memory\n",
  				task_pid_nr(q), q->comm);
  			task_unlock(q);
  			force_sig(SIGKILL, q);
  		}
  
  	set_tsk_thread_flag(p, TIF_MEMDIE);
  	force_sig(SIGKILL, p);
  
  	return 0;
  }
  #undef K

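  /*
   * Returns non-zero if the chosen victim could not be killed (it no longer
   * has an mm), in which case callers fall back to rescanning the tasklist.
   */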
  static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  			    unsigned int points, unsigned long totalpages,
  			    struct mem_cgroup *mem, nodemask_t *nodemask,
  			    const char *message)
  {
  	struct task_struct *victim = p;
  	struct task_struct *child;
  	struct task_struct *t = p;
  	unsigned int victim_points = 0;
  
  	if (printk_ratelimit())
  		dump_header(p, gfp_mask, order, mem, nodemask);
  
  	/*
  	 * If the task is already exiting, don't alarm the sysadmin or kill
  	 * its children or threads, just set TIF_MEMDIE so it can die quickly
  	 */
  	if (p->flags & PF_EXITING) {
  		set_tsk_thread_flag(p, TIF_MEMDIE);
  		return 0;
  	}
  	task_lock(p);
  	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
  		message, task_pid_nr(p), p->comm, points);
  	task_unlock(p);
  
  	/*
  	 * If any of p's children has a different mm and is eligible for kill,
  	 * the one with the highest oom_badness() score is sacrificed for its
  	 * parent.  This attempts to lose the minimal amount of work done while
  	 * still freeing memory.
  	 */
  	do {
  		list_for_each_entry(child, &t->children, sibling) {
  			unsigned int child_points;
  
  			if (child->mm == p->mm)
  				continue;
  			/*
  			 * oom_badness() returns 0 if the thread is unkillable
  			 */
  			child_points = oom_badness(child, mem, nodemask,
  								totalpages);
  			if (child_points > victim_points) {
  				victim = child;
  				victim_points = child_points;
  			}
  		}
  	} while_each_thread(p, t);
  	return oom_kill_task(victim, mem);
  }
  /*
   * Determines whether the kernel must panic because of the panic_on_oom sysctl.
   */
  static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
  				int order, const nodemask_t *nodemask)
  {
  	if (likely(!sysctl_panic_on_oom))
  		return;
  	if (sysctl_panic_on_oom != 2) {
  		/*
  		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
  		 * does not panic for cpuset, mempolicy, or memcg allocation
  		 * failures.
  		 */
  		if (constraint != CONSTRAINT_NONE)
  			return;
  	}
  	read_lock(&tasklist_lock);
  	dump_header(NULL, gfp_mask, order, NULL, nodemask);
  	read_unlock(&tasklist_lock);
  	panic("Out of memory: %s panic_on_oom is enabled\n",
  		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
  }
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
  {
  	unsigned long limit;
  	unsigned int points = 0;
  	struct task_struct *p;
  	/*
  	 * If current has a pending SIGKILL, then automatically select it.  The
  	 * goal is to allow it to allocate so that it may quickly exit and free
  	 * its memory.
  	 */
  	if (fatal_signal_pending(current)) {
  		set_thread_flag(TIF_MEMDIE);
  		return;
  	}
  	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
  	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
  	read_lock(&tasklist_lock);
  retry:
  	p = select_bad_process(&points, limit, mem, NULL);
  	if (!p || PTR_ERR(p) == -1UL)
  		goto out;
  	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
  				"Memory cgroup out of memory"))
  		goto retry;
  out:
  	read_unlock(&tasklist_lock);
  }
  #endif
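  /*
   * The OOM notifier chain: callbacks registered here run from out_of_memory()
   * before a victim is chosen and report any memory they managed to free
   * through the notifier argument; if anything was freed, the kill is skipped.
   */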
  static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
  
  int register_oom_notifier(struct notifier_block *nb)
  {
  	return blocking_notifier_chain_register(&oom_notify_list, nb);
  }
  EXPORT_SYMBOL_GPL(register_oom_notifier);
  
  int unregister_oom_notifier(struct notifier_block *nb)
  {
  	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
  }
  EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  /*
   * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
   * if a parallel OOM killing is already taking place that includes a zone in
   * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
   */
  int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  {
  	struct zoneref *z;
  	struct zone *zone;
  	int ret = 1;
  	spin_lock(&zone_scan_lock);
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		if (zone_is_oom_locked(zone)) {
  			ret = 0;
  			goto out;
  		}
  	}
  
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		/*
  		 * Lock each zone in the zonelist under zone_scan_lock so a
  		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
  		 * when it shouldn't.
  		 */
  		zone_set_flag(zone, ZONE_OOM_LOCKED);
  	}
  
  out:
  	spin_unlock(&zone_scan_lock);
  	return ret;
  }
  
  /*
   * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
   * allocation attempts with zonelists containing them may now recall the OOM
   * killer, if necessary.
   */
  void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  {
  	struct zoneref *z;
  	struct zone *zone;
  
  	spin_lock(&zone_scan_lock);
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		zone_clear_flag(zone, ZONE_OOM_LOCKED);
  	}
  	spin_unlock(&zone_scan_lock);
  }
  /*
   * Try to acquire the oom killer lock for all system zones.  Returns zero if a
   * parallel oom killing is taking place, otherwise locks all zones and returns
   * non-zero.
   */
  static int try_set_system_oom(void)
  {
  	struct zone *zone;
  	int ret = 1;
  
  	spin_lock(&zone_scan_lock);
  	for_each_populated_zone(zone)
  		if (zone_is_oom_locked(zone)) {
  			ret = 0;
  			goto out;
  		}
  	for_each_populated_zone(zone)
  		zone_set_flag(zone, ZONE_OOM_LOCKED);
  out:
  	spin_unlock(&zone_scan_lock);
  	return ret;
  }
  
  /*
   * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
   * attempts or page faults may now recall the oom killer, if necessary.
   */
  static void clear_system_oom(void)
  {
  	struct zone *zone;
  
  	spin_lock(&zone_scan_lock);
  	for_each_populated_zone(zone)
  		zone_clear_flag(zone, ZONE_OOM_LOCKED);
  	spin_unlock(&zone_scan_lock);
  }
  /**
   * out_of_memory - kill the "best" process when we run out of memory
   * @zonelist: zonelist pointer
   * @gfp_mask: memory allocation flags
   * @order: amount of memory being requested as a power of 2
   * @nodemask: nodemask passed to page allocator
   *
   * If we run out of memory, we have the choice between either
   * killing a random task (bad), letting the system crash (worse)
   * OR try to be smart about which process to kill. Note that we
   * don't have to be perfect here, we just have to be good.
   */
  void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
  		int order, nodemask_t *nodemask)
  {
  	const nodemask_t *mpol_mask;
  	struct task_struct *p;
  	unsigned long totalpages;
  	unsigned long freed = 0;
  	unsigned int points;
  	enum oom_constraint constraint = CONSTRAINT_NONE;
  	int killed = 0;
  
  	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
  	if (freed > 0)
  		/* Got some memory back in the last second. */
  		return;
  
  	/*
  	 * If current has a pending SIGKILL, then automatically select it.  The
  	 * goal is to allow it to allocate so that it may quickly exit and free
  	 * its memory.
  	 */
  	if (fatal_signal_pending(current)) {
  		set_thread_flag(TIF_MEMDIE);
  		return;
  	}
  	/*
  	 * Check if there were limitations on the allocation (only relevant for
  	 * NUMA) that may require different handling.
  	 */
  	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
  						&totalpages);
  	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
  	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
  
  	read_lock(&tasklist_lock);
  	if (sysctl_oom_kill_allocating_task &&
  	    !oom_unkillable_task(current, NULL, nodemask) &&
  	    current->mm) {
  		/*
  		 * oom_kill_process() needs tasklist_lock held.  If it returns
  		 * non-zero, current could not be killed so we must fallback to
  		 * the tasklist scan.
  		 */
  		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
  				NULL, nodemask,
  				"Out of memory (oom_kill_allocating_task)"))
  			goto out;
  	}
  
  retry:
  	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
  	if (PTR_ERR(p) == -1UL)
  		goto out;
  
  	/* Found nothing?!?! Either we hang forever, or we panic. */
  	if (!p) {
  		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
  		read_unlock(&tasklist_lock);
  		panic("Out of memory and no killable processes...\n");
  	}
  	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
  				nodemask, "Out of memory"))
  		goto retry;
  	killed = 1;
  out:
  	read_unlock(&tasklist_lock);
  
  	/*
  	 * Give "p" a good chance of killing itself before we
  	 * retry to allocate memory unless "p" is current
  	 */
  	if (killed && !test_thread_flag(TIF_MEMDIE))
  		schedule_timeout_uninterruptible(1);
  }
  
  /*
   * The pagefault handler calls here because it is out of memory, so kill a
   * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
   * oom killing is already in progress so do nothing.  If a task is found with
   * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
   */
  void pagefault_out_of_memory(void)
  {
  	if (try_set_system_oom()) {
  		out_of_memory(NULL, 0, 0, NULL);
  		clear_system_oom();
  	}
  	if (!test_thread_flag(TIF_MEMDIE))
  		schedule_timeout_uninterruptible(1);
  }