  /*
   *  linux/mm/oom_kill.c
   * 
   *  Copyright (C)  1998,2000  Rik van Riel
   *	Thanks go out to Claus Fischer for some serious inspiration and
   *	for goading me into coding this file...
   *  Copyright (C)  2010  Google, Inc.
   *	Rewritten by David Rientjes
   *
   *  The routines in this file are used to kill a process when
   *  we're seriously out of memory. This gets called from __alloc_pages()
   *  in mm/page_alloc.c when we really run out of memory.
   *
   *  Since we won't call these routines often (on a well-configured
   *  machine) this file will double as a 'coding guide' and a signpost
   *  for newbie kernel hackers. It features several pointers to major
   *  kernel subsystems and hints as to where to find out what things do.
   */
  #include <linux/oom.h>
  #include <linux/mm.h>
  #include <linux/err.h>
  #include <linux/gfp.h>
  #include <linux/sched.h>
  #include <linux/swap.h>
  #include <linux/timex.h>
  #include <linux/jiffies.h>
  #include <linux/cpuset.h>
  #include <linux/export.h>
  #include <linux/notifier.h>
  #include <linux/memcontrol.h>
  #include <linux/mempolicy.h>
  #include <linux/security.h>
  #include <linux/ptrace.h>
  #include <linux/freezer.h>
  #include <linux/ftrace.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/oom.h>

  int sysctl_panic_on_oom;
  int sysctl_oom_kill_allocating_task;
  int sysctl_oom_dump_tasks = 1;
  static DEFINE_SPINLOCK(zone_scan_lock);

  /*
   * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
   * @old_val: old oom_score_adj for compare
   * @new_val: new oom_score_adj for swap
   *
   * Sets the oom_score_adj value for current to @new_val iff its present value is
   * @old_val.  Usually used to reinstate a previous value to prevent racing with
   * userspace tuning the value in the interim.
   */
  void compare_swap_oom_score_adj(int old_val, int new_val)
  {
  	struct sighand_struct *sighand = current->sighand;
  
  	spin_lock_irq(&sighand->siglock);
  	if (current->signal->oom_score_adj == old_val)
  		current->signal->oom_score_adj = new_val;
  	trace_oom_score_adj_update(current);
  	spin_unlock_irq(&sighand->siglock);
  }
  /**
   * test_set_oom_score_adj() - set current's oom_score_adj and return old value
   * @new_val: new oom_score_adj value
   *
   * Sets the oom_score_adj value for current to @new_val with proper
   * synchronization and returns the old value.  Usually used to temporarily
   * set a value, save the old value in the caller, and then reinstate it later.
   */
  int test_set_oom_score_adj(int new_val)
  {
  	struct sighand_struct *sighand = current->sighand;
  	int old_val;
  
  	spin_lock_irq(&sighand->siglock);
  	old_val = current->signal->oom_score_adj;
  	current->signal->oom_score_adj = new_val;
  	trace_oom_score_adj_update(current);
  	spin_unlock_irq(&sighand->siglock);
  
  	return old_val;
  }
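
  /*
   * Illustrative caller pattern for the two helpers above (a sketch, not
   * code from this file): temporarily make current the most preferred OOM
   * victim and restore the old value later, tolerating a concurrent write
   * to oom_score_adj from userspace in between:
   *
   *	int old_val = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
   *
   *	...do the work that should prefer current as the OOM victim...
   *
   *	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_val);
   */
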
  #ifdef CONFIG_NUMA
  /**
   * has_intersects_mems_allowed() - check task eligibility for kill
   * @tsk: task struct of which task to consider
   * @mask: nodemask passed to page allocator for mempolicy ooms
   *
   * Task eligibility is determined by whether or not a candidate task, @tsk,
   * shares the same mempolicy nodes as current if it is bound by such a policy
   * and whether or not it has the same set of allowed cpuset nodes.
   */
  static bool has_intersects_mems_allowed(struct task_struct *tsk,
  					const nodemask_t *mask)
  {
  	struct task_struct *start = tsk;

  	do {
  		if (mask) {
  			/*
  			 * If this is a mempolicy constrained oom, tsk's
  			 * cpuset is irrelevant.  Only return true if its
  			 * mempolicy intersects current, otherwise it may be
  			 * needlessly killed.
  			 */
  			if (mempolicy_nodemask_intersects(tsk, mask))
  				return true;
  		} else {
  			/*
  			 * This is not a mempolicy constrained oom, so only
  			 * check the mems of tsk's cpuset.
  			 */
  			if (cpuset_mems_allowed_intersects(current, tsk))
  				return true;
  		}
  	} while_each_thread(start, tsk);
  	return false;
  }
  #else
  static bool has_intersects_mems_allowed(struct task_struct *tsk,
  					const nodemask_t *mask)
  {
  	return true;
  }
  #endif /* CONFIG_NUMA */

  /*
   * The process p may have detached its own ->mm while exiting or through
   * use_mm(), but one or more of its subthreads may still have a valid
   * pointer.  Return p, or any of its subthreads with a valid ->mm, with
   * task_lock() held.
   */
  struct task_struct *find_lock_task_mm(struct task_struct *p)
  {
  	struct task_struct *t = p;
  
  	do {
  		task_lock(t);
  		if (likely(t->mm))
  			return t;
  		task_unlock(t);
  	} while_each_thread(p, t);
  
  	return NULL;
  }
  /* return true if the task is not adequate as candidate victim task. */
  static bool oom_unkillable_task(struct task_struct *p,
  		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
  {
  	if (is_global_init(p))
  		return true;
  	if (p->flags & PF_KTHREAD)
  		return true;
  
  	/* When mem_cgroup_out_of_memory() and p is not member of the group */
  	if (memcg && !task_in_mem_cgroup(p, memcg))
  		return true;
  
  	/* p may not have freeable memory in nodemask */
  	if (!has_intersects_mems_allowed(p, nodemask))
  		return true;
  
  	return false;
  }
  /**
   * oom_badness - heuristic function to determine which candidate task to kill
   * @p: the task whose badness score we calculate
   * @memcg: the memory cgroup that is out of memory, or NULL for a global oom
   * @nodemask: nodemask passed to page allocator for mempolicy ooms
   * @totalpages: total present RAM allowed for page allocation
   *
   * The heuristic for determining which task to kill is made to be as simple and
   * predictable as possible.  The goal is to return the highest value for the
   * task consuming the most memory to avoid subsequent oom failures.
   */
  unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
  		      const nodemask_t *nodemask, unsigned long totalpages)
  {
  	long points;

  	if (oom_unkillable_task(p, memcg, nodemask))
  		return 0;

  	p = find_lock_task_mm(p);
  	if (!p)
  		return 0;
  	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
  		task_unlock(p);
  		return 0;
  	}
  	/*
  	 * The memory controller may have a limit of 0 bytes, so avoid a divide
  	 * by zero, if necessary.
  	 */
  	if (!totalpages)
  		totalpages = 1;
  
  	/*
  	 * The baseline for the badness score is the proportion of RAM that each
  	 * task's rss, pagetable and swap space use.
  	 */
  	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
  	points += get_mm_counter(p->mm, MM_SWAPENTS);
  
  	points *= 1000;
  	points /= totalpages;
  	task_unlock(p);
  
  	/*
  	 * Root processes get 3% bonus, just like the __vm_enough_memory()
  	 * implementation used by LSMs.
  	 */
  	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
  		points -= 30;
  
  	/*
  	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
  	 * either completely disable oom killing or always prefer a certain
  	 * task.
  	 */
  	points += p->signal->oom_score_adj;

  	/*
  	 * Never return 0 for an eligible task that may be killed since it's
  	 * possible that no single user task uses more than 0.1% of memory and
  	 * no single admin task uses more than 3.0%.
  	 */
  	if (points <= 0)
  		return 1;
  	return (points < 1000) ? points : 1000;
  }
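
  /*
   * Worked example for oom_badness() above (illustrative numbers only): with
   * totalpages = 1,000,000 pages, a task whose rss, page tables and swap
   * entries sum to 100,000 pages scores 100000 * 1000 / 1000000 = 100.
   * Running with CAP_SYS_ADMIN subtracts 30, giving 70, and an oom_score_adj
   * of -100 would then yield -30, which is clamped to the minimum score of 1
   * for an eligible task.
   */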
  
  /*
   * Determine the type of allocation constraint.
   */
  #ifdef CONFIG_NUMA
  static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  				gfp_t gfp_mask, nodemask_t *nodemask,
  				unsigned long *totalpages)
  {
  	struct zone *zone;
  	struct zoneref *z;
  	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  	bool cpuset_limited = false;
  	int nid;

  	/* Default to all available memory */
  	*totalpages = totalram_pages + total_swap_pages;
  
  	if (!zonelist)
  		return CONSTRAINT_NONE;
  	/*
  	 * We only reach here when __GFP_NOFAIL is used, so we should avoid
  	 * killing current; a random task would have to be killed in this case.
  	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
  	 * handle that yet.
  	 */
  	if (gfp_mask & __GFP_THISNODE)
  		return CONSTRAINT_NONE;

  	/*
  	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
  	 * the page allocator means a mempolicy is in effect.  Cpuset policy
  	 * is enforced in get_page_from_freelist().
  	 */
  	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
  		*totalpages = total_swap_pages;
  		for_each_node_mask(nid, *nodemask)
  			*totalpages += node_spanned_pages(nid);
  		return CONSTRAINT_MEMORY_POLICY;
  	}
  
  	/* Check this allocation failure is caused by cpuset's wall function */
  	for_each_zone_zonelist_nodemask(zone, z, zonelist,
  			high_zoneidx, nodemask)
  		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
  			cpuset_limited = true;

  	if (cpuset_limited) {
  		*totalpages = total_swap_pages;
  		for_each_node_mask(nid, cpuset_current_mems_allowed)
  			*totalpages += node_spanned_pages(nid);
  		return CONSTRAINT_CPUSET;
  	}
  	return CONSTRAINT_NONE;
  }
  #else
  static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  				gfp_t gfp_mask, nodemask_t *nodemask,
  				unsigned long *totalpages)
  {
  	*totalpages = totalram_pages + total_swap_pages;
  	return CONSTRAINT_NONE;
  }
  #endif
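
  /*
   * Note that *totalpages computed above becomes the denominator used by
   * oom_badness(), so a cpuset- or mempolicy-constrained oom scores tasks
   * against only the memory the failed allocation could actually have used.
   */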
  
  /*
   * Simple selection loop. We choose the process with the highest
   * number of 'points'. We expect the caller will lock the tasklist.
   *
   * (not docbooked, we don't want this one cluttering up the manual)
   */
  static struct task_struct *select_bad_process(unsigned int *ppoints,
  		unsigned long totalpages, struct mem_cgroup *memcg,
  		const nodemask_t *nodemask)
  {
  	struct task_struct *g, *p;
  	struct task_struct *chosen = NULL;
  	*ppoints = 0;

  	do_each_thread(g, p) {
  		unsigned int points;

  		if (p->exit_state)
  			continue;
  		if (oom_unkillable_task(p, memcg, nodemask))
  			continue;

  		/*
  		 * This task already has access to memory reserves and is
  		 * being killed. Don't allow any other task access to the
  		 * memory reserve.
  		 *
  		 * Note: this may have a chance of deadlock if it gets
  		 * blocked waiting for another task which itself is waiting
  		 * for memory. Is there a better alternative?
  		 */
  		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
  			if (unlikely(frozen(p)))
  				__thaw_task(p);
  			return ERR_PTR(-1UL);
  		}
  		if (!p->mm)
  			continue;

  		if (p->flags & PF_EXITING) {
  			/*
  			 * If p is the current task and is in the process of
  			 * releasing memory, we allow the "kill" to set
  			 * TIF_MEMDIE, which will allow it to gain access to
  			 * memory reserves.  Otherwise, it may stall forever.
  			 *
  			 * The loop isn't broken here, however, in case other
  			 * threads are found to have already been oom killed.
  			 */
  			if (p == current) {
  				chosen = p;
  				*ppoints = 1000;
  			} else {
  				/*
  				 * If this task is not being ptraced on exit,
  				 * then wait for it to finish before killing
  				 * some other task unnecessarily.
  				 */
  				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
  					return ERR_PTR(-1UL);
  			}
  		}

  		points = oom_badness(p, memcg, nodemask, totalpages);
  		if (points > *ppoints) {
  			chosen = p;
  			*ppoints = points;
  		}
  	} while_each_thread(g, p);

  	return chosen;
  }
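
  /*
   * Return convention for select_bad_process() above: NULL when no eligible
   * task was found, ERR_PTR(-1UL) when the scan should be aborted because a
   * victim is already on its way out, and otherwise the task with the
   * highest badness score.
   */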
  
  /**
   * dump_tasks - dump current memory state of all system tasks
   * @memcg: current's memory controller, if constrained
   * @nodemask: nodemask passed to page allocator for mempolicy ooms
   *
   * Dumps the current memory state of all eligible tasks.  Tasks not in the same
   * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
   * are not shown.
   * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
   * value, oom_score_adj value, and name.
   *
   * Call with tasklist_lock read-locked.
   */
  static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
  {
  	struct task_struct *p;
  	struct task_struct *task;

  	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
  	for_each_process(p) {
  		if (oom_unkillable_task(p, memcg, nodemask))
  			continue;

  		task = find_lock_task_mm(p);
  		if (!task) {
  			/*
  			 * This is a kthread or all of p's threads have already
  			 * detached their mm's.  There's no need to report
  			 * them; they can't be oom killed anyway.
  			 */
  			continue;
  		}

  		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
  			task->pid, task_uid(task), task->tgid,
  			task->mm->total_vm, get_mm_rss(task->mm),
  			task_cpu(task), task->signal->oom_adj,
  			task->signal->oom_score_adj, task->comm);
  		task_unlock(task);
  	}
  }
  static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
  			struct mem_cgroup *memcg, const nodemask_t *nodemask)
  {
  	task_lock(current);
  	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
  		"oom_adj=%d, oom_score_adj=%d\n",
  		current->comm, gfp_mask, order, current->signal->oom_adj,
  		current->signal->oom_score_adj);
  	cpuset_print_task_mems_allowed(current);
  	task_unlock(current);
  	dump_stack();
  	mem_cgroup_print_oom_info(memcg, p);
  	show_mem(SHOW_MEM_FILTER_NODES);
  	if (sysctl_oom_dump_tasks)
  		dump_tasks(memcg, nodemask);
  }
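
  /* K(x) converts a count of pages into kilobytes for the messages below. */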
  #define K(x) ((x) << (PAGE_SHIFT-10))
  static int oom_kill_task(struct task_struct *p)
  {
  	struct task_struct *q;
  	struct mm_struct *mm;
  	p = find_lock_task_mm(p);
  	if (!p)
  		return 1;

  	/* mm cannot be safely dereferenced after task_unlock(p) */
  	mm = p->mm;
  	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
  		task_pid_nr(p), p->comm, K(p->mm->total_vm),
  		K(get_mm_counter(p->mm, MM_ANONPAGES)),
  		K(get_mm_counter(p->mm, MM_FILEPAGES)));
  	task_unlock(p);

  	/*
  	 * Kill all user processes sharing p->mm in other thread groups, if any.
  	 * They don't get access to memory reserves or a higher scheduler
  	 * priority, though, to avoid depletion of all memory or task
  	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
  	 * task cannot exit because it requires the semaphore and it's contended
  	 * by another thread trying to allocate memory itself.  That thread will
  	 * now get access to memory reserves since it has a pending fatal
  	 * signal.
  	 */
  	for_each_process(q)
  		if (q->mm == mm && !same_thread_group(q, p) &&
  		    !(q->flags & PF_KTHREAD)) {
  			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
  				continue;
  			task_lock(q);	/* Protect ->comm from prctl() */
  			pr_err("Kill process %d (%s) sharing same memory\n",
  				task_pid_nr(q), q->comm);
  			task_unlock(q);
  			force_sig(SIGKILL, q);
  		}

  	set_tsk_thread_flag(p, TIF_MEMDIE);
  	force_sig(SIGKILL, p);

  	return 0;
  }
  #undef K

  static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  			    unsigned int points, unsigned long totalpages,
  			    struct mem_cgroup *memcg, nodemask_t *nodemask,
  			    const char *message)
  {
  	struct task_struct *victim = p;
  	struct task_struct *child;
  	struct task_struct *t = p;
  	unsigned int victim_points = 0;

  	if (printk_ratelimit())
  		dump_header(p, gfp_mask, order, memcg, nodemask);

  	/*
  	 * If the task is already exiting, don't alarm the sysadmin or kill
  	 * its children or threads, just set TIF_MEMDIE so it can die quickly
  	 */
  	if (p->flags & PF_EXITING) {
  		set_tsk_thread_flag(p, TIF_MEMDIE);
  		return 0;
  	}
  	task_lock(p);
  	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
  		message, task_pid_nr(p), p->comm, points);
  	task_unlock(p);

  	/*
  	 * If any of p's children has a different mm and is eligible for kill,
  	 * the one with the highest oom_badness() score is sacrificed for its
  	 * parent.  This attempts to lose the minimal amount of work done while
  	 * still freeing memory.
  	 */
  	do {
  		list_for_each_entry(child, &t->children, sibling) {
  			unsigned int child_points;

  			if (child->mm == p->mm)
  				continue;
  			/*
  			 * oom_badness() returns 0 if the thread is unkillable
  			 */
  			child_points = oom_badness(child, memcg, nodemask,
  								totalpages);
  			if (child_points > victim_points) {
  				victim = child;
  				victim_points = child_points;
  			}
  		}
  	} while_each_thread(p, t);
  	return oom_kill_task(victim);
  }
  /*
   * Determines whether the kernel must panic because of the panic_on_oom sysctl.
   */
  static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
  				int order, const nodemask_t *nodemask)
  {
  	if (likely(!sysctl_panic_on_oom))
  		return;
  	if (sysctl_panic_on_oom != 2) {
  		/*
  		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
  		 * does not panic for cpuset, mempolicy, or memcg allocation
  		 * failures.
  		 */
  		if (constraint != CONSTRAINT_NONE)
  			return;
  	}
  	read_lock(&tasklist_lock);
  	dump_header(NULL, gfp_mask, order, NULL, nodemask);
  	read_unlock(&tasklist_lock);
  	panic("Out of memory: %s panic_on_oom is enabled\n",
  		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
  }
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
  {
  	unsigned long limit;
  	unsigned int points = 0;
  	struct task_struct *p;
  	/*
  	 * If current has a pending SIGKILL, then automatically select it.  The
  	 * goal is to allow it to allocate so that it may quickly exit and free
  	 * its memory.
  	 */
  	if (fatal_signal_pending(current)) {
  		set_thread_flag(TIF_MEMDIE);
  		return;
  	}
  	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
  	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
  	read_lock(&tasklist_lock);
  retry:
  	p = select_bad_process(&points, limit, memcg, NULL);
  	if (!p || PTR_ERR(p) == -1UL)
  		goto out;
  	if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
  				"Memory cgroup out of memory"))
  		goto retry;
  out:
  	read_unlock(&tasklist_lock);
  }
  #endif
  static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
  
  int register_oom_notifier(struct notifier_block *nb)
  {
  	return blocking_notifier_chain_register(&oom_notify_list, nb);
  }
  EXPORT_SYMBOL_GPL(register_oom_notifier);
  
  int unregister_oom_notifier(struct notifier_block *nb)
  {
  	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
  }
  EXPORT_SYMBOL_GPL(unregister_oom_notifier);
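
  /*
   * Sketch of how a driver might use the notifier interface above (the
   * callback and release_my_caches() are hypothetical, not part of this
   * file):
   *
   *	static int my_oom_notify(struct notifier_block *nb,
   *				 unsigned long unused, void *parm)
   *	{
   *		unsigned long *freed = parm;
   *
   *		*freed += release_my_caches();
   *		return NOTIFY_OK;
   *	}
   *
   *	static struct notifier_block my_oom_nb = {
   *		.notifier_call	= my_oom_notify,
   *	};
   *
   *	register_oom_notifier(&my_oom_nb);
   *
   * Pages reported back through *freed cause out_of_memory() below to return
   * without killing anything.
   */
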
  /*
   * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
   * if a parallel OOM killing is already taking place that includes a zone in
   * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
   */
  int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  {
  	struct zoneref *z;
  	struct zone *zone;
  	int ret = 1;
  	spin_lock(&zone_scan_lock);
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		if (zone_is_oom_locked(zone)) {
  			ret = 0;
  			goto out;
  		}
  	}
  
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		/*
  		 * Lock each zone in the zonelist under zone_scan_lock so a
  		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
  		 * when it shouldn't.
  		 */
  		zone_set_flag(zone, ZONE_OOM_LOCKED);
  	}

  out:
  	spin_unlock(&zone_scan_lock);
  	return ret;
  }
  
  /*
   * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
   * allocation attempts with zonelists containing them may now recall the OOM
   * killer, if necessary.
   */
  void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  {
  	struct zoneref *z;
  	struct zone *zone;

  	spin_lock(&zone_scan_lock);
  	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  		zone_clear_flag(zone, ZONE_OOM_LOCKED);
  	}
  	spin_unlock(&zone_scan_lock);
  }
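
  /*
   * Expected caller pattern for the two helpers above (a sketch of how the
   * page allocator uses them, not code from this file):
   *
   *	if (try_set_zonelist_oom(zonelist, gfp_mask)) {
   *		out_of_memory(zonelist, gfp_mask, order, nodemask);
   *		clear_zonelist_oom(zonelist, gfp_mask);
   *	}
   *
   * pagefault_out_of_memory() at the end of this file follows the same
   * pattern using the system-wide try_set_system_oom()/clear_system_oom().
   */
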
  /*
   * Try to acquire the oom killer lock for all system zones.  Returns zero if a
   * parallel oom killing is taking place, otherwise locks all zones and returns
   * non-zero.
   */
  static int try_set_system_oom(void)
  {
  	struct zone *zone;
  	int ret = 1;
  
  	spin_lock(&zone_scan_lock);
  	for_each_populated_zone(zone)
  		if (zone_is_oom_locked(zone)) {
  			ret = 0;
  			goto out;
  		}
  	for_each_populated_zone(zone)
  		zone_set_flag(zone, ZONE_OOM_LOCKED);
  out:
  	spin_unlock(&zone_scan_lock);
  	return ret;
  }
  
  /*
   * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
   * attempts or page faults may now recall the oom killer, if necessary.
   */
  static void clear_system_oom(void)
  {
  	struct zone *zone;
  
  	spin_lock(&zone_scan_lock);
  	for_each_populated_zone(zone)
  		zone_clear_flag(zone, ZONE_OOM_LOCKED);
  	spin_unlock(&zone_scan_lock);
  }
  /**
   * out_of_memory - kill the "best" process when we run out of memory
   * @zonelist: zonelist pointer
   * @gfp_mask: memory allocation flags
   * @order: amount of memory being requested as a power of 2
   * @nodemask: nodemask passed to page allocator
   *
   * If we run out of memory, we have the choice between either
   * killing a random task (bad), letting the system crash (worse)
   * OR try to be smart about which process to kill. Note that we
   * don't have to be perfect here, we just have to be good.
   */
  void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
  		int order, nodemask_t *nodemask)
  {
  	const nodemask_t *mpol_mask;
  	struct task_struct *p;
  	unsigned long totalpages;
  	unsigned long freed = 0;
  	unsigned int points;
  	enum oom_constraint constraint = CONSTRAINT_NONE;
  	int killed = 0;
  
  	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
  	if (freed > 0)
  		/* Got some memory back in the last second. */
  		return;

  	/*
  	 * If current has a pending SIGKILL, then automatically select it.  The
  	 * goal is to allow it to allocate so that it may quickly exit and free
  	 * its memory.
  	 */
  	if (fatal_signal_pending(current)) {
  		set_thread_flag(TIF_MEMDIE);
  		return;
  	}
  	/*
  	 * Check if there were limitations on the allocation (only relevant for
  	 * NUMA) that may require different handling.
  	 */
  	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
  						&totalpages);
  	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
  	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

  	read_lock(&tasklist_lock);
  	if (sysctl_oom_kill_allocating_task &&
  	    !oom_unkillable_task(current, NULL, nodemask) &&
  	    current->mm) {
  		/*
  		 * oom_kill_process() needs tasklist_lock held.  If it returns
  		 * non-zero, current could not be killed so we must fallback to
  		 * the tasklist scan.
  		 */
  		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
  				NULL, nodemask,
  				"Out of memory (oom_kill_allocating_task)"))
  			goto out;
  	}
  
  retry:
  	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
  	if (PTR_ERR(p) == -1UL)
  		goto out;
  
  	/* Found nothing?!?! Either we hang forever, or we panic. */
  	if (!p) {
  		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
  		read_unlock(&tasklist_lock);
  		panic("Out of memory and no killable processes...\n");
  	}
  	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
  				nodemask, "Out of memory"))
  		goto retry;
  	killed = 1;
  out:
  	read_unlock(&tasklist_lock);
  
  	/*
  	 * Give "p" a good chance of killing itself before we
  	 * retry to allocate memory unless "p" is current
  	 */
  	if (killed && !test_thread_flag(TIF_MEMDIE))
  		schedule_timeout_uninterruptible(1);
  }
  
  /*
   * The pagefault handler calls here because it is out of memory, so kill a
   * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
   * oom killing is already in progress so do nothing.  If a task is found with
   * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
   */
  void pagefault_out_of_memory(void)
  {
  	if (try_set_system_oom()) {
  		out_of_memory(NULL, 0, 0, NULL);
  		clear_system_oom();
  	}
  	if (!test_thread_flag(TIF_MEMDIE))
  		schedule_timeout_uninterruptible(1);
  }