Blame view
mm/oom_kill.c
15.4 KB
1da177e4c
|
1 2 3 4 5 6 7 8 |
/* * linux/mm/oom_kill.c * * Copyright (C) 1998,2000 Rik van Riel * Thanks go out to Claus Fischer for some serious inspiration and * for goading me into coding this file... * * The routines in this file are used to kill a process when |
a49335cce
|
9 10 |
* we're seriously out of memory. This gets called from __alloc_pages() * in mm/page_alloc.c when we really run out of memory. |
1da177e4c
|
11 12 13 14 15 16 |
* * Since we won't call these routines often (on a well-configured * machine) this file will double as a 'coding guide' and a signpost * for newbie kernel hackers. It features several pointers to major * kernel subsystems and hints as to where to find out what things do. */ |
8ac773b4f
|
17 |
#include <linux/oom.h> |
1da177e4c
|
18 |
#include <linux/mm.h> |
4e950f6f0
|
19 |
#include <linux/err.h> |
1da177e4c
|
20 21 22 23 |
#include <linux/sched.h> #include <linux/swap.h> #include <linux/timex.h> #include <linux/jiffies.h> |
ef08e3b49
|
24 |
#include <linux/cpuset.h> |
8bc719d3c
|
25 26 |
#include <linux/module.h> #include <linux/notifier.h> |
c7ba5c9e8
|
27 |
#include <linux/memcontrol.h> |
5cd9c58fb
|
28 |
#include <linux/security.h> |
1da177e4c
|
29 |
|
/*
 * OOM-killer tunables, exposed via /proc/sys/vm/ (see kernel/sysctl.c):
 *  - panic_on_oom: panic instead of killing (2 == panic unconditionally)
 *  - oom_kill_allocating_task: kill the allocating task, skip the scan
 *  - oom_dump_tasks: dump per-task memory state before killing
 */
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;

/* Serializes ZONE_OOM_LOCKED flag manipulation across parallel OOM kills. */
static DEFINE_SPINLOCK(zone_scan_mutex);
1da177e4c
|
34 35 36 |
/* #define DEBUG */ /** |
6937a25cf
|
37 |
* badness - calculate a numeric value for how bad this task has been |
1da177e4c
|
38 |
* @p: task struct of which task we should calculate |
a49335cce
|
39 |
* @uptime: current uptime in seconds |
1da177e4c
|
40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
* * The formula used is relatively simple and documented inline in the * function. The main rationale is that we want to select a good task * to kill when we run out of memory. * * Good in this context means that: * 1) we lose the minimum amount of work done * 2) we recover a large amount of memory * 3) we don't kill anything innocent of eating tons of memory * 4) we want to kill the minimum amount of processes (one) * 5) we try to kill the process the user expects us to kill, this * algorithm has been meticulously tuned to meet the principle * of least surprise ... (be careful when you change it) */ |
97d87c971
|
54 |
unsigned long badness(struct task_struct *p, unsigned long uptime) |
1da177e4c
|
55 56 |
{ unsigned long points, cpu_time, run_time, s; |
97c2c9b84
|
57 58 |
struct mm_struct *mm; struct task_struct *child; |
1da177e4c
|
59 |
|
97c2c9b84
|
60 61 62 63 |
task_lock(p); mm = p->mm; if (!mm) { task_unlock(p); |
1da177e4c
|
64 |
return 0; |
97c2c9b84
|
65 |
} |
1da177e4c
|
66 67 68 69 |
/* * The memory size of the process is the basis for the badness. */ |
97c2c9b84
|
70 71 72 73 74 75 |
points = mm->total_vm; /* * After this unlock we can no longer dereference local variable `mm' */ task_unlock(p); |
1da177e4c
|
76 77 |
/* |
7ba348594
|
78 79 80 81 82 83 |
* swapoff can easily use up all memory, so kill those first. */ if (p->flags & PF_SWAPOFF) return ULONG_MAX; /* |
1da177e4c
|
84 |
* Processes which fork a lot of child processes are likely |
9827b781f
|
85 |
* a good choice. We add half the vmsize of the children if they |
1da177e4c
|
86 |
* have an own mm. This prevents forking servers to flood the |
9827b781f
|
87 88 89 |
* machine with an endless amount of children. In case a single * child is eating the vast majority of memory, adding only half * to the parents will make the child our kill candidate of choice. |
1da177e4c
|
90 |
*/ |
97c2c9b84
|
91 92 93 94 95 |
list_for_each_entry(child, &p->children, sibling) { task_lock(child); if (child->mm != mm && child->mm) points += child->mm->total_vm/2 + 1; task_unlock(child); |
1da177e4c
|
96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 |
} /* * CPU time is in tens of seconds and run time is in thousands * of seconds. There is no particular reason for this other than * that it turned out to work very well in practice. */ cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime)) >> (SHIFT_HZ + 3); if (uptime >= p->start_time.tv_sec) run_time = (uptime - p->start_time.tv_sec) >> 10; else run_time = 0; s = int_sqrt(cpu_time); if (s) points /= s; s = int_sqrt(int_sqrt(run_time)); if (s) points /= s; /* * Niced processes are most likely less important, so double * their badness points. */ if (task_nice(p) > 0) points *= 2; /* * Superuser processes are usually more important, so we make it * less likely that we kill those. */ |
a2f2945a9
|
129 130 |
if (has_capability_noaudit(p, CAP_SYS_ADMIN) || has_capability_noaudit(p, CAP_SYS_RESOURCE)) |
1da177e4c
|
131 132 133 134 135 136 137 138 |
points /= 4; /* * We don't want to kill a process with direct hardware access. * Not only could that mess up the hardware, but usually users * tend to only have this flag set on applications they think * of as important. */ |
a2f2945a9
|
139 |
if (has_capability_noaudit(p, CAP_SYS_RAWIO)) |
1da177e4c
|
140 141 142 |
points /= 4; /* |
7887a3da7
|
143 144 145 146 |
* If p's nodes don't overlap ours, it may still help to kill p * because p may have allocated or otherwise mapped memory on * this node before. However it will be less likely. */ |
bbe373f2c
|
147 |
if (!cpuset_mems_allowed_intersects(current, p)) |
7887a3da7
|
148 149 150 |
points /= 8; /* |
1da177e4c
|
151 152 153 |
* Adjust the score by oomkilladj. */ if (p->oomkilladj) { |
9a82782f8
|
154 155 156 |
if (p->oomkilladj > 0) { if (!points) points = 1; |
1da177e4c
|
157 |
points <<= p->oomkilladj; |
9a82782f8
|
158 |
} else |
1da177e4c
|
159 160 161 162 |
points >>= -(p->oomkilladj); } #ifdef DEBUG |
a5e58a614
|
163 164 |
printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points ", |
1da177e4c
|
165 166 167 168 169 170 |
p->pid, p->comm, points); #endif return points; } /* |
9b0f8b040
|
171 172 |
* Determine the type of allocation constraint. */ |
70e24bdf6
|
173 174 |
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) |
9b0f8b040
|
175 176 |
{ #ifdef CONFIG_NUMA |
54a6eb5c4
|
177 |
struct zone *zone; |
dd1a239f6
|
178 |
struct zoneref *z; |
54a6eb5c4
|
179 |
enum zone_type high_zoneidx = gfp_zone(gfp_mask); |
ee31af5d6
|
180 |
nodemask_t nodes = node_states[N_HIGH_MEMORY]; |
9b0f8b040
|
181 |
|
54a6eb5c4
|
182 183 184 |
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) if (cpuset_zone_allowed_softwall(zone, gfp_mask)) node_clear(zone_to_nid(zone), nodes); |
9b0f8b040
|
185 186 187 188 189 190 191 192 193 194 195 |
else return CONSTRAINT_CPUSET; if (!nodes_empty(nodes)) return CONSTRAINT_MEMORY_POLICY; #endif return CONSTRAINT_NONE; } /* |
1da177e4c
|
196 197 198 199 200 |
* Simple selection loop. We chose the process with the highest * number of 'points'. We expect the caller will lock the tasklist. * * (not docbooked, we don't want this one cluttering up the manual) */ |
c7ba5c9e8
|
201 202 |
static struct task_struct *select_bad_process(unsigned long *ppoints, struct mem_cgroup *mem) |
1da177e4c
|
203 |
{ |
1da177e4c
|
204 205 206 |
struct task_struct *g, *p; struct task_struct *chosen = NULL; struct timespec uptime; |
9827b781f
|
207 |
*ppoints = 0; |
1da177e4c
|
208 209 |
do_posix_clock_monotonic_gettime(&uptime); |
a49335cce
|
210 211 |
do_each_thread(g, p) { unsigned long points; |
a49335cce
|
212 |
|
28324d1df
|
213 214 215 216 |
/* * skip kernel threads and tasks which have already released * their mm. */ |
5081dde33
|
217 218 |
if (!p->mm) continue; |
28324d1df
|
219 |
/* skip the init task */ |
b460cbc58
|
220 |
if (is_global_init(p)) |
a49335cce
|
221 |
continue; |
4c4a22148
|
222 223 |
if (mem && !task_in_mem_cgroup(p, mem)) continue; |
ef08e3b49
|
224 |
|
a49335cce
|
225 |
/* |
b78483a4b
|
226 227 228 229 230 231 232 233 234 235 236 237 |
* This task already has access to memory reserves and is * being killed. Don't allow any other task access to the * memory reserve. * * Note: this may have a chance of deadlock if it gets * blocked waiting for another task which itself is waiting * for memory. Is there a better alternative? */ if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); /* |
6937a25cf
|
238 |
* This is in the process of releasing memory so wait for it |
a49335cce
|
239 |
* to finish before killing some other task by mistake. |
50ec3bbff
|
240 241 242 243 244 |
* * However, if p is the current task, we allow the 'kill' to * go ahead if it is exiting: this will simply set TIF_MEMDIE, * which will allow it to gain access to memory reserves in * the process of exiting and releasing its resources. |
b78483a4b
|
245 |
* Otherwise we could get an easy OOM deadlock. |
a49335cce
|
246 |
*/ |
b78483a4b
|
247 248 249 |
if (p->flags & PF_EXITING) { if (p != current) return ERR_PTR(-1UL); |
972c4ea59
|
250 251 |
chosen = p; *ppoints = ULONG_MAX; |
50ec3bbff
|
252 |
} |
972c4ea59
|
253 |
|
4a3ede107
|
254 255 |
if (p->oomkilladj == OOM_DISABLE) continue; |
a49335cce
|
256 |
|
97d87c971
|
257 |
points = badness(p, uptime.tv_sec); |
9827b781f
|
258 |
if (points > *ppoints || !chosen) { |
a49335cce
|
259 |
chosen = p; |
9827b781f
|
260 |
*ppoints = points; |
1da177e4c
|
261 |
} |
a49335cce
|
262 |
} while_each_thread(g, p); |
972c4ea59
|
263 |
|
1da177e4c
|
264 265 266 267 |
return chosen; } /** |
1b578df02
|
268 269 270 |
* dump_tasks - dump current memory state of all system tasks * @mem: target memory controller * |
fef1bdd68
|
271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 |
* Dumps the current memory state of all system tasks, excluding kernel threads. * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj * score, and name. * * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are * shown. * * Call with tasklist_lock read-locked. */ static void dump_tasks(const struct mem_cgroup *mem) { struct task_struct *g, *p; printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj " "name "); do_each_thread(g, p) { /* * total_vm and rss sizes do not exist for tasks with a * detached mm so there's no need to report them. */ if (!p->mm) continue; if (mem && !task_in_mem_cgroup(p, mem)) continue; |
b4416d2be
|
296 297 |
if (!thread_group_leader(p)) continue; |
fef1bdd68
|
298 299 300 301 |
task_lock(p); printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s ", |
c69e8d9c0
|
302 303 304 |
p->pid, __task_cred(p)->uid, p->tgid, p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj, p->comm); |
fef1bdd68
|
305 306 307 |
task_unlock(p); } while_each_thread(g, p); } |
1b578df02
|
308 |
/* |
5a291b98b
|
309 310 311 |
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO * set. |
1da177e4c
|
312 |
*/ |
f3af38d30
|
313 |
static void __oom_kill_task(struct task_struct *p, int verbose) |
1da177e4c
|
314 |
{ |
b460cbc58
|
315 |
if (is_global_init(p)) { |
1da177e4c
|
316 317 318 319 320 |
WARN_ON(1); printk(KERN_WARNING "tried to kill init! "); return; } |
01017a227
|
321 |
if (!p->mm) { |
1da177e4c
|
322 323 324 |
WARN_ON(1); printk(KERN_WARNING "tried to kill an mm-less task! "); |
1da177e4c
|
325 326 |
return; } |
50ec3bbff
|
327 |
|
f3af38d30
|
328 |
if (verbose) |
ba25f9dcc
|
329 330 331 |
printk(KERN_ERR "Killed process %d (%s) ", task_pid_nr(p), p->comm); |
1da177e4c
|
332 333 334 335 336 337 |
/* * We give our sacrificial lamb high priority and access to * all the memory it needs. That way it should be able to * exit() and clear out its resources quickly... */ |
fa717060f
|
338 |
p->rt.time_slice = HZ; |
1da177e4c
|
339 340 341 342 |
set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); } |
f3af38d30
|
343 |
static int oom_kill_task(struct task_struct *p) |
1da177e4c
|
344 |
{ |
013159227
|
345 |
struct mm_struct *mm; |
36c8b5868
|
346 |
struct task_struct *g, *q; |
1da177e4c
|
347 |
|
013159227
|
348 349 350 351 352 353 354 355 356 357 |
mm = p->mm; /* WARNING: mm may not be dereferenced since we did not obtain its * value from get_task_mm(p). This is OK since all we need to do is * compare mm to q->mm below. * * Furthermore, even if mm contains a non-NULL value, p->mm may * change to NULL at any time since we do not hold task_lock(p). * However, this is of no concern to us. */ |
01017a227
|
358 |
if (mm == NULL) |
013159227
|
359 |
return 1; |
1da177e4c
|
360 |
|
c33e0fca3
|
361 362 363 364 |
/* * Don't kill the process if any threads are set to OOM_DISABLE */ do_each_thread(g, q) { |
35ae834fa
|
365 |
if (q->mm == mm && q->oomkilladj == OOM_DISABLE) |
c33e0fca3
|
366 367 |
return 1; } while_each_thread(g, q); |
f3af38d30
|
368 |
__oom_kill_task(p, 1); |
c33e0fca3
|
369 |
|
1da177e4c
|
370 371 |
/* * kill all processes that share the ->mm (i.e. all threads), |
f2a2a7108
|
372 373 |
* but are in a different thread group. Don't let them have access * to memory reserves though, otherwise we might deplete all memory. |
1da177e4c
|
374 |
*/ |
c33e0fca3
|
375 |
do_each_thread(g, q) { |
bac0abd61
|
376 |
if (q->mm == mm && !same_thread_group(q, p)) |
650a7c974
|
377 |
force_sig(SIGKILL, q); |
c33e0fca3
|
378 |
} while_each_thread(g, q); |
1da177e4c
|
379 |
|
013159227
|
380 |
return 0; |
1da177e4c
|
381 |
} |
7213f5066
|
382 |
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, |
fef1bdd68
|
383 384 |
unsigned long points, struct mem_cgroup *mem, const char *message) |
1da177e4c
|
385 |
{ |
1da177e4c
|
386 |
struct task_struct *c; |
1da177e4c
|
387 |
|
7213f5066
|
388 389 390 391 392 393 394 |
if (printk_ratelimit()) { printk(KERN_WARNING "%s invoked oom-killer: " "gfp_mask=0x%x, order=%d, oomkilladj=%d ", current->comm, gfp_mask, order, current->oomkilladj); dump_stack(); show_mem(); |
fef1bdd68
|
395 396 |
if (sysctl_oom_dump_tasks) dump_tasks(mem); |
7213f5066
|
397 |
} |
50ec3bbff
|
398 399 400 401 402 |
/* * If the task is already exiting, don't alarm the sysadmin or kill * its children or threads, just set TIF_MEMDIE so it can die quickly */ if (p->flags & PF_EXITING) { |
f3af38d30
|
403 |
__oom_kill_task(p, 0); |
50ec3bbff
|
404 405 |
return 0; } |
f3af38d30
|
406 407 |
printk(KERN_ERR "%s: kill process %d (%s) score %li or a child ", |
ba25f9dcc
|
408 |
message, task_pid_nr(p), p->comm, points); |
f3af38d30
|
409 |
|
1da177e4c
|
410 |
/* Try to kill a child first */ |
7b1915a98
|
411 |
list_for_each_entry(c, &p->children, sibling) { |
1da177e4c
|
412 413 |
if (c->mm == p->mm) continue; |
f3af38d30
|
414 |
if (!oom_kill_task(c)) |
013159227
|
415 |
return 0; |
1da177e4c
|
416 |
} |
f3af38d30
|
417 |
return oom_kill_task(p); |
1da177e4c
|
418 |
} |
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * Memory-controller OOM entry point: pick and kill the worst task inside
 * @mem, retrying until a kill succeeds.  Falls back to current when no
 * candidate is found.
 */
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *victim;

	cgroup_lock();
	read_lock(&tasklist_lock);
retry:
	victim = select_bad_process(&points, mem);

	/* ERR_PTR(-1UL) means another task is already on its way out. */
	if (PTR_ERR(victim) == -1UL)
		goto out;

	if (!victim)
		victim = current;

	if (oom_kill_process(victim, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
	cgroup_unlock();
}
#endif
/* Chain of callbacks invoked (to reclaim memory) before a kill is attempted. */
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

/* Add @nb to the OOM notifier chain. */
int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

/* Remove @nb from the OOM notifier chain. */
int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
098d7f128
|
455 456 457 458 459 |
/* * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero * if a parallel OOM killing is already taking place that includes a zone in * the zonelist. Otherwise, locks all zones in the zonelist and returns 1. */ |
dd1a239f6
|
460 |
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask) |
098d7f128
|
461 |
{ |
dd1a239f6
|
462 463 |
struct zoneref *z; struct zone *zone; |
098d7f128
|
464 |
int ret = 1; |
ae74138da
|
465 |
spin_lock(&zone_scan_mutex); |
dd1a239f6
|
466 467 |
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { if (zone_is_oom_locked(zone)) { |
098d7f128
|
468 469 470 |
ret = 0; goto out; } |
dd1a239f6
|
471 472 473 474 475 476 477 478 479 480 |
} for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { /* * Lock each zone in the zonelist under zone_scan_mutex so a * parallel invocation of try_set_zone_oom() doesn't succeed * when it shouldn't. */ zone_set_flag(zone, ZONE_OOM_LOCKED); } |
098d7f128
|
481 |
|
098d7f128
|
482 |
out: |
ae74138da
|
483 |
spin_unlock(&zone_scan_mutex); |
098d7f128
|
484 485 486 487 488 489 490 491 |
return ret; } /* * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed * allocation attempts with zonelists containing them may now recall the OOM * killer, if necessary. */ |
dd1a239f6
|
492 |
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) |
098d7f128
|
493 |
{ |
dd1a239f6
|
494 495 |
struct zoneref *z; struct zone *zone; |
098d7f128
|
496 |
|
ae74138da
|
497 |
spin_lock(&zone_scan_mutex); |
dd1a239f6
|
498 499 500 |
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { zone_clear_flag(zone, ZONE_OOM_LOCKED); } |
ae74138da
|
501 |
spin_unlock(&zone_scan_mutex); |
098d7f128
|
502 |
} |
1da177e4c
|
503 |
/** |
6937a25cf
|
504 |
* out_of_memory - kill the "best" process when we run out of memory |
1b578df02
|
505 506 507 |
* @zonelist: zonelist pointer * @gfp_mask: memory allocation flags * @order: amount of memory being requested as a power of 2 |
1da177e4c
|
508 509 510 511 512 513 |
* * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ |
9b0f8b040
|
514 |
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) |
1da177e4c
|
515 |
{ |
36c8b5868
|
516 |
struct task_struct *p; |
d6713e046
|
517 |
unsigned long points = 0; |
8bc719d3c
|
518 |
unsigned long freed = 0; |
70e24bdf6
|
519 |
enum oom_constraint constraint; |
8bc719d3c
|
520 521 522 523 524 |
blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ return; |
1da177e4c
|
525 |
|
2b744c01a
|
526 527 528 |
if (sysctl_panic_on_oom == 2) panic("out of memory. Compulsory panic_on_oom is selected. "); |
9b0f8b040
|
529 530 531 532 |
/* * Check if there were limitations on the allocation (only relevant for * NUMA) that may require different handling. */ |
2b45ab339
|
533 |
constraint = constrained_alloc(zonelist, gfp_mask); |
2b45ab339
|
534 535 536 |
read_lock(&tasklist_lock); switch (constraint) { |
9b0f8b040
|
537 |
case CONSTRAINT_MEMORY_POLICY: |
fef1bdd68
|
538 |
oom_kill_process(current, gfp_mask, order, points, NULL, |
9b0f8b040
|
539 540 |
"No available memory (MPOL_BIND)"); break; |
9b0f8b040
|
541 |
case CONSTRAINT_NONE: |
fadd8fbd1
|
542 543 544 |
if (sysctl_panic_on_oom) panic("out of memory. panic_on_oom is selected "); |
fe071d7e8
|
545 546 547 |
/* Fall-through */ case CONSTRAINT_CPUSET: if (sysctl_oom_kill_allocating_task) { |
fef1bdd68
|
548 |
oom_kill_process(current, gfp_mask, order, points, NULL, |
fe071d7e8
|
549 550 551 |
"Out of memory (oom_kill_allocating_task)"); break; } |
1da177e4c
|
552 |
retry: |
9b0f8b040
|
553 554 555 556 |
/* * Rambo mode: Shoot down a process and hope it solves whatever * issues we may have. */ |
c7ba5c9e8
|
557 |
p = select_bad_process(&points, NULL); |
1da177e4c
|
558 |
|
9b0f8b040
|
559 560 |
if (PTR_ERR(p) == -1UL) goto out; |
1da177e4c
|
561 |
|
9b0f8b040
|
562 563 564 |
/* Found nothing?!?! Either we hang forever, or we panic. */ if (!p) { read_unlock(&tasklist_lock); |
9b0f8b040
|
565 566 567 |
panic("Out of memory and no killable processes... "); } |
1da177e4c
|
568 |
|
fef1bdd68
|
569 |
if (oom_kill_process(p, gfp_mask, order, points, NULL, |
7213f5066
|
570 |
"Out of memory")) |
9b0f8b040
|
571 572 573 574 |
goto retry; break; } |
1da177e4c
|
575 |
|
9b0f8b040
|
576 |
out: |
140ffcec4
|
577 |
read_unlock(&tasklist_lock); |
1da177e4c
|
578 579 580 |
/* * Give "p" a good chance of killing itself before we |
2f659f462
|
581 |
* retry to allocate memory unless "p" is current |
1da177e4c
|
582 |
*/ |
2f659f462
|
583 |
if (!test_thread_flag(TIF_MEMDIE)) |
140ffcec4
|
584 |
schedule_timeout_uninterruptible(1); |
1da177e4c
|
585 |
} |