mm/memcontrol.c
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>
#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,	/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET	(1024)
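
/*
 * Example (illustrative, derived from the constants above): threshold
 * notifiers are re-checked roughly once per 128 page-in/page-out events,
 * while the soft-limit tree and the NUMA scan info are refreshed only
 * once per 1024 events.
 */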

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

enum {
	SCAN_BY_LIMIT,
	SCAN_BY_SYSTEM,
	NR_SCAN_CONTEXT,
	SCAN_BY_SHRINK,	/* not recorded now */
};

enum {
	SCAN,
	SCAN_ANON,
	SCAN_FILE,
	ROTATE,
	ROTATE_ANON,
	ROTATE_FILE,
	FREED,
	FREED_ANON,
	FREED_FILE,
	ELAPSED,
	NR_SCANSTATS,
};

struct scanstat {
	spinlock_t	lock;
	unsigned long	stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
	unsigned long	rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
};

const char *scanstat_string[NR_SCANSTATS] = {
	"scanned_pages",
	"scanned_anon_pages",
	"scanned_file_pages",
	"rotated_pages",
	"rotated_anon_pages",
	"rotated_file_pages",
	"freed_pages",
	"freed_anon_pages",
	"freed_file_pages",
	"elapsed_ns",
};
#define SCANSTAT_WORD_LIMIT	"_by_limit"
#define SCANSTAT_WORD_SYSTEM	"_by_system"
#define SCANSTAT_WORD_HIERARCHY	"_under_hierarchy"
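
/*
 * Example (illustrative): the strings above combine with the _WORD_
 * suffixes to name the exported counters, e.g. "scanned_pages" with
 * SCANSTAT_WORD_LIMIT reads as "scanned_pages_by_limit".
 */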

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/* For recording LRU-scan statistics */
	struct scanstat scanstat;
	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
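
/*
 * Example (illustrative): MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE) packs the
 * counter type into bits 16..31 and the res_counter attribute into bits
 * 0..15, so that MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves
 * again when a control file is read or written.
 */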

/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT	(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(struct mem_cgroup *mem);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}

static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}
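
/*
 * The tree walked below is ordered by usage_in_excess, so rb_last() (the
 * rightmost node) is always the zone that exceeds its soft limit by the
 * largest amount.
 */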
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because the memory is being accounted. Even if we provide quick-and-fuzzy
 * reads, we always have to visit all online cpus and make the sum. So, for
 * now, unnecessary synchronization is not implemented. (just implemented for
 * cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *mem,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.events[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}

unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(mem, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);
	return total;
}

static unsigned long
mem_cgroup_nr_lru_pages(struct mem_cgroup *mem, unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
	return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = this_cpu_read(mem->stat->targets[target]);
	/* from time_after() in jiffies.h */
	return ((long)next - (long)val < 0);
}

static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);

	switch (target) {
	case MEM_CGROUP_TARGET_THRESH:
		next = val + THRESHOLDS_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_SOFTLIMIT:
		next = val + SOFTLIMIT_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_NUMAINFO:
		next = val + NUMAINFO_EVENTS_TARGET;
		break;
	default:
		return;
	}

	this_cpu_write(mem->stat->targets[target], next);
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
		mem_cgroup_threshold(mem);
		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
		if (unlikely(__memcg_event_check(mem,
		     MEM_CGROUP_TARGET_SOFTLIMIT))) {
			mem_cgroup_update_tree(mem, page);
			__mem_cgroup_target_update(mem,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		}
#if MAX_NUMNODES > 1
		if (unlikely(__memcg_event_check(mem,
			MEM_CGROUP_TARGET_NUMAINFO))) {
			atomic_inc(&mem->numainfo_events);
			__mem_cgroup_target_update(mem,
				MEM_CGROUP_TARGET_NUMAINFO);
		}
#endif
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be being moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}
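
/*
 * Note (illustrative): a successful return from the function above pins
 * the css, so each try_get_mem_cgroup_from_mm() caller must pair it with
 * css_put(&mem->css), as task_in_mem_cgroup() below does.
 */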

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * searching a memory cgroup which has the smallest ID under given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() is for visiting all cgroups under a tree. Please
 * note that "break" is not allowed in the loop; we hold a reference count.
 * Instead, set "cond" to false and "continue" to exit the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)
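
/*
 * Example (illustrative; "done" stands for a hypothetical predicate) of
 * the "cond" exit protocol above:
 *
 *	bool cond = true;
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond)
 *		if (done(iter))
 *			cond = false;	(instead of "break")
 */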

static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *mem;

	if (!mm)
		return;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem))
		goto out;

	switch (idx) {
	case PGMAJFAULT:
		mem_cgroup_pgmajfault(mem, 1);
		break;
	case PGFAULT:
		mem_cgroup_pgfault(mem, 1);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routines of the global LRU independently from
 * memcg. What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache; it is added to the LRU before charge.
 * If the PCG_USED bit is not set, page_cgroup is not added to this private
 * LRU. When moving account, the page is not on the LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
 * while it's linked to the lru, because the page may be reused after it's
 * fully uncharged. To handle that, unlink page_cgroup from the LRU when
 * charging it again. It's done under lock_page and it is expected that
 * zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * This all charge/uncharge is done under some mutual exclusion.
	 * So, we don't need to take care of changes in the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
	 * is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/* taking care of that the page is added to LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

/*
 * Checks whether given mem is same or in the root_mem's
 * hierarchy subtree
 */
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
		struct mem_cgroup *mem)
{
	if (root_mem != mem) {
		return (root_mem->use_hierarchy &&
			css_is_ancestor(&mem->css, &root_mem->css));
	}

	return true;
}
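
/*
 * Example (illustrative): for a hierarchy root_mem/A/B with use_hierarchy
 * enabled, mem_cgroup_same_or_subtree(root_mem, B) is true because B's css
 * is a descendant of root_mem's; with use_hierarchy disabled it is true
 * only for root_mem itself.
 */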

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * if hierarchy is enabled in "curr" and "curr" is a child of "mem"
	 * in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
	 */
	ret = mem_cgroup_same_or_subtree(mem, curr);
	css_put(&curr->css);
	return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}
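
/*
 * Worked example (illustrative) for the ratio above: a cgroup with 4GB of
 * anonymous pages has gb == 4, so inactive_ratio == int_sqrt(40) == 6, and
 * the inactive anon list is reported "low" whenever inactive * 6 < active.
 */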

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));

	return (active > inactive);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = zone_to_nid(z);
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * file + active;
	int ret;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		if (unlikely(!PageCgroupUsed(pc)))
			continue;

		page = lookup_cgroup_page(pc);

		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		ret = __isolate_lru_page(page, mode, file);
		switch (ret) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			break;
		default:
			break;
		}
	}

	*scanned = scan;

	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
				      0, 0, 0, mode);

	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin >> PAGE_SHIFT;
}
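
/*
 * Worked example (illustrative): with a 100MB limit and 80MB of usage,
 * res_counter_margin() yields 20MB, so mem_cgroup_margin() returns
 * 20MB >> PAGE_SHIFT == 5120 pages on a 4K-page system; with swap
 * accounting enabled, the smaller of the mem and mem+swap margins wins.
 */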

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}
4b5343346 memcg: clean up t... |
1299 1300 1301 |
static bool mem_cgroup_under_move(struct mem_cgroup *mem) { |
2bd9bb206 memcg: clean up w... |
1302 1303 |
struct mem_cgroup *from; struct mem_cgroup *to; |
4b5343346 memcg: clean up t... |
1304 |
bool ret = false; |
2bd9bb206 memcg: clean up w... |
1305 1306 1307 1308 1309 1310 1311 1312 1313 |
/* * Unlike the task_move routines, we access mc.to and mc.from without the * mutual exclusion of cgroup_mutex; we take the spinlock instead. */ spin_lock(&mc.lock); from = mc.from; to = mc.to; if (!from) goto unlock; |
3e92041d6 memcg: add mem_cg... |
1314 1315 1316 |
ret = mem_cgroup_same_or_subtree(mem, from) || mem_cgroup_same_or_subtree(mem, to); |
2bd9bb206 memcg: clean up w... |
1317 1318 |
unlock: spin_unlock(&mc.lock); |
4b5343346 memcg: clean up t... |
1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 |
return ret; } static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) { if (mc.moving_task && current != mc.moving_task) { if (mem_cgroup_under_move(mem)) { DEFINE_WAIT(wait); prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); /* moving charge context might have finished. */ if (mc.moving_task) schedule(); finish_wait(&mc.waitq, &wait); return true; } } return false; } |
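/*
 * Usage sketch, for illustration only: a charger that races with
 * move_account() parks on mc.waitq as above and then simply retries.
 * The real caller is mem_cgroup_do_charge() further below.
 */
static int __maybe_unused example_charge_saw_move(struct mem_cgroup *mem_over_limit)
{
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return 1;	/* slept until the move finished; retry the charge */
	return 0;		/* no move in flight; continue with OOM handling */
}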
e222432bf memcg: show memcg... |
1337 |
/** |
6a6135b64 memcg: typo in co... |
1338 |
* mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. |
e222432bf memcg: show memcg... |
1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 |
* @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is * enabled */ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { struct cgroup *task_cgrp; struct cgroup *mem_cgrp; /* * Need a buffer in BSS, can't rely on allocations. The code relies * on the assumption that OOM is serialized for memory controller. * If this assumption is broken, revisit this code. */ static char memcg_name[PATH_MAX]; int ret; |
d31f56dbf memcg: avoid oom-... |
1356 |
if (!memcg || !p) |
e222432bf memcg: show memcg... |
1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 |
return; rcu_read_lock(); mem_cgrp = memcg->css.cgroup; task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); if (ret < 0) { /* * Unfortunately, we are unable to convert to a useful name, * but we'll still print out the usage information */ rcu_read_unlock(); goto done; } rcu_read_unlock(); printk(KERN_INFO "Task in %s killed", memcg_name); rcu_read_lock(); ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); if (ret < 0) { rcu_read_unlock(); goto done; } rcu_read_unlock(); /* * Continues from above, so we don't need a KERN_ level */ printk(KERN_CONT " as a result of limit of %s\n", memcg_name); done: printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n", res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, res_counter_read_u64(&memcg->res, RES_FAILCNT)); printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, " "failcnt %llu\n", res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); } |
81d39c20f memcg: fix shrink... |
1405 1406 1407 1408 1409 1410 1411 |
/* * This function returns the number of memcgs under the hierarchy tree. * Returns 1 (self count) if there are no children. */ static int mem_cgroup_count_children(struct mem_cgroup *mem) { int num = 0; |
7d74b06f2 memcg: use for_ea... |
1412 1413 1414 1415 |
struct mem_cgroup *iter; for_each_mem_cgroup_tree(iter, mem) num++; |
81d39c20f memcg: fix shrink... |
1416 1417 |
return num; } |
6d61ef409 memcg: memory cgr... |
1418 |
/* |
a63d83f42 oom: badness heur... |
1419 1420 1421 1422 1423 1424 |
* Return the memory (and swap, if configured) limit for a memcg. */ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) { u64 limit; u64 memsw; |
f3e8eb70b memcg: fix unit m... |
1425 1426 |
limit = res_counter_read_u64(&memcg->res, RES_LIMIT); limit += total_swap_pages << PAGE_SHIFT; |
a63d83f42 oom: badness heur... |
1427 1428 1429 1430 1431 1432 1433 1434 1435 |
memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); /* * If memsw is finite and limits the amount of swap space available * to this memcg, return that limit. */ return min(limit, memsw); } /* |
04046e1a0 memcg: use CSS ID |
1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 |
* Visit the first child (not necessarily first in the cgroup list, since we * track last_scanned_child) of @mem and use it to reclaim pages from. */ static struct mem_cgroup * mem_cgroup_select_victim(struct mem_cgroup *root_mem) { struct mem_cgroup *ret = NULL; struct cgroup_subsys_state *css; int nextid, found; if (!root_mem->use_hierarchy) { css_get(&root_mem->css); ret = root_mem; } while (!ret) { rcu_read_lock(); nextid = root_mem->last_scanned_child + 1; css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css, &found); if (css && css_tryget(css)) ret = container_of(css, struct mem_cgroup, css); rcu_read_unlock(); /* Updates scanning parameter */ |
04046e1a0 memcg: use CSS ID |
1462 1463 1464 1465 1466 |
if (!css) { /* this means start scan from ID:1 */ root_mem->last_scanned_child = 0; } else root_mem->last_scanned_child = found; |
04046e1a0 memcg: use CSS ID |
1467 1468 1469 1470 |
} return ret; } |
4d0c066d2 memcg: fix reclai... |
1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 |
/** * test_mem_cgroup_node_reclaimable * @mem: the target memcg * @nid: the node ID to be checked. * @noswap: specify true here if the caller wants file-only information. * * This function checks whether the specified memcg contains any * reclaimable pages on a node; it returns true if there are any. */ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem, int nid, bool noswap) { |
bb2a0de92 memcg: consolidat... |
1484 |
if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE)) |
4d0c066d2 memcg: fix reclai... |
1485 1486 1487 |
return true; if (noswap || !total_swap_pages) return false; |
bb2a0de92 memcg: consolidat... |
1488 |
if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON)) |
4d0c066d2 memcg: fix reclai... |
1489 1490 1491 1492 |
return true; return false; } |
889976dbc memcg: reclaim me... |
1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 |
#if MAX_NUMNODES > 1 /* * Always updating the nodemask is not very good - even if we have an empty * list or the wrong list here, we can start from some node and traverse all * nodes based on the zonelist. So update the list loosely once every 10 secs. */ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) { int nid; |
453a9bf34 memcg: fix numa s... |
1504 1505 1506 1507 1508 1509 1510 |
/* * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET * pagein/pageout changes since the last update. */ if (!atomic_read(&mem->numainfo_events)) return; if (atomic_inc_return(&mem->numainfo_updating) > 1) |
889976dbc memcg: reclaim me... |
1511 |
return; |
889976dbc memcg: reclaim me... |
1512 1513 1514 1515 |
/* make a nodemask where this memcg uses memory from */ mem->scan_nodes = node_states[N_HIGH_MEMORY]; for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { |
4d0c066d2 memcg: fix reclai... |
1516 1517 |
if (!test_mem_cgroup_node_reclaimable(mem, nid, false)) node_clear(nid, mem->scan_nodes); |
889976dbc memcg: reclaim me... |
1518 |
} |
453a9bf34 memcg: fix numa s... |
1519 1520 1521 |
atomic_set(&mem->numainfo_events, 0); atomic_set(&mem->numainfo_updating, 0); |
889976dbc memcg: reclaim me... |
1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 |
} /* * Select a node to start reclaim from. Because all we need is to reduce the * usage counter, starting from anywhere is OK. Reclaiming from the current * node has both pros and cons. * * Freeing memory from the current node means freeing memory from a node which * we'll use or have used, so it may hurt the LRU. And if several threads hit * their limits, they will contend on one node. But freeing from a remote * node costs more for memory reclaim because of memory latency. * * For now, we use round-robin. A better algorithm is welcome. */ int mem_cgroup_select_victim_node(struct mem_cgroup *mem) { int node; mem_cgroup_may_update_nodemask(mem); node = mem->last_scanned_node; node = next_node(node, mem->scan_nodes); if (node == MAX_NUMNODES) node = first_node(mem->scan_nodes); /* * We call this when we hit a limit, not when pages are added to the LRU. * No LRU may hold pages because all pages are UNEVICTABLE, or the * memcg is too small and no pages are on the LRU. In that case, * we use the current node. */ if (unlikely(node == MAX_NUMNODES)) node = numa_node_id(); mem->last_scanned_node = node; return node; } |
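/*
 * Worked example, for illustration only: with scan_nodes = {0,2,3} and
 * last_scanned_node == 2, next_node() above yields 3; on the following
 * call next_node() runs past MAX_NUMNODES and first_node() wraps the
 * walk back to node 0.  With an empty mask both helpers return
 * MAX_NUMNODES and we fall back to numa_node_id(), as noted above.
 */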
4d0c066d2 memcg: fix reclai... |
1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 |
/* * Check all nodes for reclaimable pages. For a quick scan, we make use of * scan_nodes, which lets us skip unused nodes. But scan_nodes is lazily * updated and may not contain enough new information, so we need to * double check. */ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) { int nid; /* * Quick check... making use of scan_nodes. * We can skip unused nodes. */ if (!nodes_empty(mem->scan_nodes)) { for (nid = first_node(mem->scan_nodes); nid < MAX_NUMNODES; nid = next_node(nid, mem->scan_nodes)) { if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) return true; } } /* * Check the rest of the nodes. */ for_each_node_state(nid, N_HIGH_MEMORY) { if (node_isset(nid, mem->scan_nodes)) continue; if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) return true; } return false; } |
889976dbc memcg: reclaim me... |
1592 1593 1594 1595 1596 |
#else int mem_cgroup_select_victim_node(struct mem_cgroup *mem) { return 0; } |
4d0c066d2 memcg: fix reclai... |
1597 1598 1599 1600 1601 |
bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) { return test_mem_cgroup_node_reclaimable(mem, 0, noswap); } |
889976dbc memcg: reclaim me... |
1602 |
#endif |
82f9d486e memcg: add memory... |
1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 |
static void __mem_cgroup_record_scanstat(unsigned long *stats, struct memcg_scanrecord *rec) { stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; stats[SCAN_ANON] += rec->nr_scanned[0]; stats[SCAN_FILE] += rec->nr_scanned[1]; stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; stats[ROTATE_ANON] += rec->nr_rotated[0]; stats[ROTATE_FILE] += rec->nr_rotated[1]; stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; stats[FREED_ANON] += rec->nr_freed[0]; stats[FREED_FILE] += rec->nr_freed[1]; stats[ELAPSED] += rec->elapsed; } static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) { struct mem_cgroup *mem; int context = rec->context; if (context >= NR_SCAN_CONTEXT) return; mem = rec->mem; spin_lock(&mem->scanstat.lock); __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); spin_unlock(&mem->scanstat.lock); mem = rec->root; spin_lock(&mem->scanstat.lock); __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); spin_unlock(&mem->scanstat.lock); } |
04046e1a0 memcg: use CSS ID |
1640 1641 1642 1643 |
/* * Scan the hierarchy if needed to reclaim memory. We remember the last child * we reclaimed from, so that we don't end up penalizing one child extensively * based on its position in the children list. |
6d61ef409 memcg: memory cgr... |
1644 1645 |
* * root_mem is the original ancestor that we've been reclaiming from. |
04046e1a0 memcg: use CSS ID |
1646 1647 1648 |
* * We give up and return to the caller when we visit root_mem twice. * (other groups can be removed while we're walking....) |
81d39c20f memcg: fix shrink... |
1649 1650 |
* * If shrink==true, this returns immediately to avoid freeing too much. |
6d61ef409 memcg: memory cgr... |
1651 1652 |
*/ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, |
4e4169535 memory controller... |
1653 |
struct zone *zone, |
75822b449 memory controller... |
1654 |
gfp_t gfp_mask, |
0ae5e89c6 memcg: count the ... |
1655 1656 |
unsigned long reclaim_options, unsigned long *total_scanned) |
6d61ef409 memcg: memory cgr... |
1657 |
{ |
04046e1a0 memcg: use CSS ID |
1658 1659 1660 |
struct mem_cgroup *victim; int ret, total = 0; int loop = 0; |
75822b449 memory controller... |
1661 1662 |
bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; |
4e4169535 memory controller... |
1663 |
bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; |
82f9d486e memcg: add memory... |
1664 |
struct memcg_scanrecord rec; |
9d11ea9f1 memcg: simplify t... |
1665 |
unsigned long excess; |
82f9d486e memcg: add memory... |
1666 |
unsigned long scanned; |
9d11ea9f1 memcg: simplify t... |
1667 1668 |
excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; |
04046e1a0 memcg: use CSS ID |
1669 |
|
22a668d7c memcg: fix behavi... |
1670 |
/* If memsw_is_minimum==1, swap-out is of no use. */ |
108b6a784 memcg: fix behavi... |
1671 |
if (!check_soft && !shrink && root_mem->memsw_is_minimum) |
22a668d7c memcg: fix behavi... |
1672 |
noswap = true; |
82f9d486e memcg: add memory... |
1673 1674 1675 1676 1677 1678 1679 1680 |
if (shrink) rec.context = SCAN_BY_SHRINK; else if (check_soft) rec.context = SCAN_BY_SYSTEM; else rec.context = SCAN_BY_LIMIT; rec.root = root_mem; |
4e4169535 memory controller... |
1681 |
while (1) { |
04046e1a0 memcg: use CSS ID |
1682 |
victim = mem_cgroup_select_victim(root_mem); |
4e4169535 memory controller... |
1683 |
if (victim == root_mem) { |
04046e1a0 memcg: use CSS ID |
1684 |
loop++; |
fbc29a25e memcg: avoid perc... |
1685 1686 1687 1688 1689 1690 1691 |
/* * We are not draining per cpu cached charges during * soft limit reclaim because global reclaim doesn't * care about charges. It tries to free some memory, and * freeing cached charges would not help with that. */ if (!check_soft && loop >= 1) |
26fe61684 memcg: fix percpu... |
1692 |
drain_all_stock_async(root_mem); |
4e4169535 memory controller... |
1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 |
if (loop >= 2) { /* * If we have not been able to reclaim * anything, it might be because there are * no reclaimable pages under this hierarchy */ if (!check_soft || !total) { css_put(&victim->css); break; } /* |
25985edce Fix common misspe... |
1704 |
* We want to do more targeted reclaim. |
4e4169535 memory controller... |
1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 |
* excess >> 2 is not too large, so we don't * reclaim too much, nor too small, so we don't keep * coming back to reclaim from this cgroup */ if (total >= (excess >> 2) || (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) { css_put(&victim->css); break; } } } |
4d0c066d2 memcg: fix reclai... |
1716 |
if (!mem_cgroup_reclaimable(victim, noswap)) { |
04046e1a0 memcg: use CSS ID |
1717 1718 |
/* this cgroup's local usage == 0 */ css_put(&victim->css); |
6d61ef409 memcg: memory cgr... |
1719 1720 |
continue; } |
82f9d486e memcg: add memory... |
1721 1722 1723 1724 1725 1726 1727 1728 |
rec.mem = victim; rec.nr_scanned[0] = 0; rec.nr_scanned[1] = 0; rec.nr_rotated[0] = 0; rec.nr_rotated[1] = 0; rec.nr_freed[0] = 0; rec.nr_freed[1] = 0; rec.elapsed = 0; |
04046e1a0 memcg: use CSS ID |
1729 |
/* we use swappiness of local cgroup */ |
0ae5e89c6 memcg: count the ... |
1730 |
if (check_soft) { |
4e4169535 memory controller... |
1731 |
ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, |
82f9d486e memcg: add memory... |
1732 1733 |
noswap, zone, &rec, &scanned); *total_scanned += scanned; |
0ae5e89c6 memcg: count the ... |
1734 |
} else |
4e4169535 memory controller... |
1735 |
ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, |
82f9d486e memcg: add memory... |
1736 1737 |
noswap, &rec); mem_cgroup_record_scanstat(&rec); |
04046e1a0 memcg: use CSS ID |
1738 |
css_put(&victim->css); |
81d39c20f memcg: fix shrink... |
1739 1740 1741 1742 1743 1744 1745 |
/* * When shrinking usage, we can't tell here whether we should stop or * reclaim more; that depends on the caller. last_scanned_child is * enough to keep fairness across the tree. */ if (shrink) return ret; |
04046e1a0 memcg: use CSS ID |
1746 |
total += ret; |
4e4169535 memory controller... |
1747 |
if (check_soft) { |
9d11ea9f1 memcg: simplify t... |
1748 |
if (!res_counter_soft_limit_excess(&root_mem->res)) |
4e4169535 memory controller... |
1749 |
return total; |
9d11ea9f1 memcg: simplify t... |
1750 |
} else if (mem_cgroup_margin(root_mem)) |
4fd14ebf6 memcg: remove unu... |
1751 |
return total; |
6d61ef409 memcg: memory cgr... |
1752 |
} |
04046e1a0 memcg: use CSS ID |
1753 |
return total; |
6d61ef409 memcg: memory cgr... |
1754 |
} |
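/*
 * Worked example, for illustration only: in the soft-limit (check_soft)
 * case, if the hierarchy is 64 pages over its soft limit (excess == 64),
 * the loop above stops once total reaches excess >> 2 == 16 reclaimed
 * pages, or after MEM_CGROUP_MAX_RECLAIM_LOOPS visits to the root -
 * each invocation trims about a quarter of the excess rather than
 * reclaiming all the way back to the limit.
 */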
867578cbc memcg: fix oom ki... |
1755 1756 1757 |
/* * Check whether the OOM killer is already running under our hierarchy. * If someone is, return false. |
1af8efe96 memcg: change mem... |
1758 |
* Has to be called with memcg_oom_lock held. |
867578cbc memcg: fix oom ki... |
1759 1760 1761 |
*/ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) { |
79dfdaccd memcg: make oom_l... |
1762 1763 1764 |
struct mem_cgroup *iter, *failed = NULL; bool cond = true; |
a636b327f memcg: avoid unne... |
1765 |
|
79dfdaccd memcg: make oom_l... |
1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 |
for_each_mem_cgroup_tree_cond(iter, mem, cond) { if (iter->oom_lock) { /* * This subtree of our hierarchy is already locked, * so we cannot take the lock. */ failed = iter; cond = false; } else iter->oom_lock = true; |
7d74b06f2 memcg: use for_ea... |
1781 |
} |
867578cbc memcg: fix oom ki... |
1782 |
|
79dfdaccd memcg: make oom_l... |
1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 |
if (!failed) return true; /* * OK, we failed to lock the whole subtree, so we have to clean up * what we set up on the subtree up to the failing group. */ cond = true; for_each_mem_cgroup_tree_cond(iter, mem, cond) { if (iter == failed) { cond = false; continue; } iter->oom_lock = false; } return false; |
a636b327f memcg: avoid unne... |
1800 |
} |
0b7f569e4 memcg: fix OOM ki... |
1801 |
|
79dfdaccd memcg: make oom_l... |
1802 |
/* |
1af8efe96 memcg: change mem... |
1803 |
* Has to be called with memcg_oom_lock held. |
79dfdaccd memcg: make oom_l... |
1804 |
*/ |
7d74b06f2 memcg: use for_ea... |
1805 |
static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) |
0b7f569e4 memcg: fix OOM ki... |
1806 |
{ |
7d74b06f2 memcg: use for_ea... |
1807 |
struct mem_cgroup *iter; |
79dfdaccd memcg: make oom_l... |
1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 |
for_each_mem_cgroup_tree(iter, mem) iter->oom_lock = false; return 0; } static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem) { struct mem_cgroup *iter; for_each_mem_cgroup_tree(iter, mem) atomic_inc(&iter->under_oom); } static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) { struct mem_cgroup *iter; |
867578cbc memcg: fix oom ki... |
1824 1825 1826 1827 1828 |
/* * When a new child is created while the hierarchy is under oom, * mem_cgroup_oom_lock() may not be called. We have to use * atomic_add_unless() here. */ |
7d74b06f2 memcg: use for_ea... |
1829 |
for_each_mem_cgroup_tree(iter, mem) |
79dfdaccd memcg: make oom_l... |
1830 |
atomic_add_unless(&iter->under_oom, -1, 0); |
0b7f569e4 memcg: fix OOM ki... |
1831 |
} |
1af8efe96 memcg: change mem... |
1832 |
static DEFINE_SPINLOCK(memcg_oom_lock); |
867578cbc memcg: fix oom ki... |
1833 |
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); |
dc98df5a1 memcg: oom wakeup... |
1834 1835 1836 1837 1838 1839 1840 1841 |
struct oom_wait_info { struct mem_cgroup *mem; wait_queue_t wait; }; static int memcg_oom_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) { |
3e92041d6 memcg: add mem_cg... |
1842 1843 |
struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg, *oom_wait_mem; |
dc98df5a1 memcg: oom wakeup... |
1844 1845 1846 |
struct oom_wait_info *oom_wait_info; oom_wait_info = container_of(wait, struct oom_wait_info, wait); |
3e92041d6 memcg: add mem_cg... |
1847 |
oom_wait_mem = oom_wait_info->mem; |
dc98df5a1 memcg: oom wakeup... |
1848 |
|
dc98df5a1 memcg: oom wakeup... |
1849 1850 1851 1852 |
/* * Both oom_wait_info->mem and wake_mem are stable under us, * so we can use css_is_ancestor() without worrying about RCU. */ |
3e92041d6 memcg: add mem_cg... |
1853 1854 |
if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem) && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem)) |
dc98df5a1 memcg: oom wakeup... |
1855 |
return 0; |
dc98df5a1 memcg: oom wakeup... |
1856 1857 1858 1859 1860 1861 1862 1863 |
return autoremove_wake_function(wait, mode, sync, arg); } static void memcg_wakeup_oom(struct mem_cgroup *mem) { /* for filtering, pass "mem" as argument. */ __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); } |
3c11ecf44 memcg: oom kill d... |
1864 1865 |
static void memcg_oom_recover(struct mem_cgroup *mem) { |
79dfdaccd memcg: make oom_l... |
1866 |
if (mem && atomic_read(&mem->under_oom)) |
3c11ecf44 memcg: oom kill d... |
1867 1868 |
memcg_wakeup_oom(mem); } |
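/*
 * Illustrative sketch, not part of this file: the minimal
 * lock/notify/unlock sequence that mem_cgroup_handle_oom() below
 * performs around the helpers above.
 */
static bool __maybe_unused example_try_oom_lock(struct mem_cgroup *mem)
{
	bool locked;

	spin_lock(&memcg_oom_lock);		/* serializes hierarchy walks */
	locked = mem_cgroup_oom_lock(mem);	/* false if a subtree is locked */
	if (locked)
		mem_cgroup_oom_notify(mem);	/* fire eventfd notifications */
	spin_unlock(&memcg_oom_lock);
	return locked;
}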
867578cbc memcg: fix oom ki... |
1869 1870 1871 1872 |
/* * try to call OOM killer. returns false if we should exit memory-reclaim loop. */ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) |
0b7f569e4 memcg: fix OOM ki... |
1873 |
{ |
dc98df5a1 memcg: oom wakeup... |
1874 |
struct oom_wait_info owait; |
3c11ecf44 memcg: oom kill d... |
1875 |
bool locked, need_to_kill; |
867578cbc memcg: fix oom ki... |
1876 |
|
dc98df5a1 memcg: oom wakeup... |
1877 1878 1879 1880 1881 |
owait.mem = mem; owait.wait.flags = 0; owait.wait.func = memcg_oom_wake_function; owait.wait.private = current; INIT_LIST_HEAD(&owait.wait.task_list); |
3c11ecf44 memcg: oom kill d... |
1882 |
need_to_kill = true; |
79dfdaccd memcg: make oom_l... |
1883 |
mem_cgroup_mark_under_oom(mem); |
867578cbc memcg: fix oom ki... |
1884 |
/* First, try to take the OOM lock on the hierarchy under mem. */ |
1af8efe96 memcg: change mem... |
1885 |
spin_lock(&memcg_oom_lock); |
867578cbc memcg: fix oom ki... |
1886 1887 1888 1889 1890 1891 |
locked = mem_cgroup_oom_lock(mem); /* * Even if signal_pending(), we can't quit the charge() loop without * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL * under OOM is always welcome, so use TASK_KILLABLE here. */ |
3c11ecf44 memcg: oom kill d... |
1892 1893 1894 1895 |
prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); if (!locked || mem->oom_kill_disable) need_to_kill = false; if (locked) |
9490ff275 memcg: oom notifier |
1896 |
mem_cgroup_oom_notify(mem); |
1af8efe96 memcg: change mem... |
1897 |
spin_unlock(&memcg_oom_lock); |
867578cbc memcg: fix oom ki... |
1898 |
|
3c11ecf44 memcg: oom kill d... |
1899 1900 |
if (need_to_kill) { finish_wait(&memcg_oom_waitq, &owait.wait); |
867578cbc memcg: fix oom ki... |
1901 |
mem_cgroup_out_of_memory(mem, mask); |
3c11ecf44 memcg: oom kill d... |
1902 |
} else { |
867578cbc memcg: fix oom ki... |
1903 |
schedule(); |
dc98df5a1 memcg: oom wakeup... |
1904 |
finish_wait(&memcg_oom_waitq, &owait.wait); |
867578cbc memcg: fix oom ki... |
1905 |
} |
1af8efe96 memcg: change mem... |
1906 |
spin_lock(&memcg_oom_lock); |
79dfdaccd memcg: make oom_l... |
1907 1908 |
if (locked) mem_cgroup_oom_unlock(mem); |
dc98df5a1 memcg: oom wakeup... |
1909 |
memcg_wakeup_oom(mem); |
1af8efe96 memcg: change mem... |
1910 |
spin_unlock(&memcg_oom_lock); |
867578cbc memcg: fix oom ki... |
1911 |
|
79dfdaccd memcg: make oom_l... |
1912 |
mem_cgroup_unmark_under_oom(mem); |
867578cbc memcg: fix oom ki... |
1913 1914 1915 1916 1917 |
if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) return false; /* Give the dying process a chance to run */ schedule_timeout(1); return true; |
0b7f569e4 memcg: fix OOM ki... |
1918 |
} |
d69b042f3 memcg: add file-b... |
1919 1920 1921 |
/* * Currently used to update mapped file statistics, but the routine can be * generalized to update other statistics as well. |
32047e2a8 memcg: avoid lock... |
1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 |
* * Notes: Race conditions * * We usually use page_cgroup_lock() for accessing page_cgroup members, but * it tends to be costly. Under some conditions, we don't need to do so * _always_. * * Considering "charge", lock_page_cgroup() is not required because all * file-stat operations happen after a page is attached to the radix-tree. * There is no race with "charge". * * Considering "uncharge", we know that memcg intentionally doesn't clear * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup * even if we race with "uncharge". The statistics themselves are properly * handled via flags. * * Considering "move", this is the only case where we see a race. To make * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value to * detect a possible race. If there is one, we take a lock. |
d69b042f3 memcg: add file-b... |
1941 |
*/ |
26174efd4 memcg: generic fi... |
1942 |
|
2a7106f2c memcg: create ext... |
1943 1944 |
void mem_cgroup_update_page_stat(struct page *page, enum mem_cgroup_page_stat_item idx, int val) |
d69b042f3 memcg: add file-b... |
1945 1946 |
{ struct mem_cgroup *mem; |
32047e2a8 memcg: avoid lock... |
1947 1948 |
struct page_cgroup *pc = lookup_page_cgroup(page); bool need_unlock = false; |
dbd4ea78f memcg: add lock t... |
1949 |
unsigned long uninitialized_var(flags); |
d69b042f3 memcg: add file-b... |
1950 |
|
d69b042f3 memcg: add file-b... |
1951 1952 |
if (unlikely(!pc)) return; |
32047e2a8 memcg: avoid lock... |
1953 |
rcu_read_lock(); |
d69b042f3 memcg: add file-b... |
1954 |
mem = pc->mem_cgroup; |
32047e2a8 memcg: avoid lock... |
1955 1956 1957 |
if (unlikely(!mem || !PageCgroupUsed(pc))) goto out; /* pc->mem_cgroup is unstable ? */ |
ca3e02141 memcg: fix USED b... |
1958 |
if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) { |
32047e2a8 memcg: avoid lock... |
1959 |
/* take a lock to guard access to pc->mem_cgroup */ |
dbd4ea78f memcg: add lock t... |
1960 |
move_lock_page_cgroup(pc, &flags); |
32047e2a8 memcg: avoid lock... |
1961 1962 1963 1964 1965 |
need_unlock = true; mem = pc->mem_cgroup; if (!mem || !PageCgroupUsed(pc)) goto out; } |
26174efd4 memcg: generic fi... |
1966 |
|
26174efd4 memcg: generic fi... |
1967 |
switch (idx) { |
2a7106f2c memcg: create ext... |
1968 |
case MEMCG_NR_FILE_MAPPED: |
26174efd4 memcg: generic fi... |
1969 1970 1971 |
if (val > 0) SetPageCgroupFileMapped(pc); else if (!page_mapped(page)) |
0c270f8f9 memcg: fix race i... |
1972 |
ClearPageCgroupFileMapped(pc); |
2a7106f2c memcg: create ext... |
1973 |
idx = MEM_CGROUP_STAT_FILE_MAPPED; |
26174efd4 memcg: generic fi... |
1974 1975 1976 |
break; default: BUG(); |
8725d5416 memcg: fix race i... |
1977 |
} |
d69b042f3 memcg: add file-b... |
1978 |
|
2a7106f2c memcg: create ext... |
1979 |
this_cpu_add(mem->stat->count[idx], val); |
32047e2a8 memcg: avoid lock... |
1980 1981 |
out: if (unlikely(need_unlock)) |
dbd4ea78f memcg: add lock t... |
1982 |
move_unlock_page_cgroup(pc, &flags); |
32047e2a8 memcg: avoid lock... |
1983 1984 |
rcu_read_unlock(); return; |
d69b042f3 memcg: add file-b... |
1985 |
} |
2a7106f2c memcg: create ext... |
1986 |
EXPORT_SYMBOL(mem_cgroup_update_page_stat); |
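/*
 * Usage sketch, for illustration only: rmap accounts a file page's
 * mappings roughly like this (the real callers live in mm/rmap.c behind
 * small inc/dec wrappers in include/linux/memcontrol.h).
 */
static void __maybe_unused example_account_file_mapped(struct page *page,
							bool mapped)
{
	/* +1 when the page gains a mapping, -1 when it loses one */
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, mapped ? 1 : -1);
}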
26174efd4 memcg: generic fi... |
1987 |
|
f817ed485 memcg: move all a... |
1988 |
/* |
cdec2e426 memcg: coalesce c... |
1989 1990 1991 |
* Size of the first charge trial. "32" comes from vmscan.c's magic value. * TODO: big machines may need bigger numbers. */ |
7ec99d621 memcg: unify char... |
1992 |
#define CHARGE_BATCH 32U |
cdec2e426 memcg: coalesce c... |
1993 1994 |
struct memcg_stock_pcp { struct mem_cgroup *cached; /* this is never the root cgroup */ |
11c9ea4e8 memcg: convert pe... |
1995 |
unsigned int nr_pages; |
cdec2e426 memcg: coalesce c... |
1996 |
struct work_struct work; |
26fe61684 memcg: fix percpu... |
1997 1998 |
unsigned long flags; #define FLUSHING_CACHED_CHARGE (0) |
cdec2e426 memcg: coalesce c... |
1999 2000 |
}; static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); |
cdec2e426 memcg: coalesce c... |
2001 2002 |
/* |
11c9ea4e8 memcg: convert pe... |
2003 |
* Try to consume stocked charge on this cpu. On success, one page is consumed |
cdec2e426 memcg: coalesce c... |
2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 |
* from the local stock and true is returned. If the stock is empty or holds * charges from a cgroup other than the current target, false is returned and * the stock will be refilled. */ static bool consume_stock(struct mem_cgroup *mem) { struct memcg_stock_pcp *stock; bool ret = true; stock = &get_cpu_var(memcg_stock); |
11c9ea4e8 memcg: convert pe... |
2014 2015 |
if (mem == stock->cached && stock->nr_pages) stock->nr_pages--; |
cdec2e426 memcg: coalesce c... |
2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 |
else /* need to call res_counter_charge */ ret = false; put_cpu_var(memcg_stock); return ret; } /* * Return stock cached in percpu to the res_counter and reset the cached * information. */ static void drain_stock(struct memcg_stock_pcp *stock) { struct mem_cgroup *old = stock->cached; |
11c9ea4e8 memcg: convert pe... |
2028 2029 2030 2031 |
if (stock->nr_pages) { unsigned long bytes = stock->nr_pages * PAGE_SIZE; res_counter_uncharge(&old->res, bytes); |
cdec2e426 memcg: coalesce c... |
2032 |
if (do_swap_account) |
11c9ea4e8 memcg: convert pe... |
2033 2034 |
res_counter_uncharge(&old->memsw, bytes); stock->nr_pages = 0; |
cdec2e426 memcg: coalesce c... |
2035 2036 |
} stock->cached = NULL; |
cdec2e426 memcg: coalesce c... |
2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 |
} /* * This must be called with preemption disabled, or by a thread pinned * to the local cpu. */ static void drain_local_stock(struct work_struct *dummy) { struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); drain_stock(stock); |
26fe61684 memcg: fix percpu... |
2047 |
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); |
cdec2e426 memcg: coalesce c... |
2048 2049 2050 2051 |
} /* * Cache charges(val), which came from the res_counter, in the local per-cpu area. |
320cc51d9 mm: fix typo in r... |
2052 |
* They will be consumed by consume_stock() later. |
cdec2e426 memcg: coalesce c... |
2053 |
*/ |
11c9ea4e8 memcg: convert pe... |
2054 |
static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages) |
cdec2e426 memcg: coalesce c... |
2055 2056 2057 2058 2059 2060 2061 |
{ struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); if (stock->cached != mem) { /* reset if necessary */ drain_stock(stock); stock->cached = mem; } |
11c9ea4e8 memcg: convert pe... |
2062 |
stock->nr_pages += nr_pages; |
cdec2e426 memcg: coalesce c... |
2063 2064 2065 2066 |
put_cpu_var(memcg_stock); } /* |
d38144b7a memcg: unify sync... |
2067 2068 2069 |
* Drain all per-CPU charge caches for the given root_mem and the * hierarchy under it. The sync flag says whether we should block * until the work is done. |
cdec2e426 memcg: coalesce c... |
2070 |
*/ |
d38144b7a memcg: unify sync... |
2071 |
static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) |
cdec2e426 memcg: coalesce c... |
2072 |
{ |
26fe61684 memcg: fix percpu... |
2073 |
int cpu, curcpu; |
d38144b7a memcg: unify sync... |
2074 |
|
cdec2e426 memcg: coalesce c... |
2075 |
/* Notify other cpus that system-wide "drain" is running */ |
cdec2e426 memcg: coalesce c... |
2076 |
get_online_cpus(); |
26fe61684 memcg: fix percpu... |
2077 2078 2079 2080 2081 2082 2083 |
/* * Get a hint for avoiding draining charges on the current cpu, * which must be exhausted by our charging. It is not required that * this be a precise check, so we use raw_smp_processor_id() instead of * getcpu()/putcpu(). */ curcpu = raw_smp_processor_id(); |
cdec2e426 memcg: coalesce c... |
2084 2085 |
for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
26fe61684 memcg: fix percpu... |
2086 |
struct mem_cgroup *mem; |
26fe61684 memcg: fix percpu... |
2087 |
mem = stock->cached; |
d1a05b697 memcg: do not try... |
2088 |
if (!mem || !stock->nr_pages) |
26fe61684 memcg: fix percpu... |
2089 |
continue; |
3e92041d6 memcg: add mem_cg... |
2090 2091 |
if (!mem_cgroup_same_or_subtree(root_mem, mem)) continue; |
d1a05b697 memcg: do not try... |
2092 2093 2094 2095 2096 2097 |
if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { if (cpu == curcpu) drain_local_stock(&stock->work); else schedule_work_on(cpu, &stock->work); } |
cdec2e426 memcg: coalesce c... |
2098 |
} |
d38144b7a memcg: unify sync... |
2099 2100 2101 2102 2103 2104 |
if (!sync) goto out; for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
8521fc50d memcg: get rid of... |
2105 2106 |
if (mem_cgroup_same_or_subtree(root_mem, stock->cached) && test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) |
d38144b7a memcg: unify sync... |
2107 2108 2109 |
flush_work(&stock->work); } out: |
cdec2e426 memcg: coalesce c... |
2110 |
put_online_cpus(); |
d38144b7a memcg: unify sync... |
2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 |
} /* * Try to drain stocked charges on other cpus. This function is asynchronous * and just queues a work item per cpu to drain locally on each cpu. The * caller can expect some charges to come back to the res_counter later, but * cannot wait for that. */ static void drain_all_stock_async(struct mem_cgroup *root_mem) { |
d38144b7a memcg: unify sync... |
2121 |
drain_all_stock(root_mem, false); |
cdec2e426 memcg: coalesce c... |
2122 2123 2124 |
} /* This is a synchronous drain interface. */ |
d38144b7a memcg: unify sync... |
2125 |
static void drain_all_stock_sync(struct mem_cgroup *root_mem) |
cdec2e426 memcg: coalesce c... |
2126 2127 |
{ /* called when force_empty is called */ |
d38144b7a memcg: unify sync... |
2128 |
drain_all_stock(root_mem, true); |
cdec2e426 memcg: coalesce c... |
2129 |
} |
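/*
 * Illustrative sketch, not part of this file, tying the stock life cycle
 * together (the res_counter charge itself is omitted): a charge first
 * tries the per-cpu stock; the slow path charges a whole CHARGE_BATCH
 * and parks the surplus; drain_all_stock() above reclaims whatever is
 * still parked when a limit is hit or the group is emptied.
 */
static bool __maybe_unused example_charge_one_page(struct mem_cgroup *mem)
{
	if (consume_stock(mem))			/* fast path: 1 page from stock */
		return true;
	/* slow path would charge CHARGE_BATCH pages to mem->res here ... */
	refill_stock(mem, CHARGE_BATCH - 1);	/* ... and park the surplus */
	return true;
}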
711d3d2c9 memcg: cpu hotplu... |
2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 |
/* * This function drains the percpu counter values from a DEAD cpu and * moves them to the local cpu. Note that this function can be preempted. */ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) { int i; spin_lock(&mem->pcp_counter_lock); for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { |
7a159cc9d memcg: use native... |
2140 |
long x = per_cpu(mem->stat->count[i], cpu); |
711d3d2c9 memcg: cpu hotplu... |
2141 2142 2143 2144 |
per_cpu(mem->stat->count[i], cpu) = 0; mem->nocpu_base.count[i] += x; } |
e9f8974f2 memcg: break out ... |
2145 2146 2147 2148 2149 2150 |
for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { unsigned long x = per_cpu(mem->stat->events[i], cpu); per_cpu(mem->stat->events[i], cpu) = 0; mem->nocpu_base.events[i] += x; } |
1489ebad8 memcg: cpu hotplu... |
2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 |
/* need to clear ON_MOVE value, works as a kind of lock. */ per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; spin_unlock(&mem->pcp_counter_lock); } static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) { int idx = MEM_CGROUP_ON_MOVE; spin_lock(&mem->pcp_counter_lock); per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; |
711d3d2c9 memcg: cpu hotplu... |
2162 2163 2164 2165 |
spin_unlock(&mem->pcp_counter_lock); } static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, |
cdec2e426 memcg: coalesce c... |
2166 2167 2168 2169 2170 |
unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; struct memcg_stock_pcp *stock; |
711d3d2c9 memcg: cpu hotplu... |
2171 |
struct mem_cgroup *iter; |
cdec2e426 memcg: coalesce c... |
2172 |
|
1489ebad8 memcg: cpu hotplu... |
2173 2174 2175 2176 2177 |
if (action == CPU_ONLINE) { for_each_mem_cgroup_all(iter) synchronize_mem_cgroup_on_move(iter, cpu); return NOTIFY_OK; } |
711d3d2c9 memcg: cpu hotplu... |
2178 |
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) |
cdec2e426 memcg: coalesce c... |
2179 |
return NOTIFY_OK; |
711d3d2c9 memcg: cpu hotplu... |
2180 2181 2182 |
for_each_mem_cgroup_all(iter) mem_cgroup_drain_pcp_counter(iter, cpu); |
cdec2e426 memcg: coalesce c... |
2183 2184 2185 2186 |
stock = &per_cpu(memcg_stock, cpu); drain_stock(stock); return NOTIFY_OK; } |
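/*
 * For illustration only: this callback is hooked up elsewhere in this
 * file (at root cgroup creation) via the CPU hotplug helper of the day,
 * roughly:
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 */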
4b5343346 memcg: clean up t... |
2187 2188 2189 2190 2191 2192 2193 2194 2195 |
/* See __mem_cgroup_try_charge() for details */ enum { CHARGE_OK, /* success */ CHARGE_RETRY, /* need to retry, but retry is not bad */ CHARGE_NOMEM, /* we can't do more; return -ENOMEM */ CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough resources */ CHARGE_OOM_DIE, /* current is killed because of OOM */ }; |
7ec99d621 memcg: unify char... |
2196 2197 |
static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, unsigned int nr_pages, bool oom_check) |
4b5343346 memcg: clean up t... |
2198 |
{ |
7ec99d621 memcg: unify char... |
2199 |
unsigned long csize = nr_pages * PAGE_SIZE; |
4b5343346 memcg: clean up t... |
2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 |
struct mem_cgroup *mem_over_limit; struct res_counter *fail_res; unsigned long flags = 0; int ret; ret = res_counter_charge(&mem->res, csize, &fail_res); if (likely(!ret)) { if (!do_swap_account) return CHARGE_OK; ret = res_counter_charge(&mem->memsw, csize, &fail_res); if (likely(!ret)) return CHARGE_OK; |
01c88e2d6 memcg: fix accoun... |
2213 |
res_counter_uncharge(&mem->res, csize); |
4b5343346 memcg: clean up t... |
2214 2215 2216 2217 |
mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); flags |= MEM_CGROUP_RECLAIM_NOSWAP; } else mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); |
9221edb71 memcg: prevent en... |
2218 |
/* |
7ec99d621 memcg: unify char... |
2219 2220 |
* nr_pages can be either a huge page (HPAGE_PMD_NR), a batch * of regular pages (CHARGE_BATCH), or a single regular page (1). |
9221edb71 memcg: prevent en... |
2221 2222 2223 2224 |
* * Never reclaim on behalf of optional batching, retry with a * single page instead. */ |
7ec99d621 memcg: unify char... |
2225 |
if (nr_pages == CHARGE_BATCH) |
4b5343346 memcg: clean up t... |
2226 2227 2228 2229 2230 2231 |
return CHARGE_RETRY; if (!(gfp_mask & __GFP_WAIT)) return CHARGE_WOULDBLOCK; ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, |
0ae5e89c6 memcg: count the ... |
2232 |
gfp_mask, flags, NULL); |
7ec99d621 memcg: unify char... |
2233 |
if (mem_cgroup_margin(mem_over_limit) >= nr_pages) |
19942822d memcg: prevent en... |
2234 |
return CHARGE_RETRY; |
4b5343346 memcg: clean up t... |
2235 |
/* |
19942822d memcg: prevent en... |
2236 2237 2238 2239 2240 2241 2242 |
* Even though the limit is exceeded at this point, reclaim * may have been able to free some pages. Retry the charge * before killing the task. * * Only for regular pages, though: huge pages are rather * unlikely to succeed so close to the limit, and we fall back * to regular pages anyway in case of failure. |
4b5343346 memcg: clean up t... |
2243 |
*/ |
7ec99d621 memcg: unify char... |
2244 |
if (nr_pages == 1 && ret) |
4b5343346 memcg: clean up t... |
2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 |
return CHARGE_RETRY; /* * During task move, charge accounts can be doubly counted. So, it's * better to wait until the end of task_move if something is going on. */ if (mem_cgroup_wait_acct_move(mem_over_limit)) return CHARGE_RETRY; /* If we don't need to call the oom-killer at all, return immediately */ if (!oom_check) return CHARGE_NOMEM; /* check OOM */ if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) return CHARGE_OOM_DIE; return CHARGE_RETRY; } |
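/*
 * Worked example, for illustration only: a batched charge of
 * CHARGE_BATCH (32) pages that hits the limit returns CHARGE_RETRY
 * above without reclaiming; the caller then retries with nr_pages == 1,
 * and only that single-page retry reclaims, waits for task moves, and
 * may escalate to the OOM path.
 */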
cdec2e426 memcg: coalesce c... |
2263 |
/* |
f817ed485 memcg: move all a... |
2264 2265 |
* Unlike exported interface, "oom" parameter is added. if oom==true, * oom-killer can be invoked. |
8a9f3ccd2 Memory controller... |
2266 |
*/ |
f817ed485 memcg: move all a... |
2267 |
static int __mem_cgroup_try_charge(struct mm_struct *mm, |
ec1685109 thp: memcg compound |
2268 |
gfp_t gfp_mask, |
7ec99d621 memcg: unify char... |
2269 2270 2271 |
unsigned int nr_pages, struct mem_cgroup **memcg, bool oom) |
8a9f3ccd2 Memory controller... |
2272 |
{ |
7ec99d621 memcg: unify char... |
2273 |
unsigned int batch = max(CHARGE_BATCH, nr_pages); |
4b5343346 memcg: clean up t... |
2274 2275 2276 |
int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; struct mem_cgroup *mem = NULL; int ret; |
a636b327f memcg: avoid unne... |
2277 |
|
867578cbc memcg: fix oom ki... |
2278 2279 2280 2281 2282 2283 2284 2285 |
/* * Unlike the global VM's OOM kill, we're not under a system-level * memory shortage, so let a dying process go ahead, in addition to * MEMDIE processes. */ if (unlikely(test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))) goto bypass; |
a636b327f memcg: avoid unne... |
2286 |
|
8a9f3ccd2 Memory controller... |
2287 |
/* |
3be91277e memcgroup: tidy u... |
2288 2289 |
* We always charge the cgroup the mm_struct belongs to. * The mm_struct's mem_cgroup changes on task migration if the |
8a9f3ccd2 Memory controller... |
2290 2291 2292 |
* thread group leader migrates. It's possible that mm is not * set, if so charge the init_mm (happens for pagecache usage). */ |
f75ca9620 memcg: avoid css_... |
2293 2294 2295 2296 |
if (!*memcg && !mm) goto bypass; again: if (*memcg) { /* css should be a valid one */ |
4b5343346 memcg: clean up t... |
2297 |
mem = *memcg; |
f75ca9620 memcg: avoid css_... |
2298 2299 2300 |
VM_BUG_ON(css_is_removed(&mem->css)); if (mem_cgroup_is_root(mem)) goto done; |
7ec99d621 memcg: unify char... |
2301 |
if (nr_pages == 1 && consume_stock(mem)) |
f75ca9620 memcg: avoid css_... |
2302 |
goto done; |
4b5343346 memcg: clean up t... |
2303 2304 |
css_get(&mem->css); } else { |
f75ca9620 memcg: avoid css_... |
2305 |
struct task_struct *p; |
54595fe26 memcg: use css_tr... |
2306 |
|
f75ca9620 memcg: avoid css_... |
2307 2308 |
rcu_read_lock(); p = rcu_dereference(mm->owner); |
f75ca9620 memcg: avoid css_... |
2309 |
/* |
ebb76ce16 memcg: fix wrong ... |
2310 2311 2312 2313 2314 2315 2316 2317 |
* Because we don't have task_lock(), "p" can exit. * In that case, "mem" can point to root, or p can be NULL due to a * race with swapoff. Then, we have a small risk of mis-accounting. * But that kind of mis-accounting under races always happens because * we don't hold cgroup_mutex(); avoiding it would be overkill, so we * allow that small race here. * (*) swapoff and friends charge against the mm_struct, not the * task_struct, so mm->owner can be NULL. |
f75ca9620 memcg: avoid css_... |
2318 2319 |
*/ mem = mem_cgroup_from_task(p); |
ebb76ce16 memcg: fix wrong ... |
2320 |
if (!mem || mem_cgroup_is_root(mem)) { |
f75ca9620 memcg: avoid css_... |
2321 2322 2323 |
rcu_read_unlock(); goto done; } |
7ec99d621 memcg: unify char... |
2324 |
if (nr_pages == 1 && consume_stock(mem)) { |
f75ca9620 memcg: avoid css_... |
2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 |
/* * It seems dangerous to access the memcg without css_get(). * But considering how consume_stock() works, it's not * necessary. If consume_stock() succeeds, some charges * from this memcg are cached on this cpu. So, we * don't need to call css_get()/css_tryget() before * calling consume_stock(). */ rcu_read_unlock(); goto done; } /* after this point we may block; we need to take a refcount */ if (!css_tryget(&mem->css)) { rcu_read_unlock(); goto again; } rcu_read_unlock(); } |
8a9f3ccd2 Memory controller... |
2343 |
|
4b5343346 memcg: clean up t... |
2344 2345 |
do { bool oom_check; |
7a81b88cb memcg: introduce ... |
2346 |
|
4b5343346 memcg: clean up t... |
2347 |
/* If killed, bypass charge */ |
f75ca9620 memcg: avoid css_... |
2348 2349 |
if (fatal_signal_pending(current)) { css_put(&mem->css); |
4b5343346 memcg: clean up t... |
2350 |
goto bypass; |
f75ca9620 memcg: avoid css_... |
2351 |
} |
6d61ef409 memcg: memory cgr... |
2352 |
|
4b5343346 memcg: clean up t... |
2353 2354 2355 2356 |
oom_check = false; if (oom && !nr_oom_retries) { oom_check = true; nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; |
cdec2e426 memcg: coalesce c... |
2357 |
} |
66e1707bc Memory controller... |
2358 |
|
7ec99d621 memcg: unify char... |
2359 |
ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check); |
4b5343346 memcg: clean up t... |
2360 2361 2362 2363 |
switch (ret) { case CHARGE_OK: break; case CHARGE_RETRY: /* not in OOM situation but retry */ |
7ec99d621 memcg: unify char... |
2364 |
batch = nr_pages; |
f75ca9620 memcg: avoid css_... |
2365 2366 2367 |
css_put(&mem->css); mem = NULL; goto again; |
4b5343346 memcg: clean up t... |
2368 |
case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ |
f75ca9620 memcg: avoid css_... |
2369 |
css_put(&mem->css); |
4b5343346 memcg: clean up t... |
2370 2371 |
goto nomem; case CHARGE_NOMEM: /* OOM routine works */ |
f75ca9620 memcg: avoid css_... |
2372 2373 |
if (!oom) { css_put(&mem->css); |
867578cbc memcg: fix oom ki... |
2374 |
goto nomem; |
f75ca9620 memcg: avoid css_... |
2375 |
} |
4b5343346 memcg: clean up t... |
2376 2377 2378 2379 |
/* If oom, we never return -ENOMEM */ nr_oom_retries--; break; case CHARGE_OOM_DIE: /* Killed by OOM Killer */ |
f75ca9620 memcg: avoid css_... |
2380 |
css_put(&mem->css); |
867578cbc memcg: fix oom ki... |
2381 |
goto bypass; |
66e1707bc Memory controller... |
2382 |
} |
4b5343346 memcg: clean up t... |
2383 |
} while (ret != CHARGE_OK); |
7ec99d621 memcg: unify char... |
2384 2385 |
if (batch > nr_pages) refill_stock(mem, batch - nr_pages); |
f75ca9620 memcg: avoid css_... |
2386 |
css_put(&mem->css); |
0c3e73e84 memcg: improve re... |
2387 |
done: |
f75ca9620 memcg: avoid css_... |
2388 |
*memcg = mem; |
7a81b88cb memcg: introduce ... |
2389 2390 |
return 0; nomem: |
f75ca9620 memcg: avoid css_... |
2391 |
*memcg = NULL; |
7a81b88cb memcg: introduce ... |
2392 |
return -ENOMEM; |
867578cbc memcg: fix oom ki... |
2393 2394 2395 |
bypass: *memcg = NULL; return 0; |
7a81b88cb memcg: introduce ... |
2396 |
} |
8a9f3ccd2 Memory controller... |
2397 |
|
a3b2d6926 cgroups: use css ... |
2398 |
/* |
a3032a2c1 memcg: add mem_cg... |
2399 2400 2401 2402 |
* Sometimes we have to undo a charge we got by try_charge(). * This function is for that: it uncharges and puts the css refcount * taken by try_charge(). */ |
854ffa8d1 memcg: improve pe... |
2403 |
static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, |
e7018b8d2 memcg: keep only ... |
2404 |
unsigned int nr_pages) |
a3032a2c1 memcg: add mem_cg... |
2405 2406 |
{ if (!mem_cgroup_is_root(mem)) { |
e7018b8d2 memcg: keep only ... |
2407 2408 2409 |
unsigned long bytes = nr_pages * PAGE_SIZE; res_counter_uncharge(&mem->res, bytes); |
a3032a2c1 memcg: add mem_cg... |
2410 |
if (do_swap_account) |
e7018b8d2 memcg: keep only ... |
2411 |
res_counter_uncharge(&mem->memsw, bytes); |
a3032a2c1 memcg: add mem_cg... |
2412 |
} |
854ffa8d1 memcg: improve pe... |
2413 |
} |
a3032a2c1 memcg: add mem_cg... |
2414 |
/* |
a3b2d6926 cgroups: use css ... |
2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 |
* A helper function to get a mem_cgroup from an ID. Must be called under * rcu_read_lock(). The caller must check css_is_removed() or similar if it * cares, since dropping a refcount from swap can happen against a removed * memcg. */ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) { struct cgroup_subsys_state *css; /* ID 0 is unused ID */ if (!id) return NULL; css = css_lookup(&mem_cgroup_subsys, id); if (!css) return NULL; return container_of(css, struct mem_cgroup, css); } |
e42d9d5d4 memcg: rename and... |
2432 |
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) |
b5a84319a memcg: fix shmem'... |
2433 |
{ |
e42d9d5d4 memcg: rename and... |
2434 |
struct mem_cgroup *mem = NULL; |
3c776e646 memcg: charge swa... |
2435 |
struct page_cgroup *pc; |
a3b2d6926 cgroups: use css ... |
2436 |
unsigned short id; |
b5a84319a memcg: fix shmem'... |
2437 |
swp_entry_t ent; |
3c776e646 memcg: charge swa... |
2438 |
VM_BUG_ON(!PageLocked(page)); |
3c776e646 memcg: charge swa... |
2439 |
pc = lookup_page_cgroup(page); |
c0bd3f63c memcg: fix try_ge... |
2440 |
lock_page_cgroup(pc); |
a3b2d6926 cgroups: use css ... |
2441 |
if (PageCgroupUsed(pc)) { |
3c776e646 memcg: charge swa... |
2442 |
mem = pc->mem_cgroup; |
a3b2d6926 cgroups: use css ... |
2443 2444 |
if (mem && !css_tryget(&mem->css)) mem = NULL; |
e42d9d5d4 memcg: rename and... |
2445 |
} else if (PageSwapCache(page)) { |
3c776e646 memcg: charge swa... |
2446 |
ent.val = page_private(page); |
a3b2d6926 cgroups: use css ... |
2447 2448 2449 2450 2451 2452 |
id = lookup_swap_cgroup(ent); rcu_read_lock(); mem = mem_cgroup_lookup(id); if (mem && !css_tryget(&mem->css)) mem = NULL; rcu_read_unlock(); |
3c776e646 memcg: charge swa... |
2453 |
} |
c0bd3f63c memcg: fix try_ge... |
2454 |
unlock_page_cgroup(pc); |
b5a84319a memcg: fix shmem'... |
2455 2456 |
return mem; } |
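/*
 * Usage sketch, for illustration only: the reference returned above must
 * be dropped with css_put() once the caller is done with the memcg; the
 * page must be locked, per the VM_BUG_ON above.
 */
static void __maybe_unused example_peek_page_memcg(struct page *page)
{
	struct mem_cgroup *mem = try_get_mem_cgroup_from_page(page);

	if (!mem)
		return;		/* unaccounted page, and not in swap cache */
	/* ... inspect mem ... */
	css_put(&mem->css);	/* drop the reference taken above */
}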
ca3e02141 memcg: fix USED b... |
2457 |
static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, |
5564e88ba memcg: condense p... |
2458 |
struct page *page, |
7ec99d621 memcg: unify char... |
2459 |
unsigned int nr_pages, |
ca3e02141 memcg: fix USED b... |
2460 |
struct page_cgroup *pc, |
7ec99d621 memcg: unify char... |
2461 |
enum charge_type ctype) |
7a81b88cb memcg: introduce ... |
2462 |
{ |
ca3e02141 memcg: fix USED b... |
2463 2464 2465 |
lock_page_cgroup(pc); if (unlikely(PageCgroupUsed(pc))) { unlock_page_cgroup(pc); |
e7018b8d2 memcg: keep only ... |
2466 |
__mem_cgroup_cancel_charge(mem, nr_pages); |
ca3e02141 memcg: fix USED b... |
2467 2468 2469 2470 2471 2472 |
return; } /* * We don't need page_cgroup_lock for tail pages, because they are not * accessed by any other context at this point. */ |
8a9f3ccd2 Memory controller... |
2473 |
pc->mem_cgroup = mem; |
261fb61a8 memcg: add commen... |
2474 2475 2476 2477 2478 2479 2480 |
/* * We access a page_cgroup asynchronously without lock_page_cgroup(). * Especially when a page_cgroup is taken from a page, pc->mem_cgroup * is accessed after testing the USED bit. To make pc->mem_cgroup visible * before the USED bit, we need a memory barrier here. * See mem_cgroup_add_lru_list(), etc. */ |
08e552c69 memcg: synchroniz... |
2481 |
smp_wmb(); |
4b3bde4c9 memcg: remove the... |
2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 |
switch (ctype) { case MEM_CGROUP_CHARGE_TYPE_CACHE: case MEM_CGROUP_CHARGE_TYPE_SHMEM: SetPageCgroupCache(pc); SetPageCgroupUsed(pc); break; case MEM_CGROUP_CHARGE_TYPE_MAPPED: ClearPageCgroupCache(pc); SetPageCgroupUsed(pc); break; default: break; } |
3be91277e memcgroup: tidy u... |
2495 |
|
ca3e02141 memcg: fix USED b... |
2496 |
mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); |
52d4b9ac0 memcg: allocate a... |
2497 |
unlock_page_cgroup(pc); |
430e48631 memcg: update thr... |
2498 2499 2500 2501 2502 |
/* * "charge_statistics" updated event counter. Then, check it. * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. * if they exceeds softlimit. */ |
5564e88ba memcg: condense p... |
2503 |
memcg_check_events(mem, page); |
7a81b88cb memcg: introduce ... |
2504 |
} |
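/*
 * For illustration only: the smp_wmb() above pairs with a read-side
 * barrier in the LRU code - writers order "pc->mem_cgroup = mem" before
 * SetPageCgroupUsed(); readers such as mem_cgroup_add_lru_list() test
 * the USED bit first and issue an smp_rmb() before dereferencing
 * pc->mem_cgroup.
 */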
66e1707bc Memory controller... |
2505 |
|
ca3e02141 memcg: fix USED b... |
2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) /* * Because tail pages are not marked as "used", set the flag. We're under * zone->lru_lock, 'splitting on pmd' and compound_lock. */ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) { struct page_cgroup *head_pc = lookup_page_cgroup(head); struct page_cgroup *tail_pc = lookup_page_cgroup(tail); unsigned long flags; |
3d37c4a91 memcg: bugfix che... |
2519 2520 |
if (mem_cgroup_disabled()) return; |
ca3e02141 memcg: fix USED b... |
2521 |
/* |
ece35ca81 memcg: fix LRU ac... |
2522 |
* We have no races with charge/uncharge but will have races with |
ca3e02141 memcg: fix USED b... |
2523 2524 2525 2526 2527 2528 |
* page state accounting. */ move_lock_page_cgroup(head_pc, &flags); tail_pc->mem_cgroup = head_pc->mem_cgroup; smp_wmb(); /* see __commit_charge() */ |
ece35ca81 memcg: fix LRU ac... |
2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 |
if (PageCgroupAcctLRU(head_pc)) { enum lru_list lru; struct mem_cgroup_per_zone *mz; /* * LRU flags cannot be copied because we need to add the tail * pages to the LRU by the generic call, and our hook will be called. * We hold lru_lock, so reduce the counter directly. */ lru = page_lru(head); |
97a6c37b3 memcg: change pag... |
2539 |
mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head); |
ece35ca81 memcg: fix LRU ac... |
2540 2541 |
MEM_CGROUP_ZSTAT(mz, lru) -= 1; } |
ca3e02141 memcg: fix USED b... |
2542 2543 2544 2545 |
tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; move_unlock_page_cgroup(head_pc, &flags); } #endif |
f817ed485 memcg: move all a... |
2546 |
/** |
de3638d9c memcg: fold __mem... |
2547 |
* mem_cgroup_move_account - move account of the page |
5564e88ba memcg: condense p... |
2548 |
* @page: the page |
7ec99d621 memcg: unify char... |
2549 |
* @nr_pages: number of regular pages (>1 for huge pages) |
f817ed485 memcg: move all a... |
2550 2551 2552 |
* @pc: page_cgroup of the page. * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. |
854ffa8d1 memcg: improve pe... |
2553 |
* @uncharge: whether we should call uncharge and css_put against @from. |
f817ed485 memcg: move all a... |
2554 2555 |
* * The caller must confirm the following. |
08e552c69 memcg: synchroniz... |
2556 |
* - page is not on LRU (isolate_page() is useful.) |
7ec99d621 memcg: unify char... |
2557 |
* - compound_lock is held when nr_pages > 1 |
f817ed485 memcg: move all a... |
2558 |
* |
854ffa8d1 memcg: improve pe... |
2559 |
* This function does neither "charge" nor css_get to the new cgroup. It should be |
25985edce Fix common misspe... |
2560 |
* done by the caller (__mem_cgroup_try_charge() would be useful). If @uncharge is |
854ffa8d1 memcg: improve pe... |
2561 2562 |
* true, this function does "uncharge" from old cgroup, but it doesn't if * @uncharge is false, so a caller should do "uncharge". |
f817ed485 memcg: move all a... |
2563 |
*/ |
7ec99d621 memcg: unify char... |
2564 2565 2566 2567 2568 2569 |
static int mem_cgroup_move_account(struct page *page, unsigned int nr_pages, struct page_cgroup *pc, struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) |
f817ed485 memcg: move all a... |
2570 |
{ |
	unsigned long flags;
	int ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from LRU. So, the collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

	move_lock_page_cgroup(pc, &flags);

	if (PageCgroupFileMapped(pc)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
	if (uncharge)
		/* This is not "cancel", but cancel_charge does all we need. */
		__mem_cgroup_cancel_charge(from, nr_pages);

	/* caller should have done css_get */
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
	/*
	 * We charge against "to" which may not have any tasks. Then, "to"
	 * can be under rmdir(). But in the current implementation, the
	 * callers of this function are just force_empty() and move charge,
	 * so it's guaranteed that "to" is never removed. So, we don't check
	 * rmdir status here.
	 */
	move_unlock_page_cgroup(pc, &flags);
	ret = 0;
unlock:
	unlock_page_cgroup(pc);
	/*
	 * check events
	 */
	memcg_check_events(to, page);
	memcg_check_events(from, page);
out:
	return ret;
}
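/*
 * A minimal caller sketch for the protocol documented above; the helper
 * below is hypothetical, not part of this file. The page must be off the
 * LRU and, for a huge page, the compound lock must be held across the
 * move; @to must already hold the charge (__mem_cgroup_try_charge()).
 */
#if 0
static int move_one_page(struct page *page, struct page_cgroup *pc,
			 struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned int nr_pages = hpage_nr_pages(page);
	unsigned long flags;
	int ret;

	if (isolate_lru_page(page))	/* keep LRU walkers away */
		return -EBUSY;
	if (nr_pages > 1)
		flags = compound_lock_irqsave(page);
	ret = mem_cgroup_move_account(page, nr_pages, pc, from, to, true);
	if (ret)	/* @to keeps the pre-charge on failure: give it back */
		__mem_cgroup_cancel_charge(to, nr_pages);
	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
	putback_lru_page(page);
	return ret;
}
#endif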
/*
 * move charges to its parent.
 */
static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	unsigned int nr_pages;
	unsigned long uninitialized_var(flags);
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;

	nr_pages = hpage_nr_pages(page);

	parent = mem_cgroup_from_cont(pcg);
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
	if (ret || !parent)
		goto put_back;

	if (nr_pages > 1)
		flags = compound_lock_irqsave(page);

	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
	if (ret)
		__mem_cgroup_cancel_charge(parent, nr_pages);

	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
put_back:
	putback_lru_page(page);
put:
	put_page(page);
out:
	return ret;
}
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page. The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	pc = lookup_page_cgroup(page);
	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */

	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
	if (ret || !mem)
		return ret;

	__mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
	return 0;
}
int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check: a newly-mapped-anon's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}
static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype);

static void
__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
					enum charge_type ctype)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	/*
	 * In some cases (SwapCache, FUSE(splice_buf->radixtree)) the page
	 * is already on LRU. It means the page may be on some other
	 * page_cgroup's LRU. Take care of it.
	 */
	mem_cgroup_lru_del_before_commit(page);
	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
	mem_cgroup_lru_add_after_commit(page);
	return;
}
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page)) {
		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
		if (ret || !mem)
			return ret;

		/*
		 * FUSE reuses pages without going through the final
		 * put that would remove them from the LRU list; make
		 * sure that they get relinked properly.
		 */
		__mem_cgroup_commit_charge_lrucare(page, mem,
						MEM_CGROUP_CHARGE_TYPE_CACHE);
		return ret;
	}
	/* shmem */
	if (PageSwapCache(page)) {
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, mem,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);
	} else
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);

	return ret;
}
/*
 * During swap-in (try_charge -> commit or cancel), the page is locked.
 * And when try_charge() successfully returns, one refcnt to memcg without
 * struct page_cgroup is acquired. This refcnt will be consumed by
 * "commit()" or removed by "cancel()".
 */
int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	int ret;

	*ptr = NULL;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;
	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: in those cases
	 * do_swap_page()'s pte_same() test will fail; but there's also a
	 * KSM case which does need to charge the page.
	 */
	if (!PageSwapCache(page))
		goto charge_cur_mm;
	mem = try_get_mem_cgroup_from_page(page);
	if (!mem)
		goto charge_cur_mm;
	*ptr = mem;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
	css_put(&mem->css);
	return ret;
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
}
static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype)
{
	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
	cgroup_exclude_rmdir(&ptr->css);

	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap....double count.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		unsigned short id;
		struct mem_cgroup *memcg;

		id = swap_cgroup_record(ent, 0);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg) {
			/*
			 * This recorded memcg can be an obsolete one. So,
			 * avoid calling css_tryget
			 */
			if (!mem_cgroup_is_root(memcg))
				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_swap_statistics(memcg, false);
			mem_cgroup_put(memcg);
		}
		rcu_read_unlock();
	}
	/*
	 * At swapin, we may charge account against a cgroup which has no
	 * tasks.
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&ptr->css);
}
void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	__mem_cgroup_commit_charge_swapin(page, ptr,
					MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	__mem_cgroup_cancel_charge(mem, 1);
}
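/*
 * Sketch of the try/commit/cancel protocol above, roughly as a caller such
 * as do_swap_page() uses it. Simplified: the surrounding fault logic and
 * the name pte_same_check_succeeded are assumptions, error paths elided.
 */
#if 0
	struct mem_cgroup *ptr;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		goto out_fail;			/* over limit, abort fault */
	/* ... map the page into the page tables ... */
	if (pte_same_check_succeeded)
		mem_cgroup_commit_charge_swapin(page, ptr);
	else
		mem_cgroup_cancel_charge_swapin(ptr);	/* drops the charge */
#endif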
static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
				   unsigned int nr_pages,
				   const enum charge_type ctype)
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;

	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember the memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
		batch->memcg = mem;
	/*
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
	 * In those cases, all pages freed continuously can be expected to be
	 * in the same cgroup and we have a chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

	if (nr_pages > 1)
		goto direct_uncharge;

	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges to an uncharge of res_counter.
	 * If not, we uncharge res_counter one by one.
	 */
	if (batch->memcg != mem)
		goto direct_uncharge;
	/* remember freed charge and uncharge it later */
	batch->nr_pages++;
	if (uncharge_memsw)
		batch->memsw_nr_pages++;
	return;
direct_uncharge:
	res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
	if (uncharge_memsw)
		res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != mem))
		memcg_oom_recover(mem);
	return;
}
/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct mem_cgroup *mem = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
	}
	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
	case MEM_CGROUP_CHARGE_TYPE_DROP:
		/* See mem_cgroup_prepare_migration() */
		if (page_mapped(page) || PageCgroupMigration(pc))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);

	ClearPageCgroupUsed(pc);
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
	 * freed from LRU. This is safe because uncharged page is expected not
	 * to be reused (freed soon). Exception is SwapCache, it's handled by
	 * special functions.
	 */

	unlock_page_cgroup(pc);
	/*
	 * even after unlock, we have mem->res.usage here and this memcg
	 * will never be freed.
	 */
	memcg_check_events(mem, page);
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
		mem_cgroup_swap_statistics(mem, true);
		mem_cgroup_get(mem);
	}
	if (!mem_cgroup_is_root(mem))
		mem_cgroup_do_uncharge(mem, nr_pages, ctype);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}
void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect pages
 * are in the same memcg. All these callers themselves limit the number of
 * pages freed at once, so uncharge_start/end() pairs up properly.
 * This may be called several (2+) times in a context.
 */
void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
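/*
 * Usage sketch for the batching above, as a truncate/unmap path would use
 * it; nesting is allowed and only the outermost end() flushes the batch:
 */
#if 0
	mem_cgroup_uncharge_start();
	/* for each page being released ... */
	mem_cgroup_uncharge_page(page);	/* coalesced into current->memcg_batch */
	/* ... */
	mem_cgroup_uncharge_end();	/* single res_counter hit for the batch */
#endif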
#ifdef CONFIG_SWAP
/*
 * called after __delete_from_swap_cache() and drops the "page" account.
 * memcg information is recorded to swap_cgroup of "ent"
 */
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
	struct mem_cgroup *memcg;
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused ! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

	memcg = __mem_cgroup_uncharge_common(page, ctype);

	/*
	 * record memcg information; if swapout && memcg != NULL,
	 * mem_cgroup_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
		swap_cgroup_record(ent, css_id(&memcg->css));
}
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
	if (memcg) {
		/*
		 * We uncharge this because swap is freed.
		 * This memcg can be an obsolete one. We avoid calling
		 * css_tryget
		 */
		if (!mem_cgroup_is_root(memcg))
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_put(memcg);
	}
	rcu_read_unlock();
}
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 * @need_fixup: whether we should fixup res_counters and refcounts.
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		/*
		 * This function is only called from task migration context
		 * now. It postpones res_counter and refcount handling till
		 * the end of task migration (mem_cgroup_clear_mc()) for
		 * performance improvement. But we cannot postpone
		 * mem_cgroup_get(to) because if the process that has been
		 * moved to @to does swap-in, the refcount of @to might be
		 * decreased to 0.
		 */
		mem_cgroup_get(to);
		if (need_fixup) {
			if (!mem_cgroup_is_root(from))
				res_counter_uncharge(&from->memsw, PAGE_SIZE);
			mem_cgroup_put(from);
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			if (!mem_cgroup_is_root(to))
				res_counter_uncharge(&to->res, PAGE_SIZE);
		}
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	return -EINVAL;
}
#endif
/*
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	struct page_cgroup *pc;
	enum charge_type ctype;
	int ret = 0;

	*ptr = NULL;

	VM_BUG_ON(PageTransHuge(page));
	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		/*
		 * At migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set MIGRATION flag here and delay uncharge
		 * until end_migration() is called
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmap-and-freed
		 * while migration was ongoing.
		 * If unmap finds the old page, uncharge() of it will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when its mapcount goes from 1 to 0. If unmap code
		 * finds a swap migration entry, the new page will not be mapped
		 * and end_migration() will find it (mapcount == 0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the kernel
		 * remaps it. A charge for it is kept by MIGRATION flag even
		 * if mapcount goes down to 0. We can do remap successfully
		 * without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so, the old page itself will not be swapped-out.
		 * If the new page is swapped out before end_migration, our
		 * hook to the usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
	}
	unlock_page_cgroup(pc);
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
	if (!mem)
		return 0;

	*ptr = mem;
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
	css_put(&mem->css);/* drop extra refcnt */
	if (ret || *ptr == NULL) {
		if (PageAnon(page)) {
			lock_page_cgroup(pc);
			ClearPageCgroupMigration(pc);
			unlock_page_cgroup(pc);
			/*
			 * The old page may be fully unmapped while we kept it.
			 */
			mem_cgroup_uncharge_page(page);
		}
		return -ENOMEM;
	}
	/*
	 * We charge the new page before it's used/mapped. So, even if
	 * unlock_page() is called before end_migration, we can catch all
	 * events on this new page. In the case the new page is migrated but
	 * not remapped, the new page's mapcount will finally be 0 and we
	 * call uncharge in end_migration().
	 */
	pc = lookup_page_cgroup(newpage);
	if (PageAnon(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
	return ret;
}
/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
	struct page *used, *unused;
	struct page_cgroup *pc;

	if (!mem)
		return;
	/* blocks rmdir() */
	cgroup_exclude_rmdir(&mem->css);
	if (!migration_ok) {
		used = oldpage;
		unused = newpage;
	} else {
		used = newpage;
		unused = oldpage;
	}
	/*
	 * We disallowed uncharge of pages under migration because mapcount
	 * of the page goes down to zero, temporarily.
	 * Clear the flag and check whether the page should be charged.
	 */
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);

	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);

	/*
	 * If a page is a file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added the MIGRATION flag, it's not
	 * uncharged yet. There are several cases, but the page->mapcount
	 * check and USED bit check in mem_cgroup_uncharge_page() will do
	 * enough checking. (see prepare_charge() also)
	 */
	if (PageAnon(used))
		mem_cgroup_uncharge_page(used);
	/*
	 * At migration, we may charge account against a cgroup which has no
	 * tasks.
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&mem->css);
}
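/*
 * Sketch of how the migration core pairs the two hooks above, simplified
 * from the unmap_and_move() flow in mm/migrate.c; the locals and error
 * handling around it are assumptions:
 */
#if 0
	struct mem_cgroup *mem;
	int rc;

	/* charge the new page and mark the old one as under migration */
	if (mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL))
		goto out;	/* -ENOMEM: give up on this page */

	rc = move_to_new_page(newpage, page, remap_swapcache);

	/* uncharge whichever page lost; clears the MIGRATION flag */
	mem_cgroup_end_migration(mem, page, newpage, rc == 0);
#endif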
#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
		int ret = -1;
		char *path;

		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
		       pc, pc->flags, pc->mem_cgroup);

		path = kmalloc(PATH_MAX, GFP_KERNEL);
		if (path) {
			rcu_read_lock();
			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
							path, PATH_MAX);
			rcu_read_unlock();
		}

		printk(KERN_CONT "(%s)\n",
				(ret < 0) ? "cannot get the path" : path);
		kfree(path);
	}
}
#endif
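/*
 * The two helpers above are meant for the page allocator's free-path debug
 * checks; a sketch of such a call site (the real ones live in
 * mm/page_alloc.c, so the surrounding code here is an assumption):
 */
#if 0
	if (unlikely(mem_cgroup_bad_page_check(page))) {
		/* page is being freed while still charged to a memcg */
		mem_cgroup_print_bad_page(page);
		bad_page(page);
	}
#endif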
static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit, memlimit;
	int ret = 0;
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
	int enlarge;

	/*
	 * For keeping hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the # of children which we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

	enlarge = 0;
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You can see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

		ret = res_counter_set_limit(&memcg->res, val);
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
						MEM_CGROUP_RECLAIM_SHRINK,
						NULL);
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, memswlimit, oldusage, curusage;
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
	int enlarge = 0;

	/* see mem_cgroup_resize_res_limit */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You can see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
		ret = res_counter_set_limit(&memcg->memsw, val);
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
						MEM_CGROUP_RECLAIM_NOSWAP |
						MEM_CGROUP_RECLAIM_SHRINK,
						NULL);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
	return ret;
}
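/*
 * The mem->res.limit <= mem->memsw.limit invariant enforced above means
 * userspace must grow memsw.limit_in_bytes first when enlarging both, and
 * shrink limit_in_bytes first when shrinking both. A self-contained sketch;
 * the mount point /cgroup/memory/test is an assumption:
 */
#if 0
#include <stdio.h>

static int write_val(const char *file, const char *val)
{
	FILE *f = fopen(file, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);	/* memparse suffixes like "M" are ok */
	return fclose(f);
}

int main(void)
{
	/* grow memsw before mem; the reverse order would yield -EINVAL */
	write_val("/cgroup/memory/test/memory.memsw.limit_in_bytes", "512M");
	write_val("/cgroup/memory/test/memory.limit_in_bytes", "256M");
	return 0;
}
#endif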
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
						gfp_mask,
						MEM_CGROUP_RECLAIM_SOFT,
						&nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed) {
			do {
				/*
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different mem.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
				 * the tree
				 */
				next_mz =
				__mem_cgroup_largest_soft_limit_node(mctz);
				if (next_mz == mz)
					css_put(&next_mz->mem->css);
				else /* next_mz == NULL or other memcg */
					break;
			} while (1);
		}
		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
		excess = res_counter_soft_limit_excess(&mz->mem->res);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
		spin_unlock(&mctz->lock);
		css_put(&mz->mem->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->mem->css);
	return nr_reclaimed;
}
/*
 * This routine traverses page_cgroups in the given list and drops them all.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		struct page *page;

		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		page = lookup_cgroup_page(pc);

		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * make mem_cgroup's charge to be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	do {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		drain_all_stock_sync(mem);
		ret = 0;
		mem_cgroup_start_move(mem);
		for_each_node_state(node, N_HIGH_MEMORY) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		mem_cgroup_end_move(mem);
		memcg_oom_recover(mem);
		/* it seems the parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	/* "ret" should also be checked to ensure all lists are empty. */
	} while (mem->res.usage > 0 || ret);
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && mem->res.usage > 0) {
		struct memcg_scanrecord rec;
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		rec.context = SCAN_BY_SHRINK;
		rec.mem = mem;
		rec.root = mem;
		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
						false, &rec);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	goto move_account;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
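/*
 * mem_cgroup_force_empty_write() is wired to the memory.force_empty control
 * file, so from userspace the whole drain is triggered by a single write;
 * a sketch (the /cgroup mount point is an assumption):
 */
#if 0
	FILE *f = fopen("/cgroup/memory/test/memory.force_empty", "w");

	if (f) {
		fputs("0", f);	/* any value works; the write is the event */
		fclose(f);	/* blocks until usage is 0, -EBUSY or -EINTR */
	}
#endif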
static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_mem = NULL;

	if (parent)
		parent_mem = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, and we allow the value
	 * to be set if there are no children.
	 */
	if ((!parent_mem || !parent_mem->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			mem->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}
static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
					       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	long val = 0;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_mem_cgroup_tree(iter, mem)
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
{
	u64 val;

	if (!mem_cgroup_is_root(mem)) {
		if (!swap)
			return res_counter_read_u64(&mem->res, RES_USAGE);
		else
			return res_counter_read_u64(&mem->memsw, RES_USAGE);
	}

	val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);

	if (swap)
		val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);

	return val << PAGE_SHIFT;
}
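/*
 * In other words, for the root cgroup (whose res_counter is not charged
 * for every page) the two readings above reduce to, in pages before the
 * PAGE_SHIFT conversion and summed hierarchically over the whole tree:
 *
 *	usage       = CACHE + RSS
 *	memsw usage = CACHE + RSS + SWAPOUT
 */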
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(mem, false);
		else
			val = res_counter_read_u64(&mem->res, name);
		break;
	case _MEMSWAP:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(mem, true);
		else
			val = res_counter_read_u64(&mem->memsw, name);
		break;
	default:
		BUG();
		break;
	}
	return val;
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics, for now, we support soft limits for
		 * control without swap
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}
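/*
 * Illustrative usage (added commentary):
 * res_counter_memparse_write_strategy() accepts memparse() suffixes, so
 *
 *   echo 64M > memory.limit_in_bytes
 *   echo -1  > memory.limit_in_bytes   # back to unlimited
 *
 * should both be valid writes here; the "-1" form is handled by the
 * parser (mapped to RESOURCE_MAX), not by this function.
 */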
static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
				unsigned long long *mem_limit,
				unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
	return;
}
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}

	return 0;
}
static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
	/*
	 * We check this value several times, both in can_attach() and
	 * attach(), so we need the cgroup lock to prevent this value from
	 * becoming inconsistent.
	 */
	cgroup_lock();
	mem->move_charge_at_immigrate = val;
	cgroup_unlock();

	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

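/*
 * Note (added commentary): val is a bitmask of enum move_type. Per the
 * memcg documentation, bit 0 enables moving anonymous pages (and their
 * swap) and bit 1 enables moving file pages, so writing 3 to
 * memory.move_charge_at_immigrate selects both.
 */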

/* For read statistics */
enum {
	MCS_CACHE,
	MCS_RSS,
	MCS_FILE_MAPPED,
	MCS_PGPGIN,
	MCS_PGPGOUT,
	MCS_SWAP,
	MCS_PGFAULT,
	MCS_PGMAJFAULT,
	MCS_INACTIVE_ANON,
	MCS_ACTIVE_ANON,
	MCS_INACTIVE_FILE,
	MCS_ACTIVE_FILE,
	MCS_UNEVICTABLE,
	NR_MCS_STAT,
};

struct mcs_total_stat {
	s64 stat[NR_MCS_STAT];
};

struct {
	char *local_name;
	char *total_name;
} memcg_stat_strings[NR_MCS_STAT] = {
	{"cache", "total_cache"},
	{"rss", "total_rss"},
	{"mapped_file", "total_mapped_file"},
	{"pgpgin", "total_pgpgin"},
	{"pgpgout", "total_pgpgout"},
	{"swap", "total_swap"},
	{"pgfault", "total_pgfault"},
	{"pgmajfault", "total_pgmajfault"},
	{"inactive_anon", "total_inactive_anon"},
	{"active_anon", "total_active_anon"},
	{"inactive_file", "total_inactive_file"},
	{"active_file", "total_active_file"},
	{"unevictable", "total_unevictable"}
};
static void
mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
	s64 val;

	/* per cpu stat */
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	s->stat[MCS_CACHE] += val * PAGE_SIZE;
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	s->stat[MCS_RSS] += val * PAGE_SIZE;
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
	s->stat[MCS_PGPGIN] += val;
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
	s->stat[MCS_PGPGOUT] += val;
	if (do_swap_account) {
		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
		s->stat[MCS_SWAP] += val * PAGE_SIZE;
	}
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
	s->stat[MCS_PGFAULT] += val;
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
	s->stat[MCS_PGMAJFAULT] += val;

	/* per zone stat */
	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
}

static void
mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		mem_cgroup_get_local_stat(iter, s);
}
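/*
 * Note (added commentary): the "local" counters above cover only this
 * memcg; mem_cgroup_get_total_stat() folds in every descendant via
 * for_each_mem_cgroup_tree(), which is what memory.stat reports as the
 * total_* lines.
 */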
#ifdef CONFIG_NUMA
static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
{
	int nid;
	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
	unsigned long node_nr;
	struct cgroup *cont = m->private;
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);

	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
	seq_printf(m, "total=%lu", total_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
	seq_printf(m, "file=%lu", file_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
				LRU_ALL_FILE);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
	seq_printf(m, "anon=%lu", anon_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
				LRU_ALL_ANON);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
	seq_printf(m, "unevictable=%lu", unevictable_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
				BIT(LRU_UNEVICTABLE));
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_NUMA */
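/*
 * Illustrative memory.numa_stat output produced above (added commentary):
 *
 *   total=<pages> N0=<pages> N1=<pages> ...
 *   file=<pages> N0=<pages> ...
 *   anon=<pages> N0=<pages> ...
 *   unevictable=<pages> N0=<pages> ...
 */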
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mcs_total_stat mystat;
	int i;

	memset(&mystat, 0, sizeof(mystat));
	mem_cgroup_get_local_stat(mem_cont, &mystat);

	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
	}

	/* Hierarchical information */
	{
		unsigned long long limit, memsw_limit;
		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
		cb->fill(cb, "hierarchical_memory_limit", limit);
		if (do_swap_account)
			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
	}

	memset(&mystat, 0, sizeof(mystat));
	mem_cgroup_get_total_stat(mem_cont, &mystat);
	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
	}

#ifdef CONFIG_DEBUG_VM
	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));

	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);

				recent_rotated[0] +=
					mz->reclaim_stat.recent_rotated[0];
				recent_rotated[1] +=
					mz->reclaim_stat.recent_rotated[1];
				recent_scanned[0] +=
					mz->reclaim_stat.recent_scanned[0];
				recent_scanned[1] +=
					mz->reclaim_stat.recent_scanned[1];
			}
		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
	}
#endif

	return 0;
}
static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();

	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}

	memcg->swappiness = val;

	cgroup_unlock();

	return 0;
}
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	return _a->threshold - _b->threshold;
}
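/*
 * Worked example (added commentary): with thresholds {4M, 8M, 16M} and
 * usage 9M, current_threshold indexes 8M. If usage then drops to 3M,
 * the backward loop above signals the 8M and 4M eventfds and leaves
 * current_threshold at -1; if usage instead grows past 16M, the forward
 * loop signals 16M and current_threshold ends up indexing it.
 */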
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &mem->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold < usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
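/*
 * Note (added commentary): registration is reached through
 * cgroup.event_control; per the memcg documentation the userspace side
 * looks roughly like
 *
 *   echo "<event_fd> <fd of memory.usage_in_bytes> <threshold>" \
 *        > cgroup.event_control
 *
 * The primary/spare pair above lets the update swap in a new sorted
 * array with rcu_assign_pointer() while readers traverse the old one.
 */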
static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * Something went wrong if we are trying to unregister a threshold
	 * when we don't have thresholds.
	 */
	BUG_ON(!thresholds);

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}
static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);

	if (atomic_read(&mem->under_oom))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) ||
	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}
	mem->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(mem);
	cgroup_unlock();
	return 0;
}
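/*
 * Illustrative usage (added commentary, paths assumed relative to a
 * memory cgroup directory):
 *
 *   echo 1 > memory.oom_control     # disable the OOM killer here
 *   cat memory.oom_control         # shows oom_kill_disable / under_oom
 *
 * An eventfd registered on memory.oom_control via cgroup.event_control
 * is signalled through mem_cgroup_oom_notify() when the group hits OOM.
 */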
#ifdef CONFIG_NUMA
static const struct file_operations mem_control_numa_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
{
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_numa_stat_file_operations;
	return single_open(file, mem_control_numa_stat_show, cont);
}
#endif /* CONFIG_NUMA */
static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
				struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	char string[64];
	int i;

	for (i = 0; i < NR_SCANSTATS; i++) {
		strcpy(string, scanstat_string[i]);
		strcat(string, SCANSTAT_WORD_LIMIT);
		cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]);
	}

	for (i = 0; i < NR_SCANSTATS; i++) {
		strcpy(string, scanstat_string[i]);
		strcat(string, SCANSTAT_WORD_SYSTEM);
		cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
	}

	for (i = 0; i < NR_SCANSTATS; i++) {
		strcpy(string, scanstat_string[i]);
		strcat(string, SCANSTAT_WORD_LIMIT);
		strcat(string, SCANSTAT_WORD_HIERARCHY);
		cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
	}

	for (i = 0; i < NR_SCANSTATS; i++) {
		strcpy(string, scanstat_string[i]);
		strcat(string, SCANSTAT_WORD_SYSTEM);
		strcat(string, SCANSTAT_WORD_HIERARCHY);
		cb->fill(cb, string,
			mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
	}
	return 0;
}

static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
				unsigned int event)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	spin_lock(&mem->scanstat.lock);
	memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
	memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
	spin_unlock(&mem->scanstat.lock);
	return 0;
}
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.open = mem_control_numa_stat_open,
		.mode = S_IRUGO,
	},
#endif
	{
		.name = "vmscan_stat",
		.read_map = mem_cgroup_vmscan_stat_read,
		.trigger = mem_cgroup_reset_vmscan_stat,
	},
};
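/*
 * Note (added commentary): the table above is what populates a memory
 * cgroup directory, e.g. memory.usage_in_bytes, memory.limit_in_bytes,
 * memory.stat, memory.oom_control and friends; the memsw.* variants are
 * added separately by register_memsw_files() below when swap accounting
 * is enabled.
 */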
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
};
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->mem = mem;
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = sizeof(struct mem_cgroup);

	/* Can be very big if MAX_NUMNODES is very big */
	if (size < PAGE_SIZE)
		mem = kzalloc(size, GFP_KERNEL);
	else
		mem = vzalloc(size);

	if (!mem)
		return NULL;

	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat)
		goto out_free;
	spin_lock_init(&mem->pcp_counter_lock);
	return mem;

out_free:
	if (size < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
	return NULL;
}
/*
 * When a mem_cgroup is destroyed, references from swap_cgroup can remain
 * (scanning everything at force_empty is too costly...).
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * that count goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */
static void __mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_remove_from_trees(mem);
	free_css_id(&mem_cgroup_subsys, &mem->css);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	free_percpu(mem->stat);
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
	atomic_inc(&mem->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
{
	if (atomic_sub_and_test(count, &mem->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(mem);
		__mem_cgroup_free(mem);
		if (parent)
			mem_cgroup_put(parent);
	}
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
	__mem_cgroup_put(mem, 1);
}
/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
{
	if (!mem->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(mem->res.parent, res);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif
static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node_state(node, N_POSSIBLE) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
			return 1;

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;
}
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	long error = -ENOMEM;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(error);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		int cpu;
		enable_swap_cgroup();
		parent = NULL;
		root_mem_cgroup = mem;
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
		mem->oom_kill_disable = parent->oom_kill_disable;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
		/*
		 * We increment refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup(see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}
	mem->last_scanned_child = 0;
	mem->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&mem->oom_notify);

	if (parent)
		mem->swappiness = mem_cgroup_swappiness(parent);
	atomic_set(&mem->refcnt, 1);
	mem->move_charge_at_immigrate = 0;
	mutex_init(&mem->thresholds_lock);
	spin_lock_init(&mem->scanstat.lock);
	return &mem->css;
free_out:
	__mem_cgroup_free(mem);
	root_mem_cgroup = NULL;
	return ERR_PTR(error);
}
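/*
 * Note (added commentary): the cont->parent == NULL branch above runs
 * exactly once, for the root cgroup, and doubles as the subsystem's
 * one-time global init: swap accounting, the soft-limit trees, the
 * per-cpu charge stock and the CPU hotplug notifier.
 */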
static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	mem_cgroup_put(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);

	return ret;
}
#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *mem = mc.to;

	if (mem_cgroup_is_root(mem)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "mem" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&mem->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
		if (ret || !mem)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return -ENOMEM;
		mc.precharge++;
	}
	return ret;
}

/**
 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
 * @vma: the vma to which the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: pointer where the target page or swap entry is stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE,	/* not used */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) { /* we don't move shared anon */
		if (page)
			put_page(page);
		return NULL;
	}
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	return page;
}
90254a658 memcg: clean up m... |
5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 |
static int is_target_pte_for_mc(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, union mc_target *target) { struct page *page = NULL; struct page_cgroup *pc; int ret = 0; swp_entry_t ent = { .val = 0 }; if (pte_present(ptent)) page = mc_handle_present_pte(vma, addr, ptent); else if (is_swap_pte(ptent)) page = mc_handle_swap_pte(vma, addr, ptent, &ent); |
87946a722 memcg: move charg... |
5093 5094 |
else if (pte_none(ptent) || pte_file(ptent)) page = mc_handle_file_pte(vma, addr, ptent, &ent); |
90254a658 memcg: clean up m... |
5095 5096 5097 |
if (!page && !ent.val) return 0; |
024914477 memcg: move charg... |
5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 |
if (page) { pc = lookup_page_cgroup(page); /* * Do only loose check w/o page_cgroup lock. * mem_cgroup_move_account() checks the pc is valid or not under * the lock. */ if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) target->page = page; } if (!ret || !target) put_page(page); } |
90254a658 memcg: clean up m... |
5113 5114 |
/* There is a swap entry and a page doesn't exist or isn't charged */ if (ent.val && !ret && |
7f0f15464 memcg: fix css_id... |
5115 5116 5117 5118 |
css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { ret = MC_TARGET_SWAP; if (target) target->ent = ent; |
4ffef5fef memcg: move charg... |
5119 |
} |
4ffef5fef memcg: move charg... |
5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 |
return ret; } static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; pte_t *pte; spinlock_t *ptl; |
033193275 pagewalk: only sp... |
5130 |
split_huge_page_pmd(walk->mm, pmd); |
4ffef5fef memcg: move charg... |
5131 5132 5133 5134 5135 5136 |
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) if (is_target_pte_for_mc(vma, addr, *pte, NULL)) mc.precharge++; /* increment precharge temporarily */ pte_unmap_unlock(pte - 1, ptl); cond_resched(); |
7dc74be03 memcg: add interf... |
5137 5138 |
return 0; } |
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}
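/*
 * Reserve mc.precharge charges on mc.to up front.  mc.moving_task is set
 * here so that concurrent chargers can wait on mc.waitq until the move
 * is finished (see mem_cgroup_clear_mc() below).
 */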
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}
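/*
 * Unlike __mem_cgroup_clear_mc() above, which is also used on its own by
 * the mmap_sem retry path in mem_cgroup_move_charge(), this full variant
 * additionally resets mc.from/mc.to under mc.lock and ends the move.
 */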
static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}
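/*
 * Charge-moving lifecycle, driven by the cgroup attach callbacks below:
 *
 *   can_attach()    - if move_charge_at_immigrate is set and @p owns its
 *                     mm, record mc.from/mc.to and precharge the worst case.
 *   attach()        - mem_cgroup_move_task() walks the page tables and
 *                     actually moves the charges, then clears mc.
 *   cancel_attach() - the migration was aborted; drop the precharges.
 *
 * Moving is best-effort: a charge that cannot be moved is simply left
 * in mc.from.
 */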
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	int ret = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);

	if (mem->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == mem);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = mem;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	mem_cgroup_clear_mc();
}
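/*
 * Second pass: for every target pte, move the page (or swap) charge from
 * mc.from to mc.to, consuming one precharge per page moved.  If the
 * precharges run out mid-walk, the walk is restarted after recharging
 * one page at a time via mem_cgroup_do_precharge(1).
 */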
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		union mc_target target;
		int type;
		struct page *page;
		struct page_cgroup *pc;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		type = is_target_pte_for_mc(vma, addr, ptent, &target);
		switch (type) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to, false)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* is_target_pte_for_mc() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent,
						mc.from, mc.to, false)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * Now we try to charge one by one, but we don't do any
		 * additional charges to mc.to if we have already failed to
		 * charge once in the attach() phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem may be waiting in
		 * waitq.  So we cancel all extra charges, wake up all
		 * waiters, and retry.  Because we cancel precharges, we
		 * might not be able to move enough charges, but moving
		 * charge is a best-effort feature anyway, so it wouldn't
		 * be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * We have consumed all precharges and failed to make
			 * additional charges; just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}
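/*
 * The attach callback proper: by the time we get here, can_attach() has
 * set up mc and precharged, so all that is left is to walk the mm and
 * clear the move-charge state when done.
 */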
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		put_swap_token(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
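/*
 * Without an MMU there are no page tables to walk, so charge moving is
 * stubbed out and attach becomes a no-op.
 */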
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
}
#endif
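/*
 * Subsystem registration: this wires the callbacks above into the cgroup
 * core under the name "memory".  use_id enables CSS IDs, which the
 * swap-accounting code relies on (see the css_id()/lookup_swap_cgroup()
 * comparison in is_target_pte_for_mc()).
 */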
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};
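/*
 * Boot-time control for swap accounting.  For example, booting with
 * "swapaccount=0" on the kernel command line turns accounting off even
 * when CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED would default it on.
 */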
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

#endif