include/linux/memcontrol.h
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c.
 * These two lists should be kept in accord with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
	/* default hierarchy events */
	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};
#ifdef CONFIG_MEMCG
void mem_cgroup_events(struct mem_cgroup *memcg,
		       enum mem_cgroup_events_index idx,
		       unsigned int nr);

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
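/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * charge API above is transactional.  A new page is charged with
 * mem_cgroup_try_charge(), then either committed once it has been safely
 * mapped, or cancelled if the caller backs out, e.g. in a fault path:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	... map the page, add it to the LRU ...
 *	if (something_went_wrong) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 *
 * mem_cgroup_uncharge() (or mem_cgroup_uncharge_list() for a batch) is the
 * eventual release path once the page is finally freed.
 */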
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
			      struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
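/*
 * Usage sketch (illustrative only): mem_cgroup_iter() walks the hierarchy
 * below @root one memcg at a time; passing the previous return value back in
 * continues the walk, and mem_cgroup_iter_break() must be used when a walk
 * is abandoned early so the iterator can drop its reference:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	do {
 *		... scan memcg's lruvecs ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, &reclaim);
 *	} while (memcg);
 */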
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, -1);
}
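/*
 * Usage sketch (illustrative only): page-state statistics are updated inside
 * a begin/end pair, which pins (and, when a task is being moved, locks) the
 * page's memcg so the page cannot change cgroups in between:
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_page_stat(memcg);
 *
 * The update helpers tolerate a NULL memcg from mem_cgroup_begin_page_stat().
 */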
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
				     enum mem_cgroup_events_index idx,
				     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
				      struct page *newpage,
				      bool lrucare)
{
}
static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}
static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG */
enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
extern void memcg_get_cache_ids(void);
extern void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
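/*
 * Usage sketch (illustrative only; memcg_cache_lookup() is a stand-in for
 * the slab-internal helper that maps a root cache and index to the child
 * cache).  With slab_mutex held:
 *
 *	int i;
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = memcg_cache_lookup(root_cache, i);
 *
 *		if (!c)
 *			continue;
 *		... operate on the per-memcg cache c ...
 *	}
 */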
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

bool memcg_kmem_is_active(struct mem_cgroup *memcg);

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
				 int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);

struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);

struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);

int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
		      unsigned long nr_pages);
void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	if (gfp & __GFP_NOACCOUNT)
		return true;
	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it forcibly, but we hope
	 * those allocations are rare, and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the test is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. if @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @page to @memcg.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
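/*
 * Usage sketch (illustrative only): the three helpers above bracket one
 * accounted page allocation.  newpage_charge reserves against the current
 * task's memcg, commit_charge binds the freshly allocated page to it (or
 * reverts the reservation if the allocation failed), and uncharge_pages
 * releases the charge again at free time:
 *
 *	struct mem_cgroup *memcg;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	...
 *	memcg_kmem_uncharge_pages(page, order);
 */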
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOACCOUNT)
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep);
}

static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_put_cache(cachep);
}
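/*
 * Usage sketch (illustrative only): slab allocation paths wrap the actual
 * allocation in a get/put pair, so the object comes from (and is accounted
 * to) the current task's per-memcg child cache when kmem accounting is
 * active, and from the original cache otherwise:
 *
 *	struct kmem_cache *s;
 *
 *	s = memcg_kmem_get_cache(cachep, gfp);
 *	... allocate the object from s ...
 *	memcg_kmem_put_cache(s);
 */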
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	if (!memcg_kmem_enabled())
		return NULL;
	return __mem_cgroup_from_kmem(ptr);
}
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}

static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */