mm/vmpressure.c

/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *                Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Using a small
 * window size can cause a lot of false positives, but too big a window
 * size will delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use that for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through the
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more unsuccessful
 * reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise fast, as the
 * reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value for vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
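
/*
 * Worked example (illustrative only, not in the original source): ilog2()
 * truncates, so ilog2(100 / 10) = ilog2(10) = 3, i.e. the critical level
 * kicks in once prio <= 3. At prio == 3, vmscan scans lru_size >> 3 pages
 * per pass -- one eighth (12.5%) of the LRU -- which is the "~10%"
 * scanning depth referred to above.
 */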

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
        return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
        struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);

        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;

        return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
        VMPRESSURE_LOW = 0,
        VMPRESSURE_MEDIUM,
        VMPRESSURE_CRITICAL,
        VMPRESSURE_NUM_LEVELS,
};

static const char * const vmpressure_str_levels[] = {
        [VMPRESSURE_LOW] = "low",
        [VMPRESSURE_MEDIUM] = "medium",
        [VMPRESSURE_CRITICAL] = "critical",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
        if (pressure >= vmpressure_level_critical)
                return VMPRESSURE_CRITICAL;
        else if (pressure >= vmpressure_level_med)
                return VMPRESSURE_MEDIUM;
        return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
                                                    unsigned long reclaimed)
{
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure;

        /*
         * We calculate the ratio (in percent) of how many pages were
         * scanned vs. reclaimed in a given time frame (window). Note that
         * time is in VM reclaimer's "ticks", i.e. the number of pages
         * scanned. This makes it possible to set the desired reaction
         * time and serves as a ratelimit.
         */
        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;

        pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
                 scanned, reclaimed);

        return vmpressure_level(pressure);
}
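
/*
 * Worked example (illustrative only, not in the original source): with
 * integer arithmetic the two steps above reduce to
 * pressure = 100 * (scanned - reclaimed) / scanned. E.g. for
 * scanned = 512, reclaimed = 64: scale = 576, 576 - (64 * 576 / 512) = 504,
 * and 504 * 100 / 576 = 87 -- i.e. 87% of the scanned pages were not
 * reclaimed, which is >= 60 but < 95, so vmpressure_level() reports
 * "medium". With reclaimed = 0 the result is 100, which is how
 * vmpressure_prio() forces a "critical" report.
 */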

struct vmpressure_event {
        struct eventfd_ctx *efd;
        enum vmpressure_levels level;
        struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
                             enum vmpressure_levels level)
{
        struct vmpressure_event *ev;
        bool signalled = false;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (level >= ev->level) {
                        eventfd_signal(ev->efd, 1);
                        signalled = true;
                }
        }
        mutex_unlock(&vmpr->events_lock);

        return signalled;
}

static void vmpressure_work_fn(struct work_struct *work)
{
        struct vmpressure *vmpr = work_to_vmpressure(work);
        unsigned long scanned;
        unsigned long reclaimed;
        enum vmpressure_levels level;
|
        spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
         * work context cleared the counters. In that case we will run
         * just after the old work returns, but then scanned might be
         * zero here.
         */
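        /*
         * (Note, an assumption based on generic workqueue semantics, not
         * spelled out in this file: schedule_work() is a no-op while the
         * work item is still pending, so several vmpressure() calls may
         * collapse into a single run of this function.)
         */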
        scanned = vmpr->tree_scanned;
        if (!scanned) {
                spin_unlock(&vmpr->sr_lock);
                return;
        }
|
        reclaimed = vmpr->tree_reclaimed;
        vmpr->tree_scanned = 0;
        vmpr->tree_reclaimed = 0;
        spin_unlock(&vmpr->sr_lock);
|
        level = vmpressure_calc_level(scanned, reclaimed);

        do {
                if (vmpressure_event(vmpr, level))
                        break;
                /*
                 * If not handled, propagate the event upward into the
                 * hierarchy.
                 */
        } while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @tree: legacy subtree mode
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, |
                unsigned long scanned, unsigned long reclaimed)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

        /*
         * Here we only want to account pressure that userland is able to
         * help us with. For example, suppose that the DMA zone is under
         * pressure; if we notify userland about that kind of pressure,
         * then it will be mostly a waste, as it will trigger unnecessary
         * freeing of memory by userland (since userland is more likely to
         * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
         * is why we include only movable, highmem and FS/IO pages.
         * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
         * we account it too.
         */
        if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
                return;

        /*
         * If we got here with no pages scanned, then that is an indicator
         * that the reclaimer was unable to find any shrinkable LRUs at the
         * current scanning depth. But it does not mean that we should
         * report the critical pressure, yet. If the scanning priority
         * (scanning depth) goes too high (deep), we will be notified
         * through vmpressure_prio(). But so far, keep calm.
         */
        if (!scanned)
                return;

        if (tree) {
                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->tree_scanned += scanned;
                vmpr->tree_reclaimed += reclaimed;
                spin_unlock(&vmpr->sr_lock);
|
                if (scanned < vmpressure_win)
                        return;
                schedule_work(&vmpr->work);
        } else {
                enum vmpressure_levels level;

                /* For now, no users for root-level efficiency */
                if (!memcg || memcg == root_mem_cgroup)
                        return;

                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->scanned += scanned;
                reclaimed = vmpr->reclaimed += reclaimed;
                if (scanned < vmpressure_win) {
                        spin_unlock(&vmpr->sr_lock);
                        return;
                }
                vmpr->scanned = vmpr->reclaimed = 0;
                spin_unlock(&vmpr->sr_lock);

                level = vmpressure_calc_level(scanned, reclaimed);

                if (level > VMPRESSURE_LOW) {
                        /*
                         * Let the socket buffer allocator know that
                         * we are having trouble reclaiming LRU pages.
                         *
                         * For hysteresis, keep the pressure state
                         * asserted for a second, during which subsequent
                         * pressure events can occur.
                         */
                        memcg->socket_pressure = jiffies + HZ;
                }
        }
}
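
/*
 * Illustrative sketch (not part of this file): the vmscan reclaim path in
 * mm/vmscan.c is the expected caller. Roughly, shrink_zone()/shrink_node()
 * (depending on kernel version) reports per-memcg efficiency with
 * tree == false, and whole-subtree efficiency for the reclaim root with
 * tree == true, along these lines:
 *
 *      vmpressure(sc->gfp_mask, memcg, false,
 *                 sc->nr_scanned - scanned,
 *                 sc->nr_reclaimed - reclaimed);
 *      ...
 *      vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
 *                 sc->nr_scanned - nr_scanned,
 *                 sc->nr_reclaimed - nr_reclaimed);
 */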

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
        /*
         * We only use prio for accounting the critical level. For more
         * info see the comment for the vmpressure_level_critical_prio
         * variable above.
         */
        if (prio > vmpressure_level_critical_prio)
                return;

        /*
         * OK, the prio is below the threshold, so update the vmpressure
         * information before the shrinker dives into a long shrink of the
         * LRUs. Passing scanned = vmpressure_win, reclaimed = 0 to
         * vmpressure() basically means that we signal the 'critical'
         * level.
         */
        vmpressure(gfp, memcg, true, vmpressure_win, 0);
}

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg: memcg that is interested in vmpressure notifications
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (used to set up a pressure level threshold)
 *
 * This function associates an eventfd context with the vmpressure
 * infrastructure, so that notifications will be delivered to the
 * @eventfd. The @args parameter is a string that denotes the pressure
 * level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical").
 *
 * To be used as a memcg event method.
 */
int vmpressure_register_event(struct mem_cgroup *memcg, |
                              struct eventfd_ctx *eventfd, const char *args)
{ |
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        int level;

        for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
                if (!strcmp(vmpressure_str_levels[level], args))
                        break;
        }

        if (level >= VMPRESSURE_NUM_LEVELS)
                return -EINVAL;

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return -ENOMEM;

        ev->efd = eventfd;
        ev->level = level;

        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);

        return 0;
}
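
/*
 * Illustrative sketch (not part of this file): with the cgroup-v1 memory
 * controller, userspace typically arms these notifications roughly as
 * follows (error handling omitted; the path assumes a memcg mounted at
 * /sys/fs/cgroup/memory/foo):
 *
 *      int efd = eventfd(0, 0);
 *      int pfd = open("/sys/fs/cgroup/memory/foo/memory.pressure_level",
 *                     O_RDONLY);
 *      int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *                     O_WRONLY);
 *      char buf[64];
 *
 *      snprintf(buf, sizeof(buf), "%d %d medium", efd, pfd);
 *      write(cfd, buf, strlen(buf));
 *      // read(efd, ...) now blocks until a "medium" or higher event fires
 */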

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg: memcg handle
 * @eventfd: eventfd context that was used to link vmpressure with @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as a memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg, |
                                 struct eventfd_ctx *eventfd)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ev->efd != eventfd)
                        continue;
                list_del(&ev->node);
                kfree(ev);
                break;
        }
        mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
        spin_lock_init(&vmpr->sr_lock);
        mutex_init(&vmpr->events_lock);
        INIT_LIST_HEAD(&vmpr->events);
        INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
        /*
         * Make sure there is no pending work before eventfd infrastructure
         * goes away.
         */
        flush_work(&vmpr->work);
}
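
/*
 * Illustrative sketch (not part of this file, and call sites vary by
 * kernel version): the memcg code in mm/memcontrol.c is the expected
 * user of this init/cleanup pair, roughly:
 *
 *      vmpressure_init(&memcg->vmpressure);    // when the memcg is allocated
 *      ...
 *      vmpressure_cleanup(&memcg->vmpressure); // before the memcg is freed
 */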