mm/quicklist.c
/*
 * Quicklist support.
 *
 * Quicklists are lightweight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist-specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <clameter@sgi.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/quicklist.h>
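
/*
 * One array of CONFIG_NR_QUICK quicklists per CPU. The inline
 * quicklist_alloc()/quicklist_free() helpers in <linux/quicklist.h>
 * index into this array by quicklist number.
 */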
DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
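
/*
 * Each per-cpu quicklist is trimmed back to at most
 * 1/FRACTION_OF_NODE_MEM of the node's free pages divided by the
 * number of CPUs on the node; max_pages() computes that per-list
 * ceiling (never below min_pages).
 */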
#define FRACTION_OF_NODE_MEM	16

static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;
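
	/*
	 * quicklist_alloc() hands pages out as kernel virtual addresses
	 * obtained with __get_free_page(), which never come from highmem,
	 * so only the lowmem zones are counted here.
	 */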
	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;
	return max(max, min_pages);
}

static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}

unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
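
For context, a minimal sketch of how architecture code consumed this API, modeled on the ia64-style page-table allocators that were the main quicklist users. The list number QUICK_PGTABLE and the trim thresholds below are illustrative assumptions, not values taken from this file:

/* Illustrative arch-side use of the quicklist API. */
#include <linux/quicklist.h>
#include <linux/mm.h>

#define QUICK_PGTABLE	0	/* assumed list number, < CONFIG_NR_QUICK */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* Reuse a cached page if one is on this CPU's list; otherwise
	 * quicklist_alloc() falls back to the page allocator and returns
	 * a page already in the defined state (zeroed, as the ctor is NULL). */
	return quicklist_alloc(QUICK_PGTABLE, GFP_KERNEL, NULL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* The caller must hand the page back in the defined state,
	 * i.e. with all entries already cleared. */
	quicklist_free(QUICK_PGTABLE, NULL, pgd);
}

static inline void check_pgt_cache(void)
{
	/* Trim the current CPU's list back toward 25 pages, freeing at
	 * most 16 pages per call (both thresholds are illustrative). */
	quicklist_trim(QUICK_PGTABLE, NULL, 25, 16);
}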