Blame view
mm/swap_cgroup.c
4.6 KB
5d1ea48bd mm: page_cgroup: ... |
1 |
#include <linux/swap_cgroup.h> |
4c8210427 mm: page_cgroup n... |
2 |
#include <linux/vmalloc.h> |
5d1ea48bd mm: page_cgroup: ... |
3 |
#include <linux/mm.h> |
27a7faa07 memcg: swap cgrou... |
4 |
|
5d1ea48bd mm: page_cgroup: ... |
5 |
#include <linux/swapops.h> /* depends on mm.h include */ |
27a7faa07 memcg: swap cgrou... |
/* Serializes swapon/swapoff setup and teardown of the per-type state below. */
static DEFINE_MUTEX(swap_cgroup_mutex);

/* Per-swap-type bookkeeping for the mem_cgroup id records. */
struct swap_cgroup_ctrl {
	struct page **map;	/* vzalloc'd array of pages holding the records */
	unsigned long length;	/* number of pages in @map */
	spinlock_t lock;	/* protects record updates (record/cmpxchg) */
};

/* One control structure per swap type (swapfile/partition). */
static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
27a7faa07 memcg: swap cgrou... |
14 |
|
/*
 * One record per swap entry: the id of the mem_cgroup that owns the charge.
 * An id of 0 means "no cgroup" -- see lookup_swap_cgroup_id().
 */
struct swap_cgroup {
	unsigned short id;
};
/* Number of swap_cgroup records that fit in one backing page. */
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
27a7faa07 memcg: swap cgrou... |
19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
/* * SwapCgroup implements "lookup" and "exchange" operations. * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge * against SwapCache. At swap_free(), this is accessed directly from swap. * * This means, * - we have no race in "exchange" when we're accessed via SwapCache because * SwapCache(and its swp_entry) is under lock. * - When called via swap_free(), there is no user of this entry and no race. * Then, we don't need lock around "exchange". * * TODO: we can push these buffers out to HIGHMEM. */ /* * allocate buffer for swap_cgroup. */ static int swap_cgroup_prepare(int type) { struct page *page; struct swap_cgroup_ctrl *ctrl; unsigned long idx, max; |
27a7faa07 memcg: swap cgrou... |
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
ctrl = &swap_cgroup_ctrl[type]; for (idx = 0; idx < ctrl->length; idx++) { page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto not_enough_page; ctrl->map[idx] = page; } return 0; not_enough_page: max = idx; for (idx = 0; idx < max; idx++) __free_page(ctrl->map[idx]); return -ENOMEM; } |
9fb4b7cc0 page_cgroup: add ... |
58 59 60 61 62 63 |
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, struct swap_cgroup_ctrl **ctrlp) { pgoff_t offset = swp_offset(ent); struct swap_cgroup_ctrl *ctrl; struct page *mappage; |
c09ff089a page_cgroup: fix ... |
64 |
struct swap_cgroup *sc; |
9fb4b7cc0 page_cgroup: add ... |
65 66 67 68 69 70 |
ctrl = &swap_cgroup_ctrl[swp_type(ent)]; if (ctrlp) *ctrlp = ctrl; mappage = ctrl->map[offset / SC_PER_PAGE]; |
c09ff089a page_cgroup: fix ... |
71 72 |
sc = page_address(mappage); return sc + offset % SC_PER_PAGE; |
9fb4b7cc0 page_cgroup: add ... |
73 |
} |
27a7faa07 memcg: swap cgrou... |
74 |
/** |
024914477 memcg: move charg... |
75 |
* swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. |
dad7557eb mm: fix kernel-do... |
76 |
* @ent: swap entry to be cmpxchged |
024914477 memcg: move charg... |
77 78 79 80 |
* @old: old id * @new: new id * * Returns old id at success, 0 at failure. |
25985edce Fix common misspe... |
81 |
* (There is no mem_cgroup using 0 as its id) |
024914477 memcg: move charg... |
82 83 84 85 |
*/ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, unsigned short old, unsigned short new) { |
024914477 memcg: move charg... |
86 |
struct swap_cgroup_ctrl *ctrl; |
024914477 memcg: move charg... |
87 |
struct swap_cgroup *sc; |
e9e58a4ec memcg: avoid use ... |
88 89 |
unsigned long flags; unsigned short retval; |
024914477 memcg: move charg... |
90 |
|
9fb4b7cc0 page_cgroup: add ... |
91 |
sc = lookup_swap_cgroup(ent, &ctrl); |
024914477 memcg: move charg... |
92 |
|
e9e58a4ec memcg: avoid use ... |
93 94 95 96 |
spin_lock_irqsave(&ctrl->lock, flags); retval = sc->id; if (retval == old) sc->id = new; |
024914477 memcg: move charg... |
97 |
else |
e9e58a4ec memcg: avoid use ... |
98 99 100 |
retval = 0; spin_unlock_irqrestore(&ctrl->lock, flags); return retval; |
024914477 memcg: move charg... |
101 102 103 |
} /** |
27a7faa07 memcg: swap cgrou... |
104 105 |
* swap_cgroup_record - record mem_cgroup for this swp_entry. * @ent: swap entry to be recorded into |
dad7557eb mm: fix kernel-do... |
106 |
* @id: mem_cgroup to be recorded |
27a7faa07 memcg: swap cgrou... |
107 |
* |
a3b2d6926 cgroups: use css ... |
108 109 |
* Returns old value at success, 0 at failure. * (Of course, old value can be 0.) |
27a7faa07 memcg: swap cgrou... |
110 |
*/ |
a3b2d6926 cgroups: use css ... |
111 |
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) |
27a7faa07 memcg: swap cgrou... |
112 |
{ |
27a7faa07 memcg: swap cgrou... |
113 |
struct swap_cgroup_ctrl *ctrl; |
27a7faa07 memcg: swap cgrou... |
114 |
struct swap_cgroup *sc; |
a3b2d6926 cgroups: use css ... |
115 |
unsigned short old; |
e9e58a4ec memcg: avoid use ... |
116 |
unsigned long flags; |
27a7faa07 memcg: swap cgrou... |
117 |
|
9fb4b7cc0 page_cgroup: add ... |
118 |
sc = lookup_swap_cgroup(ent, &ctrl); |
27a7faa07 memcg: swap cgrou... |
119 |
|
e9e58a4ec memcg: avoid use ... |
120 121 122 123 |
spin_lock_irqsave(&ctrl->lock, flags); old = sc->id; sc->id = id; spin_unlock_irqrestore(&ctrl->lock, flags); |
27a7faa07 memcg: swap cgrou... |
124 125 126 127 128 |
return old; } /** |
9fb4b7cc0 page_cgroup: add ... |
129 |
* lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry |
27a7faa07 memcg: swap cgrou... |
130 131 |
* @ent: swap entry to be looked up. * |
b3ff8a2f9 cgroup: remove st... |
132 |
* Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) |
27a7faa07 memcg: swap cgrou... |
133 |
*/ |
9fb4b7cc0 page_cgroup: add ... |
134 |
unsigned short lookup_swap_cgroup_id(swp_entry_t ent) |
27a7faa07 memcg: swap cgrou... |
135 |
{ |
9fb4b7cc0 page_cgroup: add ... |
136 |
return lookup_swap_cgroup(ent, NULL)->id; |
27a7faa07 memcg: swap cgrou... |
137 138 139 140 141 142 143 144 145 146 147 |
} int swap_cgroup_swapon(int type, unsigned long max_pages) { void *array; unsigned long array_size; unsigned long length; struct swap_cgroup_ctrl *ctrl; if (!do_swap_account) return 0; |
33278f7f0 memcg: fix off-by... |
148 |
length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); |
27a7faa07 memcg: swap cgrou... |
149 |
array_size = length * sizeof(void *); |
8c1fec1ba mm: Convert vmall... |
150 |
array = vzalloc(array_size); |
27a7faa07 memcg: swap cgrou... |
151 152 |
if (!array) goto nomem; |
27a7faa07 memcg: swap cgrou... |
153 154 155 156 |
ctrl = &swap_cgroup_ctrl[type]; mutex_lock(&swap_cgroup_mutex); ctrl->length = length; ctrl->map = array; |
e9e58a4ec memcg: avoid use ... |
157 |
spin_lock_init(&ctrl->lock); |
27a7faa07 memcg: swap cgrou... |
158 159 160 161 |
if (swap_cgroup_prepare(type)) { /* memory shortage */ ctrl->map = NULL; ctrl->length = 0; |
27a7faa07 memcg: swap cgrou... |
162 |
mutex_unlock(&swap_cgroup_mutex); |
6a5b18d2b memcg: move page-... |
163 |
vfree(array); |
27a7faa07 memcg: swap cgrou... |
164 165 166 |
goto nomem; } mutex_unlock(&swap_cgroup_mutex); |
27a7faa07 memcg: swap cgrou... |
167 168 |
return 0; nomem: |
1170532bb mm: convert print... |
169 170 171 172 |
pr_info("couldn't allocate enough memory for swap_cgroup "); pr_info("swap_cgroup can be disabled by swapaccount=0 boot option "); |
27a7faa07 memcg: swap cgrou... |
173 174 175 176 177 |
return -ENOMEM; } void swap_cgroup_swapoff(int type) { |
6a5b18d2b memcg: move page-... |
178 179 |
struct page **map; unsigned long i, length; |
27a7faa07 memcg: swap cgrou... |
180 181 182 183 184 185 186 |
struct swap_cgroup_ctrl *ctrl; if (!do_swap_account) return; mutex_lock(&swap_cgroup_mutex); ctrl = &swap_cgroup_ctrl[type]; |
6a5b18d2b memcg: move page-... |
187 188 189 190 191 192 193 194 195 |
map = ctrl->map; length = ctrl->length; ctrl->map = NULL; ctrl->length = 0; mutex_unlock(&swap_cgroup_mutex); if (map) { for (i = 0; i < length; i++) { struct page *page = map[i]; |
27a7faa07 memcg: swap cgrou... |
196 197 198 |
if (page) __free_page(page); } |
6a5b18d2b memcg: move page-... |
199 |
vfree(map); |
27a7faa07 memcg: swap cgrou... |
200 |
} |
27a7faa07 memcg: swap cgrou... |
201 |
} |