Blame view
mm/percpu.c
66.2 KB
fbf59bc9d percpu: implement... |
1 |
/* |
88999a898 percpu: misc prep... |
2 |
* mm/percpu.c - percpu memory allocator |
fbf59bc9d percpu: implement... |
3 4 5 6 7 8 9 |
* * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * This is percpu allocator which can handle both static and dynamic |
88999a898 percpu: misc prep... |
10 11 12 |
* areas. Percpu areas are allocated in chunks. Each chunk is * consisted of boot-time determined number of units and the first * chunk is used for static percpu variables in the kernel image |
2f39e637e percpu: allow non... |
13 14 15 |
* (special boot time alloc/init handling necessary as these areas * need to be brought up before allocation services are running). * Unit grows as necessary and all units grow or shrink in unison. |
88999a898 percpu: misc prep... |
16 |
* When a chunk is filled up, another chunk is allocated. |
fbf59bc9d percpu: implement... |
17 18 19 20 21 22 23 24 |
* * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done in offset-size areas of single unit space. Ie, * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, |
2f39e637e percpu: allow non... |
25 26 27 28 |
* c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to * cpus. On NUMA, the mapping can be non-linear and even sparse. * Percpu access can be done by configuring percpu base registers * according to cpu to unit mapping and pcpu_unit_size. |
fbf59bc9d percpu: implement... |
29 |
* |
2f39e637e percpu: allow non... |
30 31 |
* There are usually many small percpu allocations many of them being * as small as 4 bytes. The allocator organizes chunks into lists |
fbf59bc9d percpu: implement... |
32 33 |
* according to free size and tries to allocate from the fullest one. * Each chunk keeps the maximum contiguous area size hint which is |
4785879e4 fix a typo on com... |
34 |
* guaranteed to be equal to or larger than the maximum contiguous |
fbf59bc9d percpu: implement... |
35 36 37 38 39 40 41 42 |
* area in the chunk. This helps the allocator not to iterate the * chunk maps unnecessarily. * * Allocation state in each chunk is kept using an array of integers * on chunk->map. A positive value in the map represents a free * region and negative allocated. Allocation inside a chunk is done * by scanning this map sequentially and serving the first matching * entry. This is mostly copied from the percpu_modalloc() allocator. |
e1b9aa3f4 percpu: remove rb... |
43 44 |
* Chunks can be determined from the address using the index field * in the page struct. The index field contains a pointer to the chunk. |
fbf59bc9d percpu: implement... |
45 46 47 |
* * To use this allocator, arch code should do the followings. * |
fbf59bc9d percpu: implement... |
48 |
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate |
e01009833 percpu: make x86 ... |
49 50 |
* regular address to percpu pointer and back if they need to be * different from the default |
fbf59bc9d percpu: implement... |
51 |
* |
8d408b4be percpu: give more... |
52 53 |
* - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area |
fbf59bc9d percpu: implement... |
54 |
*/ |
870d4b12a mm: percpu: use p... |
55 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
fbf59bc9d percpu: implement... |
56 57 |
#include <linux/bitmap.h> #include <linux/bootmem.h> |
fd1e8a1fe percpu: introduce... |
58 |
#include <linux/err.h> |
fbf59bc9d percpu: implement... |
59 |
#include <linux/list.h> |
a530b7958 percpu: teach lar... |
60 |
#include <linux/log2.h> |
fbf59bc9d percpu: implement... |
61 62 63 64 65 |
#include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> |
fbf59bc9d percpu: implement... |
66 |
#include <linux/slab.h> |
ccea34b5d percpu: finer gra... |
67 |
#include <linux/spinlock.h> |
fbf59bc9d percpu: implement... |
68 |
#include <linux/vmalloc.h> |
a56dbddf0 percpu: move full... |
69 |
#include <linux/workqueue.h> |
f528f0b8e kmemleak: Handle ... |
70 |
#include <linux/kmemleak.h> |
fbf59bc9d percpu: implement... |
71 72 |
#include <asm/cacheflush.h> |
e01009833 percpu: make x86 ... |
73 |
#include <asm/sections.h> |
fbf59bc9d percpu: implement... |
74 |
#include <asm/tlbflush.h> |
3b034b0d0 percpu: Fix kdump... |
75 |
#include <asm/io.h> |
fbf59bc9d percpu: implement... |
76 |
|
fbf59bc9d percpu: implement... |
77 78 |
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
9c824b6a1 percpu: make sure... |
79 80 |
#define PCPU_ATOMIC_MAP_MARGIN_LOW 32 #define PCPU_ATOMIC_MAP_MARGIN_HIGH 64 |
1a4d76076 percpu: implement... |
81 82 |
#define PCPU_EMPTY_POP_PAGES_LOW 2 #define PCPU_EMPTY_POP_PAGES_HIGH 4 |
fbf59bc9d percpu: implement... |
83 |
|
bbddff054 percpu: use percp... |
84 |
#ifdef CONFIG_SMP |
e01009833 percpu: make x86 ... |
85 86 87 |
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ |
43cf38eb5 percpu: add __per... |
88 89 90 |
(void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) |
e01009833 percpu: make x86 ... |
91 92 93 |
#endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ |
43cf38eb5 percpu: add __per... |
94 95 96 |
(void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) |
e01009833 percpu: make x86 ... |
97 |
#endif |
bbddff054 percpu: use percp... |
98 99 100 101 102 |
#else /* CONFIG_SMP */ /* on UP, it's always identity mapped */ #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif /* CONFIG_SMP */ |
e01009833 percpu: make x86 ... |
103 |
|
fbf59bc9d percpu: implement... |
104 105 |
struct pcpu_chunk { struct list_head list; /* linked to pcpu_slot lists */ |
fbf59bc9d percpu: implement... |
106 107 |
int free_size; /* free bytes in the chunk */ int contig_hint; /* max contiguous size hint */ |
bba174f5e percpu: add chunk... |
108 |
void *base_addr; /* base address of this chunk */ |
9c824b6a1 percpu: make sure... |
109 |
|
723ad1d90 percpu: store off... |
110 |
int map_used; /* # of map entries used before the sentry */ |
fbf59bc9d percpu: implement... |
111 112 |
int map_alloc; /* # of map entries allocated */ int *map; /* allocation map */ |
4f996e234 percpu: fix synch... |
113 |
struct list_head map_extend_list;/* on pcpu_map_extend_chunks */ |
9c824b6a1 percpu: make sure... |
114 |
|
88999a898 percpu: misc prep... |
115 |
void *data; /* chunk data */ |
3d331ad74 percpu: speed all... |
116 |
int first_free; /* no free below this */ |
8d408b4be percpu: give more... |
117 |
bool immutable; /* no [de]population allowed */ |
b539b87fe percpu: implmeent... |
118 |
int nr_populated; /* # of populated pages */ |
ce3141a27 percpu: drop pcpu... |
119 |
unsigned long populated[]; /* populated bitmap */ |
fbf59bc9d percpu: implement... |
120 |
}; |
40150d37b percpu: add __rea... |
121 122 |
static int pcpu_unit_pages __read_mostly; static int pcpu_unit_size __read_mostly; |
2f39e637e percpu: allow non... |
123 |
static int pcpu_nr_units __read_mostly; |
6563297ce percpu: use group... |
124 |
static int pcpu_atom_size __read_mostly; |
40150d37b percpu: add __rea... |
125 126 |
static int pcpu_nr_slots __read_mostly; static size_t pcpu_chunk_struct_size __read_mostly; |
fbf59bc9d percpu: implement... |
127 |
|
a855b84c3 percpu: fix chunk... |
128 129 130 |
/* cpus with the lowest and highest unit addresses */ static unsigned int pcpu_low_unit_cpu __read_mostly; static unsigned int pcpu_high_unit_cpu __read_mostly; |
2f39e637e percpu: allow non... |
131 |
|
fbf59bc9d percpu: implement... |
132 |
/* the address of the first chunk which starts with the kernel static area */ |
40150d37b percpu: add __rea... |
133 |
void *pcpu_base_addr __read_mostly; |
fbf59bc9d percpu: implement... |
134 |
EXPORT_SYMBOL_GPL(pcpu_base_addr); |
fb435d523 percpu: add pcpu_... |
135 136 |
static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ |
2f39e637e percpu: allow non... |
137 |
|
6563297ce percpu: use group... |
138 139 140 141 |
/* group information, used for vm allocation */ static int pcpu_nr_groups __read_mostly; static const unsigned long *pcpu_group_offsets __read_mostly; static const size_t *pcpu_group_sizes __read_mostly; |
ae9e6bc9f percpu: don't put... |
142 143 144 145 146 147 148 149 150 151 152 153 154 155 |
/* * The first chunk which always exists. Note that unlike other * chunks, this one can be allocated and mapped in several different * ways and thus often doesn't live in the vmalloc area. */ static struct pcpu_chunk *pcpu_first_chunk; /* * Optional reserved chunk. This chunk reserves part of the first * chunk and serves it for reserved allocations. The amount of * reserved offset is in pcpu_reserved_chunk_limit. When reserved * area doesn't exist, the following variables contain NULL and 0 * respectively. */ |
edcb46399 percpu, module: i... |
156 |
static struct pcpu_chunk *pcpu_reserved_chunk; |
edcb46399 percpu, module: i... |
157 |
static int pcpu_reserved_chunk_limit; |
b38d08f31 percpu: restructu... |
158 |
static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ |
6710e594f percpu: fix synch... |
159 |
static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ |
fbf59bc9d percpu: implement... |
160 |
|
40150d37b percpu: add __rea... |
161 |
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ |
fbf59bc9d percpu: implement... |
162 |
|
4f996e234 percpu: fix synch... |
163 164 |
/* chunks which need their map areas extended, protected by pcpu_lock */ static LIST_HEAD(pcpu_map_extend_chunks); |
b539b87fe percpu: implmeent... |
165 166 167 168 169 |
/* * The number of empty populated pages, protected by pcpu_lock. The * reserved chunk doesn't contribute to the count. */ static int pcpu_nr_empty_pop_pages; |
1a4d76076 percpu: implement... |
170 171 172 173 174 175 |
/* * Balance work is used to populate or destroy chunks asynchronously. We * try to keep the number of populated free pages between * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one * empty chunk. */ |
fe6bd8c3d percpu: rename pc... |
176 177 |
static void pcpu_balance_workfn(struct work_struct *work); static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); |
1a4d76076 percpu: implement... |
178 179 180 181 182 183 184 185 |
static bool pcpu_async_enabled __read_mostly; static bool pcpu_atomic_alloc_failed; static void pcpu_schedule_balance_work(void) { if (pcpu_async_enabled) schedule_work(&pcpu_balance_work); } |
a56dbddf0 percpu: move full... |
186 |
|
020ec6537 percpu: factor ou... |
187 188 189 190 191 192 193 194 195 196 197 198 199 200 |
static bool pcpu_addr_in_first_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_unit_size; } static bool pcpu_addr_in_reserved_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_reserved_chunk_limit; } |
d9b55eeb1 percpu: remove un... |
201 |
static int __pcpu_size_to_slot(int size) |
fbf59bc9d percpu: implement... |
202 |
{ |
cae3aeb83 percpu: clean up ... |
203 |
int highbit = fls(size); /* size is in bytes */ |
fbf59bc9d percpu: implement... |
204 205 |
return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); } |
d9b55eeb1 percpu: remove un... |
206 207 208 209 210 211 |
static int pcpu_size_to_slot(int size) { if (size == pcpu_unit_size) return pcpu_nr_slots - 1; return __pcpu_size_to_slot(size); } |
fbf59bc9d percpu: implement... |
212 213 214 215 216 217 218 |
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) { if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) return 0; return pcpu_size_to_slot(chunk->free_size); } |
88999a898 percpu: misc prep... |
219 220 221 222 223 224 225 226 227 228 229 230 231 |
/* set the pointer to a chunk in a page struct */ static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) { page->index = (unsigned long)pcpu; } /* obtain pointer to a chunk from a page struct */ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) { return (struct pcpu_chunk *)page->index; } static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) |
fbf59bc9d percpu: implement... |
232 |
{ |
2f39e637e percpu: allow non... |
233 |
return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; |
fbf59bc9d percpu: implement... |
234 |
} |
9983b6f0c percpu: fix first... |
235 236 |
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) |
fbf59bc9d percpu: implement... |
237 |
{ |
bba174f5e percpu: add chunk... |
238 |
return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + |
fb435d523 percpu: add pcpu_... |
239 |
(page_idx << PAGE_SHIFT); |
fbf59bc9d percpu: implement... |
240 |
} |
88999a898 percpu: misc prep... |
241 242 |
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) |
ce3141a27 percpu: drop pcpu... |
243 244 245 246 |
{ *rs = find_next_zero_bit(chunk->populated, end, *rs); *re = find_next_bit(chunk->populated, end, *rs + 1); } |
88999a898 percpu: misc prep... |
247 248 |
static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) |
ce3141a27 percpu: drop pcpu... |
249 250 251 252 253 254 255 |
{ *rs = find_next_bit(chunk->populated, end, *rs); *re = find_next_zero_bit(chunk->populated, end, *rs + 1); } /* * (Un)populated page region iterators. Iterate over (un)populated |
b595076a1 tree-wide: fix co... |
256 |
* page regions between @start and @end in @chunk. @rs and @re should |
ce3141a27 percpu: drop pcpu... |
257 258 259 260 261 262 263 264 265 266 267 268 |
* be integer variables and will be set to start and end page index of * the current region. */ #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ (rs) < (re); \ (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ (rs) < (re); \ (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) |
fbf59bc9d percpu: implement... |
269 |
/** |
90459ce06 percpu: rename pc... |
270 |
* pcpu_mem_zalloc - allocate memory |
1880d93b8 percpu: replace p... |
271 |
* @size: bytes to allocate |
fbf59bc9d percpu: implement... |
272 |
* |
1880d93b8 percpu: replace p... |
273 |
* Allocate @size bytes. If @size is smaller than PAGE_SIZE, |
90459ce06 percpu: rename pc... |
274 |
* kzalloc() is used; otherwise, vzalloc() is used. The returned |
1880d93b8 percpu: replace p... |
275 |
* memory is always zeroed. |
fbf59bc9d percpu: implement... |
276 |
* |
ccea34b5d percpu: finer gra... |
277 278 279 |
* CONTEXT: * Does GFP_KERNEL allocation. * |
fbf59bc9d percpu: implement... |
280 |
* RETURNS: |
1880d93b8 percpu: replace p... |
281 |
* Pointer to the allocated area on success, NULL on failure. |
fbf59bc9d percpu: implement... |
282 |
*/ |
90459ce06 percpu: rename pc... |
283 |
static void *pcpu_mem_zalloc(size_t size) |
fbf59bc9d percpu: implement... |
284 |
{ |
099a19d91 percpu: allow lim... |
285 286 |
if (WARN_ON_ONCE(!slab_is_available())) return NULL; |
1880d93b8 percpu: replace p... |
287 288 |
if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); |
7af4c0932 percpu: zero memo... |
289 290 |
else return vzalloc(size); |
1880d93b8 percpu: replace p... |
291 |
} |
fbf59bc9d percpu: implement... |
292 |
|
1880d93b8 percpu: replace p... |
293 294 295 |
/** * pcpu_mem_free - free memory * @ptr: memory to free |
1880d93b8 percpu: replace p... |
296 |
* |
90459ce06 percpu: rename pc... |
297 |
* Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). |
1880d93b8 percpu: replace p... |
298 |
*/ |
1d5cfdb07 tree wide: use kv... |
299 |
static void pcpu_mem_free(void *ptr) |
1880d93b8 percpu: replace p... |
300 |
{ |
1d5cfdb07 tree wide: use kv... |
301 |
kvfree(ptr); |
fbf59bc9d percpu: implement... |
302 303 304 |
} /** |
b539b87fe percpu: implmeent... |
305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 |
* pcpu_count_occupied_pages - count the number of pages an area occupies * @chunk: chunk of interest * @i: index of the area in question * * Count the number of pages chunk's @i'th area occupies. When the area's * start and/or end address isn't aligned to page boundary, the straddled * page is included in the count iff the rest of the page is free. */ static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i) { int off = chunk->map[i] & ~1; int end = chunk->map[i + 1] & ~1; if (!PAGE_ALIGNED(off) && i > 0) { int prev = chunk->map[i - 1]; if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE)) off = round_down(off, PAGE_SIZE); } if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) { int next = chunk->map[i + 1]; int nend = chunk->map[i + 2] & ~1; if (!(next & 1) && nend >= round_up(end, PAGE_SIZE)) end = round_up(end, PAGE_SIZE); } return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0); } /** |
fbf59bc9d percpu: implement... |
337 338 339 340 341 342 |
* pcpu_chunk_relocate - put chunk in the appropriate chunk slot * @chunk: chunk of interest * @oslot: the previous slot it was on * * This function is called after an allocation or free changed @chunk. * New slot according to the changed state is determined and @chunk is |
edcb46399 percpu, module: i... |
343 344 |
* moved to the slot. Note that the reserved chunk is never put on * chunk slots. |
ccea34b5d percpu: finer gra... |
345 346 347 |
* * CONTEXT: * pcpu_lock. |
fbf59bc9d percpu: implement... |
348 349 350 351 |
*/ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) { int nslot = pcpu_chunk_slot(chunk); |
edcb46399 percpu, module: i... |
352 |
if (chunk != pcpu_reserved_chunk && oslot != nslot) { |
fbf59bc9d percpu: implement... |
353 354 355 356 357 358 |
if (oslot < nslot) list_move(&chunk->list, &pcpu_slot[nslot]); else list_move_tail(&chunk->list, &pcpu_slot[nslot]); } } |
fbf59bc9d percpu: implement... |
359 |
/** |
833af8427 percpu: restructu... |
360 361 |
* pcpu_need_to_extend - determine whether chunk area map needs to be extended * @chunk: chunk of interest |
9c824b6a1 percpu: make sure... |
362 |
* @is_atomic: the allocation context |
9f7dcf224 percpu: move chun... |
363 |
* |
9c824b6a1 percpu: make sure... |
364 365 366 367 368 369 |
* Determine whether area map of @chunk needs to be extended. If * @is_atomic, only the amount necessary for a new allocation is * considered; however, async extension is scheduled if the left amount is * low. If !@is_atomic, it aims for more empty space. Combined, this * ensures that the map is likely to have enough available space to * accomodate atomic allocations which can't extend maps directly. |
9f7dcf224 percpu: move chun... |
370 |
* |
ccea34b5d percpu: finer gra... |
371 |
* CONTEXT: |
833af8427 percpu: restructu... |
372 |
* pcpu_lock. |
ccea34b5d percpu: finer gra... |
373 |
* |
9f7dcf224 percpu: move chun... |
374 |
* RETURNS: |
833af8427 percpu: restructu... |
375 376 |
* New target map allocation length if extension is necessary, 0 * otherwise. |
9f7dcf224 percpu: move chun... |
377 |
*/ |
9c824b6a1 percpu: make sure... |
378 |
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic) |
9f7dcf224 percpu: move chun... |
379 |
{ |
9c824b6a1 percpu: make sure... |
380 |
int margin, new_alloc; |
4f996e234 percpu: fix synch... |
381 |
lockdep_assert_held(&pcpu_lock); |
9c824b6a1 percpu: make sure... |
382 383 |
if (is_atomic) { margin = 3; |
9f7dcf224 percpu: move chun... |
384 |
|
9c824b6a1 percpu: make sure... |
385 |
if (chunk->map_alloc < |
4f996e234 percpu: fix synch... |
386 387 388 389 390 391 392 |
chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) { if (list_empty(&chunk->map_extend_list)) { list_add_tail(&chunk->map_extend_list, &pcpu_map_extend_chunks); pcpu_schedule_balance_work(); } } |
9c824b6a1 percpu: make sure... |
393 394 395 396 397 |
} else { margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; } if (chunk->map_alloc >= chunk->map_used + margin) |
9f7dcf224 percpu: move chun... |
398 399 400 |
return 0; new_alloc = PCPU_DFL_MAP_ALLOC; |
9c824b6a1 percpu: make sure... |
401 |
while (new_alloc < chunk->map_used + margin) |
9f7dcf224 percpu: move chun... |
402 |
new_alloc *= 2; |
833af8427 percpu: restructu... |
403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 |
return new_alloc; } /** * pcpu_extend_area_map - extend area map of a chunk * @chunk: chunk of interest * @new_alloc: new target allocation length of the area map * * Extend area map of @chunk to have @new_alloc entries. * * CONTEXT: * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock. * * RETURNS: * 0 on success, -errno on failure. */ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) { int *old = NULL, *new = NULL; size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); unsigned long flags; |
6710e594f percpu: fix synch... |
424 |
lockdep_assert_held(&pcpu_alloc_mutex); |
90459ce06 percpu: rename pc... |
425 |
new = pcpu_mem_zalloc(new_size); |
833af8427 percpu: restructu... |
426 |
if (!new) |
9f7dcf224 percpu: move chun... |
427 |
return -ENOMEM; |
ccea34b5d percpu: finer gra... |
428 |
|
833af8427 percpu: restructu... |
429 430 431 432 433 |
/* acquire pcpu_lock and switch to new area map */ spin_lock_irqsave(&pcpu_lock, flags); if (new_alloc <= chunk->map_alloc) goto out_unlock; |
9f7dcf224 percpu: move chun... |
434 |
|
833af8427 percpu: restructu... |
435 |
old_size = chunk->map_alloc * sizeof(chunk->map[0]); |
a002d1484 percpu: fix a mem... |
436 437 438 |
old = chunk->map; memcpy(new, old, old_size); |
9f7dcf224 percpu: move chun... |
439 |
|
9f7dcf224 percpu: move chun... |
440 441 |
chunk->map_alloc = new_alloc; chunk->map = new; |
833af8427 percpu: restructu... |
442 443 444 445 446 447 448 449 450 |
new = NULL; out_unlock: spin_unlock_irqrestore(&pcpu_lock, flags); /* * pcpu_mem_free() might end up calling vfree() which uses * IRQ-unsafe lock and thus can't be called under pcpu_lock. */ |
1d5cfdb07 tree wide: use kv... |
451 452 |
pcpu_mem_free(old); pcpu_mem_free(new); |
833af8427 percpu: restructu... |
453 |
|
9f7dcf224 percpu: move chun... |
454 455 456 457 |
return 0; } /** |
a16037c8d percpu: make pcpu... |
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 |
* pcpu_fit_in_area - try to fit the requested allocation in a candidate area * @chunk: chunk the candidate area belongs to * @off: the offset to the start of the candidate area * @this_size: the size of the candidate area * @size: the size of the target allocation * @align: the alignment of the target allocation * @pop_only: only allocate from already populated region * * We're trying to allocate @size bytes aligned at @align. @chunk's area * at @off sized @this_size is a candidate. This function determines * whether the target allocation fits in the candidate area and returns the * number of bytes to pad after @off. If the target area doesn't fit, -1 * is returned. * * If @pop_only is %true, this function only considers the already * populated part of the candidate area. */ static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, int size, int align, bool pop_only) { int cand_off = off; while (true) { int head = ALIGN(cand_off, align) - off; int page_start, page_end, rs, re; if (this_size < head + size) return -1; if (!pop_only) return head; /* * If the first unpopulated page is beyond the end of the * allocation, the whole allocation is populated; * otherwise, retry from the end of the unpopulated area. */ page_start = PFN_DOWN(head + off); page_end = PFN_UP(head + off + size); rs = page_start; pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size)); if (rs >= page_end) return head; cand_off = re * PAGE_SIZE; } } /** |
fbf59bc9d percpu: implement... |
507 508 |
* pcpu_alloc_area - allocate area from a pcpu_chunk * @chunk: chunk of interest |
cae3aeb83 percpu: clean up ... |
509 |
* @size: wanted size in bytes |
fbf59bc9d percpu: implement... |
510 |
* @align: wanted align |
a16037c8d percpu: make pcpu... |
511 |
* @pop_only: allocate only from the populated area |
b539b87fe percpu: implmeent... |
512 |
* @occ_pages_p: out param for the number of pages the area occupies |
fbf59bc9d percpu: implement... |
513 514 515 516 517 |
* * Try to allocate @size bytes area aligned at @align from @chunk. * Note that this function only allocates the offset. It doesn't * populate or map the area. * |
9f7dcf224 percpu: move chun... |
518 519 |
* @chunk->map must have at least two free slots. * |
ccea34b5d percpu: finer gra... |
520 521 522 |
* CONTEXT: * pcpu_lock. * |
fbf59bc9d percpu: implement... |
523 |
* RETURNS: |
9f7dcf224 percpu: move chun... |
524 525 |
* Allocated offset in @chunk on success, -1 if no matching area is * found. |
fbf59bc9d percpu: implement... |
526 |
*/ |
a16037c8d percpu: make pcpu... |
527 |
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align, |
b539b87fe percpu: implmeent... |
528 |
bool pop_only, int *occ_pages_p) |
fbf59bc9d percpu: implement... |
529 530 531 532 |
{ int oslot = pcpu_chunk_slot(chunk); int max_contig = 0; int i, off; |
3d331ad74 percpu: speed all... |
533 |
bool seen_free = false; |
723ad1d90 percpu: store off... |
534 |
int *p; |
fbf59bc9d percpu: implement... |
535 |
|
3d331ad74 percpu: speed all... |
536 |
for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) { |
fbf59bc9d percpu: implement... |
537 |
int head, tail; |
723ad1d90 percpu: store off... |
538 539 540 541 542 |
int this_size; off = *p; if (off & 1) continue; |
fbf59bc9d percpu: implement... |
543 |
|
723ad1d90 percpu: store off... |
544 |
this_size = (p[1] & ~1) - off; |
a16037c8d percpu: make pcpu... |
545 546 547 548 |
head = pcpu_fit_in_area(chunk, off, this_size, size, align, pop_only); if (head < 0) { |
3d331ad74 percpu: speed all... |
549 550 551 552 |
if (!seen_free) { chunk->first_free = i; seen_free = true; } |
723ad1d90 percpu: store off... |
553 |
max_contig = max(this_size, max_contig); |
fbf59bc9d percpu: implement... |
554 555 556 557 558 559 560 561 562 |
continue; } /* * If head is small or the previous block is free, * merge'em. Note that 'small' is defined as smaller * than sizeof(int), which is very small but isn't too * uncommon for percpu allocations. */ |
723ad1d90 percpu: store off... |
563 |
if (head && (head < sizeof(int) || !(p[-1] & 1))) { |
21ddfd38e percpu: renew the... |
564 |
*p = off += head; |
723ad1d90 percpu: store off... |
565 |
if (p[-1] & 1) |
fbf59bc9d percpu: implement... |
566 |
chunk->free_size -= head; |
21ddfd38e percpu: renew the... |
567 568 |
else max_contig = max(*p - p[-1], max_contig); |
723ad1d90 percpu: store off... |
569 |
this_size -= head; |
fbf59bc9d percpu: implement... |
570 571 572 573 |
head = 0; } /* if tail is small, just keep it around */ |
723ad1d90 percpu: store off... |
574 575 |
tail = this_size - head - size; if (tail < sizeof(int)) { |
fbf59bc9d percpu: implement... |
576 |
tail = 0; |
723ad1d90 percpu: store off... |
577 578 |
size = this_size - head; } |
fbf59bc9d percpu: implement... |
579 580 581 |
/* split if warranted */ if (head || tail) { |
706c16f23 perpcu: fold pcpu... |
582 583 584 |
int nr_extra = !!head + !!tail; /* insert new subblocks */ |
723ad1d90 percpu: store off... |
585 |
memmove(p + nr_extra + 1, p + 1, |
706c16f23 perpcu: fold pcpu... |
586 587 |
sizeof(chunk->map[0]) * (chunk->map_used - i)); chunk->map_used += nr_extra; |
fbf59bc9d percpu: implement... |
588 |
if (head) { |
3d331ad74 percpu: speed all... |
589 590 591 592 |
if (!seen_free) { chunk->first_free = i; seen_free = true; } |
723ad1d90 percpu: store off... |
593 594 |
*++p = off += head; ++i; |
706c16f23 perpcu: fold pcpu... |
595 596 597 |
max_contig = max(head, max_contig); } if (tail) { |
723ad1d90 percpu: store off... |
598 |
p[1] = off + size; |
706c16f23 perpcu: fold pcpu... |
599 |
max_contig = max(tail, max_contig); |
fbf59bc9d percpu: implement... |
600 |
} |
fbf59bc9d percpu: implement... |
601 |
} |
3d331ad74 percpu: speed all... |
602 603 |
if (!seen_free) chunk->first_free = i + 1; |
fbf59bc9d percpu: implement... |
604 |
/* update hint and mark allocated */ |
723ad1d90 percpu: store off... |
605 |
if (i + 1 == chunk->map_used) |
fbf59bc9d percpu: implement... |
606 607 608 609 |
chunk->contig_hint = max_contig; /* fully scanned */ else chunk->contig_hint = max(chunk->contig_hint, max_contig); |
723ad1d90 percpu: store off... |
610 611 |
chunk->free_size -= size; *p |= 1; |
fbf59bc9d percpu: implement... |
612 |
|
b539b87fe percpu: implmeent... |
613 |
*occ_pages_p = pcpu_count_occupied_pages(chunk, i); |
fbf59bc9d percpu: implement... |
614 615 616 617 618 619 |
pcpu_chunk_relocate(chunk, oslot); return off; } chunk->contig_hint = max_contig; /* fully scanned */ pcpu_chunk_relocate(chunk, oslot); |
9f7dcf224 percpu: move chun... |
620 621 |
/* tell the upper layer that this chunk has no matching area */ return -1; |
fbf59bc9d percpu: implement... |
622 623 624 625 626 627 |
} /** * pcpu_free_area - free area to a pcpu_chunk * @chunk: chunk of interest * @freeme: offset of area to free |
b539b87fe percpu: implmeent... |
628 |
* @occ_pages_p: out param for the number of pages the area occupies |
fbf59bc9d percpu: implement... |
629 630 631 632 |
* * Free area starting from @freeme to @chunk. Note that this function * only modifies the allocation map. It doesn't depopulate or unmap * the area. |
ccea34b5d percpu: finer gra... |
633 634 635 |
* * CONTEXT: * pcpu_lock. |
fbf59bc9d percpu: implement... |
636 |
*/ |
b539b87fe percpu: implmeent... |
637 638 |
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme, int *occ_pages_p) |
fbf59bc9d percpu: implement... |
639 640 |
{ int oslot = pcpu_chunk_slot(chunk); |
723ad1d90 percpu: store off... |
641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 |
int off = 0; unsigned i, j; int to_free = 0; int *p; freeme |= 1; /* we are searching for <given offset, in use> pair */ i = 0; j = chunk->map_used; while (i != j) { unsigned k = (i + j) / 2; off = chunk->map[k]; if (off < freeme) i = k + 1; else if (off > freeme) j = k; else i = j = k; } |
fbf59bc9d percpu: implement... |
660 |
BUG_ON(off != freeme); |
fbf59bc9d percpu: implement... |
661 |
|
3d331ad74 percpu: speed all... |
662 663 |
if (i < chunk->first_free) chunk->first_free = i; |
723ad1d90 percpu: store off... |
664 665 666 |
p = chunk->map + i; *p = off &= ~1; chunk->free_size += (p[1] & ~1) - off; |
fbf59bc9d percpu: implement... |
667 |
|
b539b87fe percpu: implmeent... |
668 |
*occ_pages_p = pcpu_count_occupied_pages(chunk, i); |
723ad1d90 percpu: store off... |
669 670 671 |
/* merge with next? */ if (!(p[1] & 1)) to_free++; |
fbf59bc9d percpu: implement... |
672 |
/* merge with previous? */ |
723ad1d90 percpu: store off... |
673 674 |
if (i > 0 && !(p[-1] & 1)) { to_free++; |
fbf59bc9d percpu: implement... |
675 |
i--; |
723ad1d90 percpu: store off... |
676 |
p--; |
fbf59bc9d percpu: implement... |
677 |
} |
723ad1d90 percpu: store off... |
678 679 680 681 |
if (to_free) { chunk->map_used -= to_free; memmove(p + 1, p + 1 + to_free, (chunk->map_used - i) * sizeof(chunk->map[0])); |
fbf59bc9d percpu: implement... |
682 |
} |
723ad1d90 percpu: store off... |
683 |
chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint); |
fbf59bc9d percpu: implement... |
684 685 |
pcpu_chunk_relocate(chunk, oslot); } |
6081089fd percpu: reorganiz... |
686 687 688 |
static struct pcpu_chunk *pcpu_alloc_chunk(void) { struct pcpu_chunk *chunk; |
90459ce06 percpu: rename pc... |
689 |
chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); |
6081089fd percpu: reorganiz... |
690 691 |
if (!chunk) return NULL; |
90459ce06 percpu: rename pc... |
692 693 |
chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); |
6081089fd percpu: reorganiz... |
694 |
if (!chunk->map) { |
1d5cfdb07 tree wide: use kv... |
695 |
pcpu_mem_free(chunk); |
6081089fd percpu: reorganiz... |
696 697 698 699 |
return NULL; } chunk->map_alloc = PCPU_DFL_MAP_ALLOC; |
723ad1d90 percpu: store off... |
700 701 702 |
chunk->map[0] = 0; chunk->map[1] = pcpu_unit_size | 1; chunk->map_used = 1; |
6081089fd percpu: reorganiz... |
703 704 |
INIT_LIST_HEAD(&chunk->list); |
4f996e234 percpu: fix synch... |
705 |
INIT_LIST_HEAD(&chunk->map_extend_list); |
6081089fd percpu: reorganiz... |
706 707 708 709 710 711 712 713 714 715 |
chunk->free_size = pcpu_unit_size; chunk->contig_hint = pcpu_unit_size; return chunk; } static void pcpu_free_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; |
1d5cfdb07 tree wide: use kv... |
716 717 |
pcpu_mem_free(chunk->map); pcpu_mem_free(chunk); |
6081089fd percpu: reorganiz... |
718 |
} |
b539b87fe percpu: implmeent... |
719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 |
/** * pcpu_chunk_populated - post-population bookkeeping * @chunk: pcpu_chunk which got populated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been populated to @chunk. Update * the bookkeeping information accordingly. Must be called after each * successful population. */ static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_set(chunk->populated, page_start, nr); chunk->nr_populated += nr; pcpu_nr_empty_pop_pages += nr; } /** * pcpu_chunk_depopulated - post-depopulation bookkeeping * @chunk: pcpu_chunk which got depopulated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been depopulated from @chunk. * Update the bookkeeping information accordingly. Must be called after * each successful depopulation. */ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_clear(chunk->populated, page_start, nr); chunk->nr_populated -= nr; pcpu_nr_empty_pop_pages -= nr; } |
9f6455325 percpu: move vmal... |
762 763 764 765 766 767 768 769 770 771 772 773 774 775 |
/* * Chunk management implementation. * * To allow different implementations, chunk alloc/free and * [de]population are implemented in a separate file which is pulled * into this file and compiled together. The following functions * should be implemented. * * pcpu_populate_chunk - populate the specified range of a chunk * pcpu_depopulate_chunk - depopulate the specified range of a chunk * pcpu_create_chunk - create a new chunk * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop * pcpu_addr_to_page - translate address to physical address * pcpu_verify_alloc_info - check alloc_info is acceptable during init |
fbf59bc9d percpu: implement... |
776 |
*/ |
9f6455325 percpu: move vmal... |
777 778 779 780 781 782 |
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size); static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size); static struct pcpu_chunk *pcpu_create_chunk(void); static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); static struct page *pcpu_addr_to_page(void *addr); static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); |
fbf59bc9d percpu: implement... |
783 |
|
b0c9778b1 percpu: implement... |
784 785 786 |
#ifdef CONFIG_NEED_PER_CPU_KM #include "percpu-km.c" #else |
9f6455325 percpu: move vmal... |
787 |
#include "percpu-vm.c" |
b0c9778b1 percpu: implement... |
788 |
#endif |
fbf59bc9d percpu: implement... |
789 790 |
/** |
88999a898 percpu: misc prep... |
791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 |
* pcpu_chunk_addr_search - determine chunk containing specified address * @addr: address for which the chunk needs to be determined. * * RETURNS: * The address of the found chunk. */ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) { /* is it in the first chunk? */ if (pcpu_addr_in_first_chunk(addr)) { /* is it in the reserved area? */ if (pcpu_addr_in_reserved_chunk(addr)) return pcpu_reserved_chunk; return pcpu_first_chunk; } /* * The address is relative to unit0 which might be unused and * thus unmapped. Offset the address to the unit space of the * current processor before looking it up in the vmalloc * space. Note that any possible cpu id can be used here, so * there's no need to worry about preemption or cpu hotplug. */ addr += pcpu_unit_offsets[raw_smp_processor_id()]; |
9f6455325 percpu: move vmal... |
815 |
return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); |
88999a898 percpu: misc prep... |
816 817 818 |
} /** |
edcb46399 percpu, module: i... |
819 |
* pcpu_alloc - the percpu allocator |
cae3aeb83 percpu: clean up ... |
820 |
* @size: size of area to allocate in bytes |
fbf59bc9d percpu: implement... |
821 |
* @align: alignment of area (max PAGE_SIZE) |
edcb46399 percpu, module: i... |
822 |
* @reserved: allocate from the reserved chunk if available |
5835d96e9 percpu: implement... |
823 |
* @gfp: allocation flags |
fbf59bc9d percpu: implement... |
824 |
* |
5835d96e9 percpu: implement... |
825 826 |
* Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't * contain %GFP_KERNEL, the allocation is atomic. |
fbf59bc9d percpu: implement... |
827 828 829 830 |
* * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ |
5835d96e9 percpu: implement... |
831 832 |
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, gfp_t gfp) |
fbf59bc9d percpu: implement... |
833 |
{ |
f2badb0c9 percpu: make allo... |
834 |
static int warn_limit = 10; |
fbf59bc9d percpu: implement... |
835 |
struct pcpu_chunk *chunk; |
f2badb0c9 percpu: make allo... |
836 |
const char *err; |
6ae833c7f percpu: fix how @... |
837 |
bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; |
b539b87fe percpu: implmeent... |
838 |
int occ_pages = 0; |
b38d08f31 percpu: restructu... |
839 |
int slot, off, new_alloc, cpu, ret; |
403a91b16 percpu: allow pcp... |
840 |
unsigned long flags; |
f528f0b8e kmemleak: Handle ... |
841 |
void __percpu *ptr; |
fbf59bc9d percpu: implement... |
842 |
|
723ad1d90 percpu: store off... |
843 844 |
/* * We want the lowest bit of offset available for in-use/free |
2f69fa829 percpu: allocatio... |
845 |
* indicator, so force >= 16bit alignment and make size even. |
723ad1d90 percpu: store off... |
846 847 848 |
*/ if (unlikely(align < 2)) align = 2; |
fb009e3a9 percpu: Use ALIGN... |
849 |
size = ALIGN(size, 2); |
2f69fa829 percpu: allocatio... |
850 |
|
8d408b4be percpu: give more... |
851 |
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { |
756a025f0 mm: coalesce spli... |
852 853 854 |
WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation ", size, align); |
fbf59bc9d percpu: implement... |
855 856 |
return NULL; } |
6710e594f percpu: fix synch... |
857 858 |
if (!is_atomic) mutex_lock(&pcpu_alloc_mutex); |
403a91b16 percpu: allow pcp... |
859 |
spin_lock_irqsave(&pcpu_lock, flags); |
fbf59bc9d percpu: implement... |
860 |
|
edcb46399 percpu, module: i... |
861 862 863 |
/* serve reserved allocations from the reserved chunk if available */ if (reserved && pcpu_reserved_chunk) { chunk = pcpu_reserved_chunk; |
833af8427 percpu: restructu... |
864 865 866 |
if (size > chunk->contig_hint) { err = "alloc from reserved chunk failed"; |
ccea34b5d percpu: finer gra... |
867 |
goto fail_unlock; |
f2badb0c9 percpu: make allo... |
868 |
} |
833af8427 percpu: restructu... |
869 |
|
9c824b6a1 percpu: make sure... |
870 |
while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) { |
833af8427 percpu: restructu... |
871 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
5835d96e9 percpu: implement... |
872 873 |
if (is_atomic || pcpu_extend_area_map(chunk, new_alloc) < 0) { |
833af8427 percpu: restructu... |
874 |
err = "failed to extend area map of reserved chunk"; |
b38d08f31 percpu: restructu... |
875 |
goto fail; |
833af8427 percpu: restructu... |
876 877 878 |
} spin_lock_irqsave(&pcpu_lock, flags); } |
b539b87fe percpu: implmeent... |
879 880 |
off = pcpu_alloc_area(chunk, size, align, is_atomic, &occ_pages); |
edcb46399 percpu, module: i... |
881 882 |
if (off >= 0) goto area_found; |
833af8427 percpu: restructu... |
883 |
|
f2badb0c9 percpu: make allo... |
884 |
err = "alloc from reserved chunk failed"; |
ccea34b5d percpu: finer gra... |
885 |
goto fail_unlock; |
edcb46399 percpu, module: i... |
886 |
} |
ccea34b5d percpu: finer gra... |
887 |
restart: |
edcb46399 percpu, module: i... |
888 |
/* search through normal chunks */ |
fbf59bc9d percpu: implement... |
889 890 891 892 |
for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { list_for_each_entry(chunk, &pcpu_slot[slot], list) { if (size > chunk->contig_hint) continue; |
ccea34b5d percpu: finer gra... |
893 |
|
9c824b6a1 percpu: make sure... |
894 |
new_alloc = pcpu_need_to_extend(chunk, is_atomic); |
833af8427 percpu: restructu... |
895 |
if (new_alloc) { |
5835d96e9 percpu: implement... |
896 897 |
if (is_atomic) continue; |
833af8427 percpu: restructu... |
898 899 900 901 |
spin_unlock_irqrestore(&pcpu_lock, flags); if (pcpu_extend_area_map(chunk, new_alloc) < 0) { err = "failed to extend area map"; |
b38d08f31 percpu: restructu... |
902 |
goto fail; |
833af8427 percpu: restructu... |
903 904 905 906 907 908 909 |
} spin_lock_irqsave(&pcpu_lock, flags); /* * pcpu_lock has been dropped, need to * restart cpu_slot list walking. */ goto restart; |
ccea34b5d percpu: finer gra... |
910 |
} |
b539b87fe percpu: implmeent... |
911 912 |
off = pcpu_alloc_area(chunk, size, align, is_atomic, &occ_pages); |
fbf59bc9d percpu: implement... |
913 914 |
if (off >= 0) goto area_found; |
fbf59bc9d percpu: implement... |
915 916 |
} } |
403a91b16 percpu: allow pcp... |
917 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
ccea34b5d percpu: finer gra... |
918 |
|
b38d08f31 percpu: restructu... |
919 920 921 922 923 |
/* * No space left. Create a new chunk. We don't want multiple * tasks to create chunks simultaneously. Serialize and create iff * there's still no empty chunk after grabbing the mutex. */ |
5835d96e9 percpu: implement... |
924 925 |
if (is_atomic) goto fail; |
b38d08f31 percpu: restructu... |
926 927 928 929 930 931 932 933 934 935 936 |
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { chunk = pcpu_create_chunk(); if (!chunk) { err = "failed to allocate new chunk"; goto fail; } spin_lock_irqsave(&pcpu_lock, flags); pcpu_chunk_relocate(chunk, -1); } else { spin_lock_irqsave(&pcpu_lock, flags); |
f2badb0c9 percpu: make allo... |
937 |
} |
ccea34b5d percpu: finer gra... |
938 |
|
ccea34b5d percpu: finer gra... |
939 |
goto restart; |
fbf59bc9d percpu: implement... |
940 941 |
area_found: |
403a91b16 percpu: allow pcp... |
942 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
ccea34b5d percpu: finer gra... |
943 |
|
dca496451 percpu: move comm... |
944 |
/* populate if not all pages are already there */ |
5835d96e9 percpu: implement... |
945 |
if (!is_atomic) { |
e04d32083 percpu: indent th... |
946 |
int page_start, page_end, rs, re; |
dca496451 percpu: move comm... |
947 |
|
e04d32083 percpu: indent th... |
948 949 |
page_start = PFN_DOWN(off); page_end = PFN_UP(off + size); |
b38d08f31 percpu: restructu... |
950 |
|
e04d32083 percpu: indent th... |
951 952 953 954 955 956 957 |
pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { WARN_ON(chunk->immutable); ret = pcpu_populate_chunk(chunk, rs, re); spin_lock_irqsave(&pcpu_lock, flags); if (ret) { |
b539b87fe percpu: implmeent... |
958 |
pcpu_free_area(chunk, off, &occ_pages); |
e04d32083 percpu: indent th... |
959 960 961 |
err = "failed to populate"; goto fail_unlock; } |
b539b87fe percpu: implmeent... |
962 |
pcpu_chunk_populated(chunk, rs, re); |
e04d32083 percpu: indent th... |
963 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
dca496451 percpu: move comm... |
964 |
} |
fbf59bc9d percpu: implement... |
965 |
|
e04d32083 percpu: indent th... |
966 967 |
mutex_unlock(&pcpu_alloc_mutex); } |
ccea34b5d percpu: finer gra... |
968 |
|
b539b87fe percpu: implmeent... |
969 970 |
if (chunk != pcpu_reserved_chunk) pcpu_nr_empty_pop_pages -= occ_pages; |
1a4d76076 percpu: implement... |
971 972 |
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) pcpu_schedule_balance_work(); |
dca496451 percpu: move comm... |
973 974 975 |
/* clear the areas and return address relative to base address */ for_each_possible_cpu(cpu) memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); |
f528f0b8e kmemleak: Handle ... |
976 |
ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); |
8a8c35fad mm: kmemleak_allo... |
977 |
kmemleak_alloc_percpu(ptr, size, gfp); |
f528f0b8e kmemleak: Handle ... |
978 |
return ptr; |
ccea34b5d percpu: finer gra... |
979 980 |
fail_unlock: |
403a91b16 percpu: allow pcp... |
981 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
b38d08f31 percpu: restructu... |
982 |
fail: |
5835d96e9 percpu: implement... |
983 |
if (!is_atomic && warn_limit) { |
870d4b12a mm: percpu: use p... |
984 985 |
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s ", |
598d80914 mm: convert pr_wa... |
986 |
size, align, is_atomic, err); |
f2badb0c9 percpu: make allo... |
987 988 |
dump_stack(); if (!--warn_limit) |
870d4b12a mm: percpu: use p... |
989 990 |
pr_info("limit reached, disable warning "); |
f2badb0c9 percpu: make allo... |
991 |
} |
1a4d76076 percpu: implement... |
992 993 994 995 |
if (is_atomic) { /* see the flag handling in pcpu_blance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); |
6710e594f percpu: fix synch... |
996 997 |
} else { mutex_unlock(&pcpu_alloc_mutex); |
1a4d76076 percpu: implement... |
998 |
} |
ccea34b5d percpu: finer gra... |
999 |
return NULL; |
fbf59bc9d percpu: implement... |
1000 |
} |
edcb46399 percpu, module: i... |
1001 1002 |
/** |
5835d96e9 percpu: implement... |
1003 |
* __alloc_percpu_gfp - allocate dynamic percpu area |
edcb46399 percpu, module: i... |
1004 1005 |
* @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) |
5835d96e9 percpu: implement... |
1006 |
* @gfp: allocation flags |
edcb46399 percpu, module: i... |
1007 |
* |
5835d96e9 percpu: implement... |
1008 1009 1010 |
* Allocate zero-filled percpu area of @size bytes aligned at @align. If * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can * be called from any context but is a lot more likely to fail. |
ccea34b5d percpu: finer gra... |
1011 |
* |
edcb46399 percpu, module: i... |
1012 1013 1014 |
* RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ |
5835d96e9 percpu: implement... |
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 |
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) { return pcpu_alloc(size, align, false, gfp); } EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); /** * __alloc_percpu - allocate dynamic percpu area * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). */ |
43cf38eb5 percpu: add __per... |
1028 |
void __percpu *__alloc_percpu(size_t size, size_t align) |
edcb46399 percpu, module: i... |
1029 |
{ |
5835d96e9 percpu: implement... |
1030 |
return pcpu_alloc(size, align, false, GFP_KERNEL); |
edcb46399 percpu, module: i... |
1031 |
} |
fbf59bc9d percpu: implement... |
1032 |
EXPORT_SYMBOL_GPL(__alloc_percpu); |
edcb46399 percpu, module: i... |
1033 1034 1035 1036 1037 |
/** * __alloc_reserved_percpu - allocate reserved percpu area * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * |
9329ba970 percpu: update co... |
1038 1039 1040 1041 |
* Allocate zero-filled percpu area of @size bytes aligned at @align * from reserved percpu area if arch has set it up; otherwise, * allocation is served from the same dynamic area. Might sleep. * Might trigger writeouts. |
edcb46399 percpu, module: i... |
1042 |
* |
ccea34b5d percpu: finer gra... |
1043 1044 1045 |
* CONTEXT: * Does GFP_KERNEL allocation. * |
edcb46399 percpu, module: i... |
1046 1047 1048 |
* RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ |
43cf38eb5 percpu: add __per... |
1049 |
void __percpu *__alloc_reserved_percpu(size_t size, size_t align) |
edcb46399 percpu, module: i... |
1050 |
{ |
5835d96e9 percpu: implement... |
1051 |
return pcpu_alloc(size, align, true, GFP_KERNEL); |
edcb46399 percpu, module: i... |
1052 |
} |
a56dbddf0 percpu: move full... |
1053 |
/** |
1a4d76076 percpu: implement... |
1054 |
* pcpu_balance_workfn - manage the amount of free chunks and populated pages |
a56dbddf0 percpu: move full... |
1055 1056 1057 1058 |
* @work: unused * * Reclaim all fully free chunks except for the first one. */ |
fe6bd8c3d percpu: rename pc... |
1059 |
static void pcpu_balance_workfn(struct work_struct *work) |
fbf59bc9d percpu: implement... |
1060 |
{ |
fe6bd8c3d percpu: rename pc... |
1061 1062 |
LIST_HEAD(to_free); struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; |
a56dbddf0 percpu: move full... |
1063 |
struct pcpu_chunk *chunk, *next; |
1a4d76076 percpu: implement... |
1064 |
int slot, nr_to_pop, ret; |
a56dbddf0 percpu: move full... |
1065 |
|
1a4d76076 percpu: implement... |
1066 1067 1068 1069 |
/* * There's no reason to keep around multiple unused chunks and VM * areas can be scarce. Destroy all free chunks except for one. */ |
ccea34b5d percpu: finer gra... |
1070 1071 |
mutex_lock(&pcpu_alloc_mutex); spin_lock_irq(&pcpu_lock); |
a56dbddf0 percpu: move full... |
1072 |
|
fe6bd8c3d percpu: rename pc... |
1073 |
list_for_each_entry_safe(chunk, next, free_head, list) { |
a56dbddf0 percpu: move full... |
1074 1075 1076 |
WARN_ON(chunk->immutable); /* spare the first one */ |
fe6bd8c3d percpu: rename pc... |
1077 |
if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
a56dbddf0 percpu: move full... |
1078 |
continue; |
4f996e234 percpu: fix synch... |
1079 |
list_del_init(&chunk->map_extend_list); |
fe6bd8c3d percpu: rename pc... |
1080 |
list_move(&chunk->list, &to_free); |
a56dbddf0 percpu: move full... |
1081 |
} |
ccea34b5d percpu: finer gra... |
1082 |
spin_unlock_irq(&pcpu_lock); |
a56dbddf0 percpu: move full... |
1083 |
|
fe6bd8c3d percpu: rename pc... |
1084 |
list_for_each_entry_safe(chunk, next, &to_free, list) { |
a93ace487 percpu: move regi... |
1085 |
int rs, re; |
dca496451 percpu: move comm... |
1086 |
|
a93ace487 percpu: move regi... |
1087 1088 |
pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) { pcpu_depopulate_chunk(chunk, rs, re); |
b539b87fe percpu: implmeent... |
1089 1090 1091 |
spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, rs, re); spin_unlock_irq(&pcpu_lock); |
a93ace487 percpu: move regi... |
1092 |
} |
6081089fd percpu: reorganiz... |
1093 |
pcpu_destroy_chunk(chunk); |
a56dbddf0 percpu: move full... |
1094 |
} |
971f3918a percpu: fix pcpu_... |
1095 |
|
4f996e234 percpu: fix synch... |
1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 |
/* service chunks which requested async area map extension */ do { int new_alloc = 0; spin_lock_irq(&pcpu_lock); chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, struct pcpu_chunk, map_extend_list); if (chunk) { list_del_init(&chunk->map_extend_list); new_alloc = pcpu_need_to_extend(chunk, false); } spin_unlock_irq(&pcpu_lock); if (new_alloc) pcpu_extend_area_map(chunk, new_alloc); } while (chunk); |
1a4d76076 percpu: implement... |
1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 |
/* * Ensure there are certain number of free populated pages for * atomic allocs. Fill up from the most packed so that atomic * allocs don't increase fragmentation. If atomic allocation * failed previously, always populate the maximum amount. This * should prevent atomic allocs larger than PAGE_SIZE from keeping * failing indefinitely; however, large atomic allocs are not * something we support properly and can be highly unreliable and * inefficient. */ retry_pop: if (pcpu_atomic_alloc_failed) { nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; /* best effort anyway, don't worry about synchronization */ pcpu_atomic_alloc_failed = false; } else { nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - pcpu_nr_empty_pop_pages, 0, PCPU_EMPTY_POP_PAGES_HIGH); } for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { int nr_unpop = 0, rs, re; if (!nr_to_pop) break; spin_lock_irq(&pcpu_lock); list_for_each_entry(chunk, &pcpu_slot[slot], list) { nr_unpop = pcpu_unit_pages - chunk->nr_populated; if (nr_unpop) break; } spin_unlock_irq(&pcpu_lock); if (!nr_unpop) continue; /* @chunk can't go away while pcpu_alloc_mutex is held */ pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) { int nr = min(re - rs, nr_to_pop); ret = pcpu_populate_chunk(chunk, rs, rs + nr); if (!ret) { nr_to_pop -= nr; spin_lock_irq(&pcpu_lock); pcpu_chunk_populated(chunk, rs, rs + nr); spin_unlock_irq(&pcpu_lock); } else { nr_to_pop = 0; } if (!nr_to_pop) break; } } if (nr_to_pop) { /* ran out of chunks to populate, create a new one and retry */ chunk = pcpu_create_chunk(); if (chunk) { spin_lock_irq(&pcpu_lock); pcpu_chunk_relocate(chunk, -1); spin_unlock_irq(&pcpu_lock); goto retry_pop; } } |
971f3918a percpu: fix pcpu_... |
1181 |
mutex_unlock(&pcpu_alloc_mutex); |
fbf59bc9d percpu: implement... |
1182 1183 1184 1185 1186 1187 |
} /** * free_percpu - free percpu area * @ptr: pointer to area to free * |
ccea34b5d percpu: finer gra... |
1188 1189 1190 1191 |
* Free percpu area @ptr. * * CONTEXT: * Can be called from atomic context. |
fbf59bc9d percpu: implement... |
1192 |
*/ |
43cf38eb5 percpu: add __per... |
1193 |
void free_percpu(void __percpu *ptr) |
fbf59bc9d percpu: implement... |
1194 |
{ |
129182e56 percpu: avoid cal... |
1195 |
void *addr; |
fbf59bc9d percpu: implement... |
1196 |
struct pcpu_chunk *chunk; |
ccea34b5d percpu: finer gra... |
1197 |
unsigned long flags; |
b539b87fe percpu: implmeent... |
1198 |
int off, occ_pages; |
fbf59bc9d percpu: implement... |
1199 1200 1201 |
if (!ptr) return; |
f528f0b8e kmemleak: Handle ... |
1202 |
kmemleak_free_percpu(ptr); |
129182e56 percpu: avoid cal... |
1203 |
addr = __pcpu_ptr_to_addr(ptr); |
ccea34b5d percpu: finer gra... |
1204 |
spin_lock_irqsave(&pcpu_lock, flags); |
fbf59bc9d percpu: implement... |
1205 1206 |
chunk = pcpu_chunk_addr_search(addr); |
bba174f5e percpu: add chunk... |
1207 |
off = addr - chunk->base_addr; |
fbf59bc9d percpu: implement... |
1208 |
|
b539b87fe percpu: implmeent... |
1209 1210 1211 1212 |
pcpu_free_area(chunk, off, &occ_pages); if (chunk != pcpu_reserved_chunk) pcpu_nr_empty_pop_pages += occ_pages; |
fbf59bc9d percpu: implement... |
1213 |
|
a56dbddf0 percpu: move full... |
1214 |
/* if there are more than one fully free chunks, wake up grim reaper */ |
fbf59bc9d percpu: implement... |
1215 1216 |
if (chunk->free_size == pcpu_unit_size) { struct pcpu_chunk *pos; |
a56dbddf0 percpu: move full... |
1217 |
list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) |
fbf59bc9d percpu: implement... |
1218 |
if (pos != chunk) { |
1a4d76076 percpu: implement... |
1219 |
pcpu_schedule_balance_work(); |
fbf59bc9d percpu: implement... |
1220 1221 1222 |
break; } } |
ccea34b5d percpu: finer gra... |
1223 |
spin_unlock_irqrestore(&pcpu_lock, flags); |
fbf59bc9d percpu: implement... |
1224 1225 |
} EXPORT_SYMBOL_GPL(free_percpu); |
3b034b0d0 percpu: Fix kdump... |
1226 |
/** |
10fad5e46 percpu, module: i... |
1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 |
* is_kernel_percpu_address - test whether address is from static percpu area * @addr: address to test * * Test whether @addr belongs to in-kernel static percpu area. Module * static percpu areas are not considered. For those, use * is_module_percpu_address(). * * RETURNS: * %true if @addr is from in-kernel static percpu area, %false otherwise. */ bool is_kernel_percpu_address(unsigned long addr) { |
bbddff054 percpu: use percp... |
1239 |
#ifdef CONFIG_SMP |
10fad5e46 percpu, module: i... |
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 |
const size_t static_size = __per_cpu_end - __per_cpu_start; void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); unsigned int cpu; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); if ((void *)addr >= start && (void *)addr < start + static_size) return true; } |
bbddff054 percpu: use percp... |
1250 1251 |
#endif /* on UP, can't distinguish from other static vars, always false */ |
10fad5e46 percpu, module: i... |
1252 1253 1254 1255 |
return false; } /** |
3b034b0d0 percpu: Fix kdump... |
1256 1257 1258 1259 1260 1261 1262 1263 |
* per_cpu_ptr_to_phys - convert translated percpu address to physical address * @addr: the address to be converted to physical address * * Given @addr which is dereferenceable address obtained via one of * percpu access macros, this function translates it into its physical * address. The caller is responsible for ensuring @addr stays valid * until this function finishes. * |
67589c714 percpu: explain w... |
1264 1265 1266 1267 1268 |
* percpu allocator has special setup for the first chunk, which currently * supports either embedding in linear address space or vmalloc mapping, * and, from the second one, the backing allocator (currently either vm or * km) provides translation. * |
bffc43758 percpu: Fix trivi... |
1269 |
* The addr can be translated simply without checking if it falls into the |
67589c714 percpu: explain w... |
1270 1271 1272 1273 1274 |
* first chunk. But the current code reflects better how percpu allocator * actually works, and the verification can discover both bugs in percpu * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current * code. * |
3b034b0d0 percpu: Fix kdump... |
1275 1276 1277 1278 1279 |
* RETURNS: * The physical address for @addr. */ phys_addr_t per_cpu_ptr_to_phys(void *addr) { |
9983b6f0c percpu: fix first... |
1280 1281 |
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); bool in_first_chunk = false; |
a855b84c3 percpu: fix chunk... |
1282 |
unsigned long first_low, first_high; |
9983b6f0c percpu: fix first... |
1283 1284 1285 |
unsigned int cpu; /* |
a855b84c3 percpu: fix chunk... |
1286 |
* The following test on unit_low/high isn't strictly |
9983b6f0c percpu: fix first... |
1287 1288 1289 |
* necessary but will speed up lookups of addresses which * aren't in the first chunk. */ |
a855b84c3 percpu: fix chunk... |
1290 1291 1292 1293 1294 |
first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, pcpu_unit_pages); if ((unsigned long)addr >= first_low && (unsigned long)addr < first_high) { |
9983b6f0c percpu: fix first... |
1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 |
for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); if (addr >= start && addr < start + pcpu_unit_size) { in_first_chunk = true; break; } } } if (in_first_chunk) { |
eac522ef4 NOMMU: percpu sho... |
1306 |
if (!is_vmalloc_addr(addr)) |
020ec6537 percpu: factor ou... |
1307 1308 |
return __pa(addr); else |
9f57bd4d6 percpu: fix per_c... |
1309 1310 |
return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); |
020ec6537 percpu: factor ou... |
1311 |
} else |
9f57bd4d6 percpu: fix per_c... |
1312 1313 |
return page_to_phys(pcpu_addr_to_page(addr)) + offset_in_page(addr); |
3b034b0d0 percpu: Fix kdump... |
1314 |
} |
fbf59bc9d percpu: implement... |
1315 |
/** |
fd1e8a1fe percpu: introduce... |
1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 |
* pcpu_alloc_alloc_info - allocate percpu allocation info * @nr_groups: the number of groups * @nr_units: the number of units * * Allocate ai which is large enough for @nr_groups groups containing * @nr_units units. The returned ai's groups[0].cpu_map points to the * cpu_map array which is long enough for @nr_units and filled with * NR_CPUS. It's the caller's responsibility to initialize cpu_map * pointer of other groups. * * RETURNS: * Pointer to the allocated pcpu_alloc_info on success, NULL on * failure. */ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units) { struct pcpu_alloc_info *ai; size_t base_size, ai_size; void *ptr; int unit; base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), __alignof__(ai->groups[0].cpu_map[0])); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); |
999c17e3d mm/percpu.c: use ... |
1341 |
ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); |
fd1e8a1fe percpu: introduce... |
1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 |
if (!ptr) return NULL; ai = ptr; ptr += base_size; ai->groups[0].cpu_map = ptr; for (unit = 0; unit < nr_units; unit++) ai->groups[0].cpu_map[unit] = NR_CPUS; ai->nr_groups = nr_groups; ai->__ai_size = PFN_ALIGN(ai_size); return ai; } /** * pcpu_free_alloc_info - free percpu allocation info * @ai: pcpu_alloc_info to free * * Free @ai which was allocated by pcpu_alloc_alloc_info(). */ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) { |
999c17e3d mm/percpu.c: use ... |
1366 |
memblock_free_early(__pa(ai), ai->__ai_size); |
fd1e8a1fe percpu: introduce... |
1367 1368 1369 |
} /** |
fd1e8a1fe percpu: introduce... |
1370 1371 1372 1373 1374 1375 1376 1377 |
* pcpu_dump_alloc_info - print out information about pcpu_alloc_info * @lvl: loglevel * @ai: allocation info to dump * * Print out information about @ai using loglevel @lvl. */ static void pcpu_dump_alloc_info(const char *lvl, const struct pcpu_alloc_info *ai) |
033e48fb8 percpu: move pcpu... |
1378 |
{ |
fd1e8a1fe percpu: introduce... |
1379 |
int group_width = 1, cpu_width = 1, width; |
033e48fb8 percpu: move pcpu... |
1380 |
char empty_str[] = "--------"; |
fd1e8a1fe percpu: introduce... |
1381 1382 1383 1384 1385 1386 1387 |
int alloc = 0, alloc_end = 0; int group, v; int upa, apl; /* units per alloc, allocs per line */ v = ai->nr_groups; while (v /= 10) group_width++; |
033e48fb8 percpu: move pcpu... |
1388 |
|
fd1e8a1fe percpu: introduce... |
1389 |
v = num_possible_cpus(); |
033e48fb8 percpu: move pcpu... |
1390 |
while (v /= 10) |
fd1e8a1fe percpu: introduce... |
1391 1392 |
cpu_width++; empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; |
033e48fb8 percpu: move pcpu... |
1393 |
|
fd1e8a1fe percpu: introduce... |
1394 1395 1396 |
upa = ai->alloc_size / ai->unit_size; width = upa * (cpu_width + 1) + group_width + 3; apl = rounddown_pow_of_two(max(60 / width, 1)); |
033e48fb8 percpu: move pcpu... |
1397 |
|
fd1e8a1fe percpu: introduce... |
1398 1399 1400 |
printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", lvl, ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); |
033e48fb8 percpu: move pcpu... |
1401 |
|
fd1e8a1fe percpu: introduce... |
1402 1403 1404 1405 1406 1407 1408 1409 |
for (group = 0; group < ai->nr_groups; group++) { const struct pcpu_group_info *gi = &ai->groups[group]; int unit = 0, unit_end = 0; BUG_ON(gi->nr_units % upa); for (alloc_end += gi->nr_units / upa; alloc < alloc_end; alloc++) { if (!(alloc % apl)) { |
1170532bb mm: convert print... |
1410 1411 |
pr_cont(" "); |
fd1e8a1fe percpu: introduce... |
1412 1413 |
printk("%spcpu-alloc: ", lvl); } |
1170532bb mm: convert print... |
1414 |
pr_cont("[%0*d] ", group_width, group); |
fd1e8a1fe percpu: introduce... |
1415 1416 1417 |
for (unit_end += upa; unit < unit_end; unit++) if (gi->cpu_map[unit] != NR_CPUS) |
1170532bb mm: convert print... |
1418 1419 |
pr_cont("%0*d ", cpu_width, gi->cpu_map[unit]); |
fd1e8a1fe percpu: introduce... |
1420 |
else |
1170532bb mm: convert print... |
1421 |
pr_cont("%s ", empty_str); |
033e48fb8 percpu: move pcpu... |
1422 |
} |
033e48fb8 percpu: move pcpu... |
1423 |
} |
1170532bb mm: convert print... |
1424 1425 |
pr_cont(" "); |
033e48fb8 percpu: move pcpu... |
1426 |
} |
033e48fb8 percpu: move pcpu... |
1427 |
|
fbf59bc9d percpu: implement... |
1428 |
/** |
8d408b4be percpu: give more... |
1429 |
* pcpu_setup_first_chunk - initialize the first percpu chunk |
fd1e8a1fe percpu: introduce... |
1430 |
* @ai: pcpu_alloc_info describing how to percpu area is shaped |
38a6be525 percpu: simplify ... |
1431 |
* @base_addr: mapped address |
8d408b4be percpu: give more... |
1432 1433 1434 |
* * Initialize the first percpu chunk which contains the kernel static * perpcu area. This function is to be called from arch percpu area |
38a6be525 percpu: simplify ... |
1435 |
* setup path. |
8d408b4be percpu: give more... |
1436 |
* |
fd1e8a1fe percpu: introduce... |
1437 1438 1439 1440 1441 1442 |
* @ai contains all information necessary to initialize the first * chunk and prime the dynamic percpu allocator. * * @ai->static_size is the size of static percpu area. * * @ai->reserved_size, if non-zero, specifies the amount of bytes to |
edcb46399 percpu, module: i... |
1443 1444 1445 1446 1447 1448 1449 |
* reserve after the static area in the first chunk. This reserves * the first chunk such that it's available only through reserved * percpu allocation. This is primarily used to serve module percpu * static areas on architectures where the addressing model has * limited offset range for symbol relocations to guarantee module * percpu symbols fall inside the relocatable range. * |
fd1e8a1fe percpu: introduce... |
1450 1451 1452 |
* @ai->dyn_size determines the number of bytes available for dynamic * allocation in the first chunk. The area between @ai->static_size + * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. |
6074d5b0a percpu: more flex... |
1453 |
* |
fd1e8a1fe percpu: introduce... |
1454 1455 1456 |
* @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE * and equal to or larger than @ai->static_size + @ai->reserved_size + * @ai->dyn_size. |
8d408b4be percpu: give more... |
1457 |
* |
fd1e8a1fe percpu: introduce... |
1458 1459 |
* @ai->atom_size is the allocation atom size and used as alignment * for vm areas. |
8d408b4be percpu: give more... |
1460 |
* |
fd1e8a1fe percpu: introduce... |
1461 1462 1463 1464 1465 1466 1467 1468 1469 |
* @ai->alloc_size is the allocation size and always multiple of * @ai->atom_size. This is larger than @ai->atom_size if * @ai->unit_size is larger than @ai->atom_size. * * @ai->nr_groups and @ai->groups describe virtual memory layout of * percpu areas. Units which should be colocated are put into the * same group. Dynamic VM areas will be allocated according to these * groupings. If @ai->nr_groups is zero, a single group containing * all units is assumed. |
8d408b4be percpu: give more... |
1470 |
* |
38a6be525 percpu: simplify ... |
1471 1472 |
* The caller should have mapped the first chunk at @base_addr and * copied static data to each unit. |
fbf59bc9d percpu: implement... |
1473 |
* |
edcb46399 percpu, module: i... |
1474 1475 1476 1477 1478 1479 1480 |
* If the first chunk ends up with both reserved and dynamic areas, it * is served by two chunks - one to serve the core static and reserved * areas and the other for the dynamic area. They share the same vm * and page map but uses different area allocation map to stay away * from each other. The latter chunk is circulated in the chunk slots * and available for dynamic allocation like any other chunks. * |
fbf59bc9d percpu: implement... |
1481 |
* RETURNS: |
fb435d523 percpu: add pcpu_... |
1482 |
* 0 on success, -errno on failure. |
fbf59bc9d percpu: implement... |
1483 |
*/ |
fb435d523 percpu: add pcpu_... |
1484 1485 |
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr) |
fbf59bc9d percpu: implement... |
1486 |
{ |
099a19d91 percpu: allow lim... |
1487 1488 |
static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; |
fd1e8a1fe percpu: introduce... |
1489 1490 |
size_t dyn_size = ai->dyn_size; size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; |
edcb46399 percpu, module: i... |
1491 |
struct pcpu_chunk *schunk, *dchunk = NULL; |
6563297ce percpu: use group... |
1492 1493 |
unsigned long *group_offsets; size_t *group_sizes; |
fb435d523 percpu: add pcpu_... |
1494 |
unsigned long *unit_off; |
fbf59bc9d percpu: implement... |
1495 |
unsigned int cpu; |
fd1e8a1fe percpu: introduce... |
1496 1497 |
int *unit_map; int group, unit, i; |
fbf59bc9d percpu: implement... |
1498 |
|
635b75fc1 percpu: make pcpu... |
1499 1500 |
#define PCPU_SETUP_BUG_ON(cond) do { \ if (unlikely(cond)) { \ |
870d4b12a mm: percpu: use p... |
1501 1502 1503 1504 |
pr_emerg("failed to initialize, %s ", #cond); \ pr_emerg("cpu_possible_mask=%*pb ", \ |
807de073b percpu: use %*pb[... |
1505 |
cpumask_pr_args(cpu_possible_mask)); \ |
635b75fc1 percpu: make pcpu... |
1506 1507 1508 1509 |
pcpu_dump_alloc_info(KERN_EMERG, ai); \ BUG(); \ } \ } while (0) |
2f39e637e percpu: allow non... |
1510 |
/* sanity checks */ |
635b75fc1 percpu: make pcpu... |
1511 |
PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
bbddff054 percpu: use percp... |
1512 |
#ifdef CONFIG_SMP |
635b75fc1 percpu: make pcpu... |
1513 |
PCPU_SETUP_BUG_ON(!ai->static_size); |
f09f1243c mm/percpu: use of... |
1514 |
PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); |
bbddff054 percpu: use percp... |
1515 |
#endif |
635b75fc1 percpu: make pcpu... |
1516 |
PCPU_SETUP_BUG_ON(!base_addr); |
f09f1243c mm/percpu: use of... |
1517 |
PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); |
635b75fc1 percpu: make pcpu... |
1518 |
PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
f09f1243c mm/percpu: use of... |
1519 |
PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); |
635b75fc1 percpu: make pcpu... |
1520 |
PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
099a19d91 percpu: allow lim... |
1521 |
PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); |
9f6455325 percpu: move vmal... |
1522 |
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); |
8d408b4be percpu: give more... |
1523 |
|
6563297ce percpu: use group... |
1524 |
/* process group information and build config tables accordingly */ |
999c17e3d mm/percpu.c: use ... |
1525 1526 1527 1528 1529 1530 |
group_offsets = memblock_virt_alloc(ai->nr_groups * sizeof(group_offsets[0]), 0); group_sizes = memblock_virt_alloc(ai->nr_groups * sizeof(group_sizes[0]), 0); unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); |
2f39e637e percpu: allow non... |
1531 |
|
fd1e8a1fe percpu: introduce... |
1532 |
for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
ffe0d5a57 percpu: fix unit_... |
1533 |
unit_map[cpu] = UINT_MAX; |
a855b84c3 percpu: fix chunk... |
1534 1535 1536 |
pcpu_low_unit_cpu = NR_CPUS; pcpu_high_unit_cpu = NR_CPUS; |
2f39e637e percpu: allow non... |
1537 |
|
fd1e8a1fe percpu: introduce... |
1538 1539 |
for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { const struct pcpu_group_info *gi = &ai->groups[group]; |
2f39e637e percpu: allow non... |
1540 |
|
6563297ce percpu: use group... |
1541 1542 |
group_offsets[group] = gi->base_offset; group_sizes[group] = gi->nr_units * ai->unit_size; |
fd1e8a1fe percpu: introduce... |
1543 1544 1545 1546 |
for (i = 0; i < gi->nr_units; i++) { cpu = gi->cpu_map[i]; if (cpu == NR_CPUS) continue; |
8d408b4be percpu: give more... |
1547 |
|
9f295664e percpu: off by on... |
1548 |
PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); |
635b75fc1 percpu: make pcpu... |
1549 1550 |
PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); |
fbf59bc9d percpu: implement... |
1551 |
|
fd1e8a1fe percpu: introduce... |
1552 |
unit_map[cpu] = unit + i; |
fb435d523 percpu: add pcpu_... |
1553 |
unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
a855b84c3 percpu: fix chunk... |
1554 1555 1556 1557 1558 1559 1560 |
/* determine low/high unit_cpu */ if (pcpu_low_unit_cpu == NR_CPUS || unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) pcpu_low_unit_cpu = cpu; if (pcpu_high_unit_cpu == NR_CPUS || unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) pcpu_high_unit_cpu = cpu; |
fd1e8a1fe percpu: introduce... |
1561 |
} |
2f39e637e percpu: allow non... |
1562 |
} |
fd1e8a1fe percpu: introduce... |
1563 1564 1565 |
pcpu_nr_units = unit; for_each_possible_cpu(cpu) |
635b75fc1 percpu: make pcpu... |
1566 1567 1568 1569 |
PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); /* we're done parsing the input, undefine BUG macro and dump config */ #undef PCPU_SETUP_BUG_ON |
bcbea798f percpu: print out... |
1570 |
pcpu_dump_alloc_info(KERN_DEBUG, ai); |
fd1e8a1fe percpu: introduce... |
1571 |
|
6563297ce percpu: use group... |
1572 1573 1574 |
pcpu_nr_groups = ai->nr_groups; pcpu_group_offsets = group_offsets; pcpu_group_sizes = group_sizes; |
fd1e8a1fe percpu: introduce... |
1575 |
pcpu_unit_map = unit_map; |
fb435d523 percpu: add pcpu_... |
1576 |
pcpu_unit_offsets = unit_off; |
2f39e637e percpu: allow non... |
1577 1578 |
/* determine basic parameters */ |
fd1e8a1fe percpu: introduce... |
1579 |
pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
d9b55eeb1 percpu: remove un... |
1580 |
pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
6563297ce percpu: use group... |
1581 |
pcpu_atom_size = ai->atom_size; |
ce3141a27 percpu: drop pcpu... |
1582 1583 |
pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); |
cafe8816b percpu: use negat... |
1584 |
|
d9b55eeb1 percpu: remove un... |
1585 1586 1587 1588 1589 |
/* * Allocate chunk slots. The additional last slot is for * empty chunks. */ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; |
999c17e3d mm/percpu.c: use ... |
1590 1591 |
pcpu_slot = memblock_virt_alloc( pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); |
fbf59bc9d percpu: implement... |
1592 1593 |
for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_slot[i]); |
edcb46399 percpu, module: i... |
1594 1595 1596 1597 1598 1599 1600 |
/* * Initialize static chunk. If reserved_size is zero, the * static chunk covers static area + dynamic allocation area * in the first chunk. If reserved_size is not zero, it * covers static area + reserved area (mostly used for module * static percpu allocation). */ |
999c17e3d mm/percpu.c: use ... |
1601 |
schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
2441d15c9 percpu: cosmetic ... |
1602 |
INIT_LIST_HEAD(&schunk->list); |
4f996e234 percpu: fix synch... |
1603 |
INIT_LIST_HEAD(&schunk->map_extend_list); |
bba174f5e percpu: add chunk... |
1604 |
schunk->base_addr = base_addr; |
61ace7fa2 percpu: improve f... |
1605 1606 |
schunk->map = smap; schunk->map_alloc = ARRAY_SIZE(smap); |
38a6be525 percpu: simplify ... |
1607 |
schunk->immutable = true; |
ce3141a27 percpu: drop pcpu... |
1608 |
bitmap_fill(schunk->populated, pcpu_unit_pages); |
b539b87fe percpu: implmeent... |
1609 |
schunk->nr_populated = pcpu_unit_pages; |
edcb46399 percpu, module: i... |
1610 |
|
fd1e8a1fe percpu: introduce... |
1611 1612 |
if (ai->reserved_size) { schunk->free_size = ai->reserved_size; |
ae9e6bc9f percpu: don't put... |
1613 |
pcpu_reserved_chunk = schunk; |
fd1e8a1fe percpu: introduce... |
1614 |
pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; |
edcb46399 percpu, module: i... |
1615 1616 1617 1618 |
} else { schunk->free_size = dyn_size; dyn_size = 0; /* dynamic area covered */ } |
2441d15c9 percpu: cosmetic ... |
1619 |
schunk->contig_hint = schunk->free_size; |
fbf59bc9d percpu: implement... |
1620 |
|
723ad1d90 percpu: store off... |
1621 1622 1623 |
schunk->map[0] = 1; schunk->map[1] = ai->static_size; schunk->map_used = 1; |
61ace7fa2 percpu: improve f... |
1624 |
if (schunk->free_size) |
292c24a07 percpu: clean up ... |
1625 1626 |
schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size; schunk->map[schunk->map_used] |= 1; |
61ace7fa2 percpu: improve f... |
1627 |
|
edcb46399 percpu, module: i... |
1628 1629 |
/* init dynamic chunk if necessary */ if (dyn_size) { |
999c17e3d mm/percpu.c: use ... |
1630 |
dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
edcb46399 percpu, module: i... |
1631 |
INIT_LIST_HEAD(&dchunk->list); |
4f996e234 percpu: fix synch... |
1632 |
INIT_LIST_HEAD(&dchunk->map_extend_list); |
bba174f5e percpu: add chunk... |
1633 |
dchunk->base_addr = base_addr; |
edcb46399 percpu, module: i... |
1634 1635 |
dchunk->map = dmap; dchunk->map_alloc = ARRAY_SIZE(dmap); |
38a6be525 percpu: simplify ... |
1636 |
dchunk->immutable = true; |
ce3141a27 percpu: drop pcpu... |
1637 |
bitmap_fill(dchunk->populated, pcpu_unit_pages); |
b539b87fe percpu: implmeent... |
1638 |
dchunk->nr_populated = pcpu_unit_pages; |
edcb46399 percpu, module: i... |
1639 1640 |
dchunk->contig_hint = dchunk->free_size = dyn_size; |
723ad1d90 percpu: store off... |
1641 1642 1643 1644 |
dchunk->map[0] = 1; dchunk->map[1] = pcpu_reserved_chunk_limit; dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; dchunk->map_used = 2; |
edcb46399 percpu, module: i... |
1645 |
} |
2441d15c9 percpu: cosmetic ... |
1646 |
/* link the first chunk in */ |
ae9e6bc9f percpu: don't put... |
1647 |
pcpu_first_chunk = dchunk ?: schunk; |
b539b87fe percpu: implmeent... |
1648 1649 |
pcpu_nr_empty_pop_pages += pcpu_count_occupied_pages(pcpu_first_chunk, 1); |
ae9e6bc9f percpu: don't put... |
1650 |
pcpu_chunk_relocate(pcpu_first_chunk, -1); |
fbf59bc9d percpu: implement... |
1651 1652 |
/* we're done */ |
bba174f5e percpu: add chunk... |
1653 |
pcpu_base_addr = base_addr; |
fb435d523 percpu: add pcpu_... |
1654 |
return 0; |
fbf59bc9d percpu: implement... |
1655 |
} |
66c3a7577 percpu: generaliz... |
1656 |
|
bbddff054 percpu: use percp... |
1657 |
#ifdef CONFIG_SMP |
17f3609c2 sections: fix sec... |
1658 |
const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { |
f58dc01ba percpu: generaliz... |
1659 1660 1661 |
[PCPU_FC_AUTO] = "auto", [PCPU_FC_EMBED] = "embed", [PCPU_FC_PAGE] = "page", |
f58dc01ba percpu: generaliz... |
1662 |
}; |
66c3a7577 percpu: generaliz... |
1663 |
|
f58dc01ba percpu: generaliz... |
1664 |
enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
66c3a7577 percpu: generaliz... |
1665 |
|
f58dc01ba percpu: generaliz... |
1666 1667 |
static int __init percpu_alloc_setup(char *str) { |
5479c78ac mm, percpu: Make ... |
1668 1669 |
if (!str) return -EINVAL; |
f58dc01ba percpu: generaliz... |
1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 |
if (0) /* nada */; #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK else if (!strcmp(str, "embed")) pcpu_chosen_fc = PCPU_FC_EMBED; #endif #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK else if (!strcmp(str, "page")) pcpu_chosen_fc = PCPU_FC_PAGE; #endif |
f58dc01ba percpu: generaliz... |
1680 |
else |
870d4b12a mm: percpu: use p... |
1681 1682 |
pr_warn("unknown allocator %s specified ", str); |
66c3a7577 percpu: generaliz... |
1683 |
|
f58dc01ba percpu: generaliz... |
1684 |
return 0; |
66c3a7577 percpu: generaliz... |
1685 |
} |
f58dc01ba percpu: generaliz... |
1686 |
early_param("percpu_alloc", percpu_alloc_setup); |
66c3a7577 percpu: generaliz... |
1687 |
|
3c9a024fd percpu: fix build... |
1688 1689 1690 1691 1692 |
/* * pcpu_embed_first_chunk() is used by the generic percpu setup. * Build it if needed by the arch config or the generic setup is going * to be used. */ |
08fc45806 percpu: build fir... |
1693 1694 |
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
3c9a024fd percpu: fix build... |
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 |
#define BUILD_EMBED_FIRST_CHUNK #endif /* build pcpu_page_first_chunk() iff needed by the arch config */ #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) #define BUILD_PAGE_FIRST_CHUNK #endif /* pcpu_build_alloc_info() is used by both embed and page first chunk */ #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) /** * pcpu_build_alloc_info - build alloc_info considering distances between CPUs * @reserved_size: the size of reserved percpu area in bytes * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * * This function determines grouping of units, their mappings to cpus * and other parameters considering needed percpu size, allocation * atom size and distances between CPUs. * |
bffc43758 percpu: Fix trivi... |
1716 |
* Groups are always multiples of atom size and CPUs which are of |
3c9a024fd percpu: fix build... |
1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 |
* LOCAL_DISTANCE both ways are grouped together and share space for * units in the same group. The returned configuration is guaranteed * to have CPUs on different nodes on different groups and >=75% usage * of allocated virtual address space. * * RETURNS: * On success, pointer to the new allocation_info is returned. On * failure, ERR_PTR value is returned. */ static struct pcpu_alloc_info * __init pcpu_build_alloc_info( size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn) { static int group_map[NR_CPUS] __initdata; static int group_cnt[NR_CPUS] __initdata; const size_t static_size = __per_cpu_end - __per_cpu_start; int nr_groups = 1, nr_units = 0; size_t size_sum, min_unit_size, alloc_size; int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ int last_allocs, group, unit; unsigned int cpu, tcpu; struct pcpu_alloc_info *ai; unsigned int *cpu_map; /* this function may be called multiple times */ memset(group_map, 0, sizeof(group_map)); memset(group_cnt, 0, sizeof(group_cnt)); /* calculate size_sum and ensure dyn_size is enough for early alloc */ size_sum = PFN_ALIGN(static_size + reserved_size + max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); dyn_size = size_sum - static_size - reserved_size; /* * Determine min_unit_size, alloc_size and max_upa such that * alloc_size is multiple of atom_size and is the smallest |
25985edce Fix common misspe... |
1754 |
* which can accommodate 4k aligned segments which are equal to |
3c9a024fd percpu: fix build... |
1755 1756 1757 1758 1759 1760 |
* or larger than min_unit_size. */ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); alloc_size = roundup(min_unit_size, atom_size); upa = alloc_size / min_unit_size; |
f09f1243c mm/percpu: use of... |
1761 |
while (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024fd percpu: fix build... |
1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 |
upa--; max_upa = upa; /* group cpus according to their proximity */ for_each_possible_cpu(cpu) { group = 0; next_group: for_each_possible_cpu(tcpu) { if (cpu == tcpu) break; if (group_map[tcpu] == group && cpu_distance_fn && (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { group++; nr_groups = max(nr_groups, group + 1); goto next_group; } } group_map[cpu] = group; group_cnt[group]++; } /* * Expand unit size until address space usage goes over 75% * and then as much as possible without using more address * space. */ last_allocs = INT_MAX; for (upa = max_upa; upa; upa--) { int allocs = 0, wasted = 0; |
f09f1243c mm/percpu: use of... |
1792 |
if (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024fd percpu: fix build... |
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 |
continue; for (group = 0; group < nr_groups; group++) { int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); allocs += this_allocs; wasted += this_allocs * upa - group_cnt[group]; } /* * Don't accept if wastage is over 1/3. The * greater-than comparison ensures upa==1 always * passes the following check. */ if (wasted > num_possible_cpus() / 3) continue; /* and then don't consume more memory */ if (allocs > last_allocs) break; last_allocs = allocs; best_upa = upa; } upa = best_upa; /* allocate and fill alloc_info */ for (group = 0; group < nr_groups; group++) nr_units += roundup(group_cnt[group], upa); ai = pcpu_alloc_alloc_info(nr_groups, nr_units); if (!ai) return ERR_PTR(-ENOMEM); cpu_map = ai->groups[0].cpu_map; for (group = 0; group < nr_groups; group++) { ai->groups[group].cpu_map = cpu_map; cpu_map += roundup(group_cnt[group], upa); } ai->static_size = static_size; ai->reserved_size = reserved_size; ai->dyn_size = dyn_size; ai->unit_size = alloc_size / upa; ai->atom_size = atom_size; ai->alloc_size = alloc_size; for (group = 0, unit = 0; group_cnt[group]; group++) { struct pcpu_group_info *gi = &ai->groups[group]; /* * Initialize base_offset as if all groups are located * back-to-back. The caller should update this to * reflect actual allocation. */ gi->base_offset = unit * ai->unit_size; for_each_possible_cpu(cpu) if (group_map[cpu] == group) gi->cpu_map[gi->nr_units++] = cpu; gi->nr_units = roundup(gi->nr_units, upa); unit += gi->nr_units; } BUG_ON(unit != nr_units); return ai; } #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ #if defined(BUILD_EMBED_FIRST_CHUNK) |
66c3a7577 percpu: generaliz... |
1861 1862 |
/** * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem |
66c3a7577 percpu: generaliz... |
1863 |
* @reserved_size: the size of reserved percpu area in bytes |
4ba6ce250 percpu: make @dyn... |
1864 |
* @dyn_size: minimum free size for dynamic allocation in bytes |
c8826dd53 percpu: update em... |
1865 1866 1867 |
* @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * @alloc_fn: function to allocate percpu page |
25985edce Fix common misspe... |
1868 |
* @free_fn: function to free percpu page |
66c3a7577 percpu: generaliz... |
1869 1870 1871 1872 1873 |
* * This is a helper to ease setting up embedded first percpu chunk and * can be called where pcpu_setup_first_chunk() is expected. * * If this function is used to setup the first chunk, it is allocated |
c8826dd53 percpu: update em... |
1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 |
* by calling @alloc_fn and used as-is without being mapped into * vmalloc area. Allocations are always whole multiples of @atom_size * aligned to @atom_size. * * This enables the first chunk to piggy back on the linear physical * mapping which often uses larger page size. Please note that this * can result in very sparse cpu->unit mapping on NUMA machines thus * requiring large vmalloc address space. Don't use this allocator if * vmalloc space is not orders of magnitude larger than distances * between node memory addresses (ie. 32bit NUMA machines). |
66c3a7577 percpu: generaliz... |
1884 |
* |
4ba6ce250 percpu: make @dyn... |
1885 |
* @dyn_size specifies the minimum dynamic area size. |
66c3a7577 percpu: generaliz... |
1886 1887 |
* * If the needed size is smaller than the minimum or specified unit |
c8826dd53 percpu: update em... |
1888 |
* size, the leftover is returned using @free_fn. |
66c3a7577 percpu: generaliz... |
1889 1890 |
* * RETURNS: |
fb435d523 percpu: add pcpu_... |
1891 |
* 0 on success, -errno on failure. |
66c3a7577 percpu: generaliz... |
1892 |
*/ |
4ba6ce250 percpu: make @dyn... |
1893 |
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, |
c8826dd53 percpu: update em... |
1894 1895 1896 1897 |
size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn) |
66c3a7577 percpu: generaliz... |
1898 |
{ |
c8826dd53 percpu: update em... |
1899 1900 |
void *base = (void *)ULONG_MAX; void **areas = NULL; |
fd1e8a1fe percpu: introduce... |
1901 |
struct pcpu_alloc_info *ai; |
93c76b6b2 mm/percpu.c: corr... |
1902 1903 |
size_t size_sum, areas_size; unsigned long max_distance; |
9b7396624 mm/percpu.c: fix ... |
1904 |
int group, i, highest_group, rc; |
66c3a7577 percpu: generaliz... |
1905 |
|
c8826dd53 percpu: update em... |
1906 1907 |
ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, cpu_distance_fn); |
fd1e8a1fe percpu: introduce... |
1908 1909 |
if (IS_ERR(ai)) return PTR_ERR(ai); |
66c3a7577 percpu: generaliz... |
1910 |
|
fd1e8a1fe percpu: introduce... |
1911 |
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
c8826dd53 percpu: update em... |
1912 |
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
fa8a7094b x86: implement pe... |
1913 |
|
999c17e3d mm/percpu.c: use ... |
1914 |
areas = memblock_virt_alloc_nopanic(areas_size, 0); |
c8826dd53 percpu: update em... |
1915 |
if (!areas) { |
fb435d523 percpu: add pcpu_... |
1916 |
rc = -ENOMEM; |
c8826dd53 percpu: update em... |
1917 |
goto out_free; |
fa8a7094b x86: implement pe... |
1918 |
} |
66c3a7577 percpu: generaliz... |
1919 |
|
9b7396624 mm/percpu.c: fix ... |
1920 1921 |
/* allocate, copy and determine base address & max_distance */ highest_group = 0; |
c8826dd53 percpu: update em... |
1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 |
for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; unsigned int cpu = NR_CPUS; void *ptr; for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) cpu = gi->cpu_map[i]; BUG_ON(cpu == NR_CPUS); /* allocate space for the whole group */ ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); if (!ptr) { rc = -ENOMEM; goto out_free_areas; } |
f528f0b8e kmemleak: Handle ... |
1937 1938 |
/* kmemleak tracks the percpu allocations separately */ kmemleak_free(ptr); |
c8826dd53 percpu: update em... |
1939 |
areas[group] = ptr; |
fd1e8a1fe percpu: introduce... |
1940 |
|
c8826dd53 percpu: update em... |
1941 |
base = min(ptr, base); |
9b7396624 mm/percpu.c: fix ... |
1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 |
if (ptr > areas[highest_group]) highest_group = group; } max_distance = areas[highest_group] - base; max_distance += ai->unit_size * ai->groups[highest_group].nr_units; /* warn if maximum distance is further than 75% of vmalloc space */ if (max_distance > VMALLOC_TOTAL * 3 / 4) { pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx ", max_distance, VMALLOC_TOTAL); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK /* and fail if we have fallback */ rc = -EINVAL; goto out_free_areas; #endif |
42b642814 percpu: pcpu_embe... |
1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 |
} /* * Copy data and free unused parts. This should happen after all * allocations are complete; otherwise, we may end up with * overlapping groups. */ for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; void *ptr = areas[group]; |
c8826dd53 percpu: update em... |
1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 |
for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { if (gi->cpu_map[i] == NR_CPUS) { /* unused unit, free whole */ free_fn(ptr, ai->unit_size); continue; } /* copy and return the unused part */ memcpy(ptr, __per_cpu_load, ai->static_size); free_fn(ptr + size_sum, ai->unit_size - size_sum); } |
fa8a7094b x86: implement pe... |
1979 |
} |
66c3a7577 percpu: generaliz... |
1980 |
|
c8826dd53 percpu: update em... |
1981 |
/* base address is now known, determine group base offsets */ |
6ea529a20 percpu: make embe... |
1982 |
for (group = 0; group < ai->nr_groups; group++) { |
c8826dd53 percpu: update em... |
1983 |
ai->groups[group].base_offset = areas[group] - base; |
6ea529a20 percpu: make embe... |
1984 |
} |
c8826dd53 percpu: update em... |
1985 |
|
870d4b12a mm: percpu: use p... |
1986 1987 |
pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu ", |
fd1e8a1fe percpu: introduce... |
1988 1989 |
PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size); |
d4b95f803 x86,percpu: gener... |
1990 |
|
fb435d523 percpu: add pcpu_... |
1991 |
rc = pcpu_setup_first_chunk(ai, base); |
c8826dd53 percpu: update em... |
1992 1993 1994 1995 |
goto out_free; out_free_areas: for (group = 0; group < ai->nr_groups; group++) |
f851c8d85 percpu: fix bootm... |
1996 1997 1998 |
if (areas[group]) free_fn(areas[group], ai->groups[group].nr_units * ai->unit_size); |
c8826dd53 percpu: update em... |
1999 |
out_free: |
fd1e8a1fe percpu: introduce... |
2000 |
pcpu_free_alloc_info(ai); |
c8826dd53 percpu: update em... |
2001 |
if (areas) |
999c17e3d mm/percpu.c: use ... |
2002 |
memblock_free_early(__pa(areas), areas_size); |
fb435d523 percpu: add pcpu_... |
2003 |
return rc; |
d4b95f803 x86,percpu: gener... |
2004 |
} |
3c9a024fd percpu: fix build... |
2005 |
#endif /* BUILD_EMBED_FIRST_CHUNK */ |
d4b95f803 x86,percpu: gener... |
2006 |
|
3c9a024fd percpu: fix build... |
2007 |
#ifdef BUILD_PAGE_FIRST_CHUNK |
d4b95f803 x86,percpu: gener... |
2008 |
/** |
00ae4064b percpu: rename 4k... |
2009 |
* pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
d4b95f803 x86,percpu: gener... |
2010 2011 |
* @reserved_size: the size of reserved percpu area in bytes * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE |
25985edce Fix common misspe... |
2012 |
* @free_fn: function to free percpu page, always called with PAGE_SIZE |
d4b95f803 x86,percpu: gener... |
2013 2014 |
* @populate_pte_fn: function to populate pte * |
00ae4064b percpu: rename 4k... |
2015 2016 |
* This is a helper to ease setting up page-remapped first percpu * chunk and can be called where pcpu_setup_first_chunk() is expected. |
d4b95f803 x86,percpu: gener... |
2017 2018 2019 2020 2021 |
* * This is the basic allocator. Static percpu area is allocated * page-by-page into vmalloc area. * * RETURNS: |
fb435d523 percpu: add pcpu_... |
2022 |
* 0 on success, -errno on failure. |
d4b95f803 x86,percpu: gener... |
2023 |
*/ |
fb435d523 percpu: add pcpu_... |
2024 2025 2026 2027 |
int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn, pcpu_fc_populate_pte_fn_t populate_pte_fn) |
d4b95f803 x86,percpu: gener... |
2028 |
{ |
8f05a6a65 percpu: make 4k f... |
2029 |
static struct vm_struct vm; |
fd1e8a1fe percpu: introduce... |
2030 |
struct pcpu_alloc_info *ai; |
00ae4064b percpu: rename 4k... |
2031 |
char psize_str[16]; |
ce3141a27 percpu: drop pcpu... |
2032 |
int unit_pages; |
d4b95f803 x86,percpu: gener... |
2033 |
size_t pages_size; |
ce3141a27 percpu: drop pcpu... |
2034 |
struct page **pages; |
fb435d523 percpu: add pcpu_... |
2035 |
int unit, i, j, rc; |
d4b95f803 x86,percpu: gener... |
2036 |
|
00ae4064b percpu: rename 4k... |
2037 |
snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
4ba6ce250 percpu: make @dyn... |
2038 |
ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); |
fd1e8a1fe percpu: introduce... |
2039 2040 2041 2042 2043 2044 |
if (IS_ERR(ai)) return PTR_ERR(ai); BUG_ON(ai->nr_groups != 1); BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); unit_pages = ai->unit_size >> PAGE_SHIFT; |
d4b95f803 x86,percpu: gener... |
2045 2046 |
/* unaligned allocations can't be freed, round up to page size */ |
fd1e8a1fe percpu: introduce... |
2047 2048 |
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); |
999c17e3d mm/percpu.c: use ... |
2049 |
pages = memblock_virt_alloc(pages_size, 0); |
d4b95f803 x86,percpu: gener... |
2050 |
|
8f05a6a65 percpu: make 4k f... |
2051 |
/* allocate pages */ |
d4b95f803 x86,percpu: gener... |
2052 |
j = 0; |
fd1e8a1fe percpu: introduce... |
2053 |
for (unit = 0; unit < num_possible_cpus(); unit++) |
ce3141a27 percpu: drop pcpu... |
2054 |
for (i = 0; i < unit_pages; i++) { |
fd1e8a1fe percpu: introduce... |
2055 |
unsigned int cpu = ai->groups[0].cpu_map[unit]; |
d4b95f803 x86,percpu: gener... |
2056 |
void *ptr; |
3cbc85652 percpu: add @alig... |
2057 |
ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
d4b95f803 x86,percpu: gener... |
2058 |
if (!ptr) { |
870d4b12a mm: percpu: use p... |
2059 2060 |
pr_warn("failed to allocate %s page for cpu%u ", |
598d80914 mm: convert pr_wa... |
2061 |
psize_str, cpu); |
d4b95f803 x86,percpu: gener... |
2062 2063 |
goto enomem; } |
f528f0b8e kmemleak: Handle ... |
2064 2065 |
/* kmemleak tracks the percpu allocations separately */ kmemleak_free(ptr); |
ce3141a27 percpu: drop pcpu... |
2066 |
pages[j++] = virt_to_page(ptr); |
d4b95f803 x86,percpu: gener... |
2067 |
} |
8f05a6a65 percpu: make 4k f... |
2068 2069 |
/* allocate vm area, map the pages and copy static data */ vm.flags = VM_ALLOC; |
fd1e8a1fe percpu: introduce... |
2070 |
vm.size = num_possible_cpus() * ai->unit_size; |
8f05a6a65 percpu: make 4k f... |
2071 |
vm_area_register_early(&vm, PAGE_SIZE); |
fd1e8a1fe percpu: introduce... |
2072 |
for (unit = 0; unit < num_possible_cpus(); unit++) { |
1d9d32572 percpu: make @dyn... |
2073 |
unsigned long unit_addr = |
fd1e8a1fe percpu: introduce... |
2074 |
(unsigned long)vm.addr + unit * ai->unit_size; |
8f05a6a65 percpu: make 4k f... |
2075 |
|
ce3141a27 percpu: drop pcpu... |
2076 |
for (i = 0; i < unit_pages; i++) |
8f05a6a65 percpu: make 4k f... |
2077 2078 2079 |
populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); /* pte already populated, the following shouldn't fail */ |
fb435d523 percpu: add pcpu_... |
2080 2081 2082 2083 2084 |
rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], unit_pages); if (rc < 0) panic("failed to map percpu area, err=%d ", rc); |
66c3a7577 percpu: generaliz... |
2085 |
|
8f05a6a65 percpu: make 4k f... |
2086 2087 2088 2089 2090 2091 2092 2093 2094 |
/* * FIXME: Archs with virtual cache should flush local * cache for the linear mapping here - something * equivalent to flush_cache_vmap() on the local cpu. * flush_cache_vmap() can't be used as most supporting * data structures are not set up yet. */ /* copy static data */ |
fd1e8a1fe percpu: introduce... |
2095 |
memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
66c3a7577 percpu: generaliz... |
2096 2097 2098 |
} /* we're ready, commit */ |
870d4b12a mm: percpu: use p... |
2099 2100 |
pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu ", |
fd1e8a1fe percpu: introduce... |
2101 2102 |
unit_pages, psize_str, vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size); |
d4b95f803 x86,percpu: gener... |
2103 |
|
fb435d523 percpu: add pcpu_... |
2104 |
rc = pcpu_setup_first_chunk(ai, vm.addr); |
d4b95f803 x86,percpu: gener... |
2105 2106 2107 2108 |
goto out_free_ar; enomem: while (--j >= 0) |
ce3141a27 percpu: drop pcpu... |
2109 |
free_fn(page_address(pages[j]), PAGE_SIZE); |
fb435d523 percpu: add pcpu_... |
2110 |
rc = -ENOMEM; |
d4b95f803 x86,percpu: gener... |
2111 |
out_free_ar: |
999c17e3d mm/percpu.c: use ... |
2112 |
memblock_free_early(__pa(pages), pages_size); |
fd1e8a1fe percpu: introduce... |
2113 |
pcpu_free_alloc_info(ai); |
fb435d523 percpu: add pcpu_... |
2114 |
return rc; |
d4b95f803 x86,percpu: gener... |
2115 |
} |
3c9a024fd percpu: fix build... |
2116 |
#endif /* BUILD_PAGE_FIRST_CHUNK */ |
d4b95f803 x86,percpu: gener... |
2117 |
|
bbddff054 percpu: use percp... |
2118 |
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
8c4bfc6e8 x86,percpu: gener... |
2119 |
/* |
bbddff054 percpu: use percp... |
2120 |
* Generic SMP percpu area setup. |
e74e39620 percpu: use dynam... |
2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 |
* * The embedding helper is used because its behavior closely resembles * the original non-dynamic generic percpu area setup. This is * important because many archs have addressing restrictions and might * fail if the percpu area is located far away from the previous * location. As an added bonus, in non-NUMA cases, embedding is * generally a good idea TLB-wise because percpu area can piggy back * on the physical linear memory mapping which uses large page * mappings on applicable archs. */ |
e74e39620 percpu: use dynam... |
2131 2132 |
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); |
c8826dd53 percpu: update em... |
2133 2134 2135 |
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, size_t align) { |
999c17e3d mm/percpu.c: use ... |
2136 2137 |
return memblock_virt_alloc_from_nopanic( size, align, __pa(MAX_DMA_ADDRESS)); |
c8826dd53 percpu: update em... |
2138 |
} |
66c3a7577 percpu: generaliz... |
2139 |
|
c8826dd53 percpu: update em... |
2140 2141 |
static void __init pcpu_dfl_fc_free(void *ptr, size_t size) { |
999c17e3d mm/percpu.c: use ... |
2142 |
memblock_free_early(__pa(ptr), size); |
c8826dd53 percpu: update em... |
2143 |
} |
e74e39620 percpu: use dynam... |
2144 2145 |
void __init setup_per_cpu_areas(void) { |
e74e39620 percpu: use dynam... |
2146 2147 |
unsigned long delta; unsigned int cpu; |
fb435d523 percpu: add pcpu_... |
2148 |
int rc; |
e74e39620 percpu: use dynam... |
2149 2150 2151 2152 2153 |
/* * Always reserve area for module percpu variables. That's * what the legacy allocator did. */ |
fb435d523 percpu: add pcpu_... |
2154 |
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
c8826dd53 percpu: update em... |
2155 2156 |
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); |
fb435d523 percpu: add pcpu_... |
2157 |
if (rc < 0) |
bbddff054 percpu: use percp... |
2158 |
panic("Failed to initialize percpu areas."); |
e74e39620 percpu: use dynam... |
2159 2160 2161 |
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) |
fb435d523 percpu: add pcpu_... |
2162 |
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
66c3a7577 percpu: generaliz... |
2163 |
} |
bbddff054 percpu: use percp... |
2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 |
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ #else /* CONFIG_SMP */ /* * UP percpu area setup. * * UP always uses km-based percpu allocator with identity mapping. * Static percpu variables are indistinguishable from the usual static * variables and don't require any special preparation. */ void __init setup_per_cpu_areas(void) { const size_t unit_size = roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, PERCPU_DYNAMIC_RESERVE)); struct pcpu_alloc_info *ai; void *fc; ai = pcpu_alloc_alloc_info(1, 1); |
999c17e3d mm/percpu.c: use ... |
2184 2185 2186 |
fc = memblock_virt_alloc_from_nopanic(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
bbddff054 percpu: use percp... |
2187 2188 |
if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); |
100d13c3b kmemleak: Fix the... |
2189 2190 |
/* kmemleak tracks the percpu allocations separately */ kmemleak_free(fc); |
bbddff054 percpu: use percp... |
2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 |
ai->dyn_size = unit_size; ai->unit_size = unit_size; ai->atom_size = unit_size; ai->alloc_size = unit_size; ai->groups[0].nr_units = 1; ai->groups[0].cpu_map[0] = 0; if (pcpu_setup_first_chunk(ai, fc) < 0) panic("Failed to initialize percpu areas."); } #endif /* CONFIG_SMP */ |
099a19d91 percpu: allow lim... |
2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 |
/* * First and reserved chunks are initialized with temporary allocation * map in initdata so that they can be used before slab is online. * This function is called after slab is brought up and replaces those * with properly allocated maps. */ void __init percpu_init_late(void) { struct pcpu_chunk *target_chunks[] = { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; struct pcpu_chunk *chunk; unsigned long flags; int i; for (i = 0; (chunk = target_chunks[i]); i++) { int *map; const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); BUILD_BUG_ON(size > PAGE_SIZE); |
90459ce06 percpu: rename pc... |
2224 |
map = pcpu_mem_zalloc(size); |
099a19d91 percpu: allow lim... |
2225 2226 2227 2228 2229 2230 2231 2232 |
BUG_ON(!map); spin_lock_irqsave(&pcpu_lock, flags); memcpy(map, chunk->map, size); chunk->map = map; spin_unlock_irqrestore(&pcpu_lock, flags); } } |
1a4d76076 percpu: implement... |
2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 |
/* * Percpu allocator is initialized early during boot when neither slab or * workqueue is available. Plug async management until everything is up * and running. */ static int __init percpu_enable_async(void) { pcpu_async_enabled = true; return 0; } subsys_initcall(percpu_enable_async); |