mm/page_ext.c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page. This requires rebuilding the kernel, a really time-consuming
 * process, and sometimes a rebuild is impossible due to third-party module
 * dependencies. Finally, enlarging struct page could cause unwanted changes
 * in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page somewhere other than
 * struct page itself; this memory can be accessed through the accessor
 * functions provided by this code. During the boot process, it checks
 * whether allocating a huge chunk of memory is needed or not, and if not,
 * it avoids allocating memory at all. With this advantage, we can include
 * this feature in the kernel by default and avoid rebuilds and the problems
 * related to them.
 *
 * To make this work, there are two callbacks for clients. One is the need
 * callback, which is mandatory if the user wants to avoid useless memory
 * allocation at boot time. The other is the init callback, which is
 * optional and is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether extended memory allocation is
 * needed or not. Sometimes users want to deactivate some features for a
 * given boot, in which case the extra memory would be unnecessary. To avoid
 * allocating a huge chunk of memory in that case, each client expresses its
 * need for extra memory through the need callback. If one of the need
 * callbacks returns true, someone needs extra memory and the page extension
 * core should allocate it. If none of the need callbacks returns true, the
 * memory isn't needed at all for this boot and the page extension core can
 * skip the allocation. As a result, no memory is wasted.
 *
 * When the need callback returns true, page_ext checks whether extra memory
 * has been requested through the size field in struct page_ext_operations.
 * If it is non-zero, extra space is allocated in each page_ext entry and
 * its location within the entry is handed back to the user through the
 * offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page extension
 * is completely initialized. On sparse memory systems, the extra memory is
 * allocated some time later than the memmap is, so the lifetime of the
 * memory for page extension isn't the same as that of the memmap for
 * struct page. Therefore, clients can't store extra data until page
 * extension is initialized, even though pages may already be allocated and
 * freely used. This could leave the per-page extra data in an inadequate
 * state, so, to prevent that, a client can use this callback to initialize
 * its state correctly.
 */
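
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * client "foo" wanting 8 bytes of per-page data would register callbacks
 * roughly like this, with invoke_need_callbacks() below filling in
 * .offset when need_foo() returns true; the names need_foo, init_foo and
 * foo_tracking_enabled are made up for the example:
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_tracking_enabled;
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		pr_info("foo: page_ext ready, seeding per-page state\n");
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size	= sizeof(u64),
 *		.need	= need_foo,
 *		.init	= init_foo,
 *	};
 *
 * foo_ops would then be added to the page_ext_ops[] array below.
 */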

static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
	&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
/* extra bytes requested per entry by clients whose need() returned true */
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

/* each entry is a struct page_ext header followed by clients' extra space */
static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}
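
/*
 * Illustrative sketch (hypothetical, continuing the foo example above):
 * because entries are variable-sized, a client reaches its extra space
 * through the offset that invoke_need_callbacks() assigned, e.g.:
 *
 *	struct page_ext *page_ext = lookup_page_ext(page);
 *	u64 *foo_data = (void *)page_ext + foo_ops.offset;
 */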

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range could fall outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;
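	/*
	 * Worked example with hypothetical numbers: with MAX_ORDER_NR_PAGES
	 * equal to 1024 and a node spanning pfns [0, 5000), the buddy checks
	 * for a page near pfn 4999 can touch pfns up to 5119, past the node
	 * end, so the extra MAX_ORDER block of entries keeps those
	 * lookup_page_ext() accesses within the table.
	 */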

	table_size = get_entry_size() * nr_pages;
	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);
	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
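	/*
	 * Worked example of the pointer math above: with S the first pfn of
	 * this section, the stored value is base - entry_size * S, so
	 * get_entry(section->page_ext, pfn) yields
	 * base + entry_size * (pfn - S); lookup_page_ext() can therefore
	 * index by the raw pfn without subtracting the section start.
	 */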
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an arg
		 * for online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				      mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				 mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				 mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap. We know some architectures
			 * can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 *
			 * Take DEFERRED_STRUCT_PAGE_INIT into account.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;
oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif