Commit dcce284a259373f9e5570f2e33f79eca84fcf565
Committed by
Linus Torvalds
1 parent
9729a6eb58
Exists in
master
and in
7 other branches
mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot, so this moves it out of slab/slub and uses it with the page allocator as well. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Pekka Enberg <penberg@cs.helsinki.fi> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 5 changed files with 18 additions and 25 deletions. (Side-by-side Diff)
include/linux/gfp.h
... | ... | @@ -99,7 +99,7 @@ |
99 | 99 | __GFP_NORETRY|__GFP_NOMEMALLOC) |
100 | 100 | |
101 | 101 | /* Control slab gfp mask during early boot */ |
102 | -#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) | |
102 | +#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) | |
103 | 103 | |
104 | 104 | /* Control allocation constraints */ |
105 | 105 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
... | ... | @@ -346,6 +346,13 @@ |
346 | 346 | static inline void oom_killer_enable(void) |
347 | 347 | { |
348 | 348 | oom_killer_disabled = false; |
349 | +} | |
350 | + | |
351 | +extern gfp_t gfp_allowed_mask; | |
352 | + | |
353 | +static inline void set_gfp_allowed_mask(gfp_t mask) | |
354 | +{ | |
355 | + gfp_allowed_mask = mask; | |
349 | 356 | } |
350 | 357 | |
351 | 358 | #endif /* __LINUX_GFP_H */ |
init/main.c
mm/page_alloc.c
... | ... | @@ -73,6 +73,7 @@ |
73 | 73 | unsigned long totalreserve_pages __read_mostly; |
74 | 74 | unsigned long highest_memmap_pfn __read_mostly; |
75 | 75 | int percpu_pagelist_fraction; |
76 | +gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; | |
76 | 77 | |
77 | 78 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
78 | 79 | int pageblock_order __read_mostly; |
... | ... | @@ -1862,6 +1863,8 @@ |
1862 | 1863 | struct zone *preferred_zone; |
1863 | 1864 | struct page *page; |
1864 | 1865 | int migratetype = allocflags_to_migratetype(gfp_mask); |
1866 | + | |
1867 | + gfp_mask &= gfp_allowed_mask; | |
1865 | 1868 | |
1866 | 1869 | lockdep_trace_alloc(gfp_mask); |
1867 | 1870 |
mm/slab.c
... | ... | @@ -305,12 +305,6 @@ |
305 | 305 | }; |
306 | 306 | |
307 | 307 | /* |
308 | - * The slab allocator is initialized with interrupts disabled. Therefore, make | |
309 | - * sure early boot allocations don't accidentally enable interrupts. | |
310 | - */ | |
311 | -static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | |
312 | - | |
313 | -/* | |
314 | 308 | * Need this for bootstrapping a per node allocator. |
315 | 309 | */ |
316 | 310 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) |
... | ... | @@ -1559,11 +1553,6 @@ |
1559 | 1553 | { |
1560 | 1554 | struct kmem_cache *cachep; |
1561 | 1555 | |
1562 | - /* | |
1563 | - * Interrupts are enabled now so all GFP allocations are safe. | |
1564 | - */ | |
1565 | - slab_gfp_mask = __GFP_BITS_MASK; | |
1566 | - | |
1567 | 1556 | /* 6) resize the head arrays to their final sizes */ |
1568 | 1557 | mutex_lock(&cache_chain_mutex); |
1569 | 1558 | list_for_each_entry(cachep, &cache_chain, next) |
... | ... | @@ -3307,7 +3296,7 @@ |
3307 | 3296 | unsigned long save_flags; |
3308 | 3297 | void *ptr; |
3309 | 3298 | |
3310 | - flags &= slab_gfp_mask; | |
3299 | + flags &= gfp_allowed_mask; | |
3311 | 3300 | |
3312 | 3301 | lockdep_trace_alloc(flags); |
3313 | 3302 | |
... | ... | @@ -3392,7 +3381,7 @@ |
3392 | 3381 | unsigned long save_flags; |
3393 | 3382 | void *objp; |
3394 | 3383 | |
3395 | - flags &= slab_gfp_mask; | |
3384 | + flags &= gfp_allowed_mask; | |
3396 | 3385 | |
3397 | 3386 | lockdep_trace_alloc(flags); |
3398 | 3387 |
mm/slub.c
... | ... | @@ -179,12 +179,6 @@ |
179 | 179 | SYSFS /* Sysfs up */ |
180 | 180 | } slab_state = DOWN; |
181 | 181 | |
182 | -/* | |
183 | - * The slab allocator is initialized with interrupts disabled. Therefore, make | |
184 | - * sure early boot allocations don't accidentally enable interrupts. | |
185 | - */ | |
186 | -static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | |
187 | - | |
188 | 182 | /* A list of all slab caches on the system */ |
189 | 183 | static DECLARE_RWSEM(slub_lock); |
190 | 184 | static LIST_HEAD(slab_caches); |
... | ... | @@ -1692,7 +1686,7 @@ |
1692 | 1686 | unsigned long flags; |
1693 | 1687 | unsigned int objsize; |
1694 | 1688 | |
1695 | - gfpflags &= slab_gfp_mask; | |
1689 | + gfpflags &= gfp_allowed_mask; | |
1696 | 1690 | |
1697 | 1691 | lockdep_trace_alloc(gfpflags); |
1698 | 1692 | might_sleep_if(gfpflags & __GFP_WAIT); |
... | ... | @@ -3220,10 +3214,6 @@ |
3220 | 3214 | |
3221 | 3215 | void __init kmem_cache_init_late(void) |
3222 | 3216 | { |
3223 | - /* | |
3224 | - * Interrupts are enabled now so all GFP allocations are safe. | |
3225 | - */ | |
3226 | - slab_gfp_mask = __GFP_BITS_MASK; | |
3227 | 3217 | } |
3228 | 3218 | |
3229 | 3219 | /* |