include/linux/gfp.h

#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)
#define __GFP_MOVABLE	((__force gfp_t)0x08u)	/* Page is movable */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Action modifiers - these do not change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* See above */
#define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */

#ifdef CONFIG_KMEMCHECK
#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
#else
#define __GFP_NOTRACK	((__force gfp_t)0)
#endif

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)

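/*
 * A minimal usage sketch (an editor's illustration, not part of the
 * original header): the composite flag is chosen by calling context.
 * Code that may sleep uses GFP_KERNEL; atomic context (interrupt
 * handlers, or while holding a spinlock) must use GFP_ATOMIC, which
 * never waits and may dip into the emergency pools:
 *
 *	page = alloc_page(GFP_KERNEL);	(may sleep, may start IO)
 *	page = alloc_page(GFP_ATOMIC);	(must not and will not sleep)
 */
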
#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */
#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
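
/*
 * A worked sketch of the mapping above, assuming the conventional ordering
 * of the MIGRATE_* constants in linux/mmzone.h (UNMOVABLE=0, RECLAIMABLE=1,
 * MOVABLE=2):
 *
 *	neither bit set		-> 0 == MIGRATE_UNMOVABLE
 *	__GFP_RECLAIMABLE	-> 1 == MIGRATE_RECLAIMABLE
 *	__GFP_MOVABLE		-> 2 == MIGRATE_MOVABLE
 *	both bits set		-> 3, flagged by the WARN_ON above
 */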

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				  \
	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			  \
	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		  \
	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			  \
	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			  \
	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	  \
	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
)

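/*
 * A worked lookup, as a sketch: for a mask with only __GFP_HIGHMEM set,
 * bit = 0x2, and gfp_zone() below computes
 *
 *	(GFP_ZONE_TABLE >> (0x2 * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1)
 *
 * which the table populates with OPT_ZONE_HIGHMEM: ZONE_HIGHMEM when
 * CONFIG_HIGHMEM is set, ZONE_NORMAL otherwise.
 */
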
/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (__GFP_DMA | __GFP_HIGHMEM)				  \
	| 1 << (__GFP_DMA | __GFP_DMA32)				  \
	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				  \
	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		  \
	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		  \
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		  \
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		  \
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = flags & GFP_ZONEMASK;

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);

	if (__builtin_constant_p(bit))
		MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	else {
#ifdef CONFIG_DEBUG_VM
		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
	}
	return z;
}

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
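
/*
 * Sketch of the node-aware variants above: alloc_pages_node() tolerates
 * an unknown node and substitutes the current one, while
 * alloc_pages_exact_node() trusts (and VM_BUG_ONs) its nid argument:
 *
 *	page = alloc_pages_node(-1, GFP_KERNEL, 0);	(current node)
 *	page = alloc_pages_exact_node(nid, GFP_KERNEL, 0);
 */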

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
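
/*
 * A minimal sketch of the struct-page namespace described above: pair
 * alloc_page()/alloc_pages() with __free_page()/__free_pages() (defined
 * further down):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_page(page);
 */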

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
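
/*
 * Sketch: alloc_pages_exact() suits sizes that are not a power-of-two
 * number of pages; the unused tail of the rounded-up allocation is given
 * back, and the buffer must be released with free_pages_exact():
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 48 * 1024);
 */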

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
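
/*
 * A minimal sketch of the virtual-address namespace: get_zeroed_page()
 * returns a zero-filled page as a kernel virtual address, released with
 * free_page():
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */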

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

extern gfp_t gfp_allowed_mask;

extern void set_gfp_allowed_mask(gfp_t mask);
extern gfp_t clear_gfp_allowed_mask(gfp_t mask);

#endif /* __LINUX_GFP_H */