mm/internal.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
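
/*
 * Usage sketch (illustrative only; this helper is hypothetical and not
 * part of the upstream header): internal allocations commonly honour just
 * the caller's reclaim constraints by masking with GFP_RECLAIM_MASK,
 * deliberately dropping placement hints such as __GFP_HIGHMEM.
 */
static inline gfp_t gfp_reclaim_constraints_example(gfp_t gfp_mask)
{
	return gfp_mask & GFP_RECLAIM_MASK;
}
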
void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be changed later
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order
 * 1 buddy (B2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
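
/*
 * Worked example for formula 2) above (illustrative sketch only; this
 * helper is hypothetical and not part of the upstream header). Merging
 * buddies #8 and #10 at order 1 yields their order 2 parent:
 *     P = 8 & ~(1 << 1) = 8 & ~2 = 8
 *     P = 10 & ~(1 << 1) = 10 & ~2 = 8
 */
static inline unsigned long
__find_buddy_parent_pfn_example(unsigned long page_pfn, unsigned int order)
{
	return page_pfn & ~(1UL << order);
}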

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
};
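
/*
 * Illustration (assumed values, not part of the upstream header): in a
 * zone spanning pfns [0x10000, 0x20000), a run starts with migrate_pfn
 * near 0x10000 scanning upward and free_pfn near 0x20000 scanning
 * downward; compaction of the zone completes once the two scanners meet,
 * i.e. when free_pfn <= migrate_pfn.
 */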

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
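
/*
 * Usage sketch for page_order_unsafe() (illustrative only; this helper is
 * hypothetical and not part of the upstream header): check PageBuddy()
 * first, read the order once into a local, and range-check it before use,
 * since the page may be allocated or merged concurrently and the raw
 * value may be garbage.
 */
static inline unsigned int page_order_checked_example(struct page *page)
{
	unsigned int order;

	if (!PageBuddy(page))
		return 0;
	order = page_order_unsafe(page);
	if (order >= MAX_ORDER)
		return 0;	/* value raced with allocation/merge; reject */
	return order;
}
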
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
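
/*
 * Example (illustrative values only): a typical private anonymous heap
 * VMA has flags VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE (no
 * VM_SHARED, VM_STACK or VM_EXEC), so is_data_mapping() is true for it
 * while is_exec_mapping() and is_stack_mapping() are false;
 * is_cow_mapping() is also true, since VM_MAYWRITE is set without
 * VM_SHARED.
 */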

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
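
/*
 * Worked example for __vma_address() (illustrative values only): for a
 * mapping with vm_start = 0x7f0000400000 and vm_pgoff = 16, a page at
 * file offset pgoff = 20 is expected at
 *     0x7f0000400000 + ((20 - 16) << PAGE_SHIFT)
 * i.e. four pages past vm_start (0x7f0000404000 with 4 KiB pages).
 */
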
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
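
/*
 * Usage sketch (illustrative only; this helper is hypothetical and not
 * part of the upstream header): walking every subpage of a gigantic page
 * with the two helpers above, in the pattern their callers in mm/ follow.
 */
static inline void for_each_subpage_example(struct page *base, int nr_pages)
{
	struct page *p = base;
	int i;

	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
		/* each subpage p is visited here, in pfn order */
	}
}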

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);
extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */
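
/*
 * Note (illustrative): ALLOC_WMARK_MASK == ALLOC_NO_WATERMARKS - 1 == 0x03,
 * so the low two bits of alloc_flags select the watermark, e.g.
 * "alloc_flags & ALLOC_WMARK_MASK" yields ALLOC_WMARK_MIN/LOW/HIGH for use
 * as an index into the zone watermarks; the higher bits are boolean flags.
 */
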
enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif	/* __MM_INTERNAL_H */