/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
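/*
 * Illustrative sketch (added; not part of the original header): masking a
 * caller's gfp mask with GFP_RECLAIM_MASK keeps only the reclaim-relevant
 * behaviour bits and drops placement hints. For example:
 *
 *	gfp_t reclaim_gfp = GFP_HIGHUSER & GFP_RECLAIM_MASK;
 *
 * GFP_HIGHUSER is GFP_USER | __GFP_HIGHMEM; neither __GFP_HIGHMEM nor
 * __GFP_HARDWALL is in the mask, so reclaim_gfp retains the reclaim/IO/FS
 * bits while the highmem placement hint and cpuset constraint are dropped.
 */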
void page_writeback_init(void);

int do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			struct zap_details *details);

extern int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
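/*
 * Added worked example (not in the original header): both members of the
 * pair above share the same order-2 parent, per equation 2:
 *
 *	P = 8  & ~(1 << 1) = 8  & ~2 = 8
 *	P = 10 & ~(1 << 1) = 10 & ~2 = 8
 *
 * so merging the order-1 buddies #8 and #10 yields the order-2 page
 * starting at pfn #8.
 */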
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	struct zone *zone;
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool finishing_block;		/* Finishing current pageblock */
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif
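/*
 * Illustrative sketch (added; not part of the original header): the two
 * compact_control scanners converge from opposite ends of the zone, so a
 * compaction run can be pictured roughly as:
 *
 *	cc->migrate_pfn = zone->zone_start_pfn;
 *	cc->free_pfn = zone_end_pfn(zone);
 *	while (cc->free_pfn > cc->migrate_pfn) {
 *		// isolate movable pages near migrate_pfn (moves up),
 *		// isolate free target pages near free_pfn (moves down),
 *		// then migrate the isolated pages.
 *	}
 *
 * The run completes once free_pfn <= migrate_pfn, i.e. when the two
 * scanners have met.
 */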
/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
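/*
 * Illustrative usage sketch (added; not part of the original header): the
 * caller takes one snapshot and range-checks the local copy before trusting
 * it, e.g. in a lockless pageblock scanner:
 *
 *	unsigned long order = page_order_unsafe(page);
 *
 *	if (PageBuddy(page) && order < MAX_ORDER)
 *		pfn += (1UL << order) - 1;	// skip the whole free chunk
 *
 * Thanks to the READ_ONCE() above, the test and the use both see the same
 * snapshot even if the page is concurrently allocated or merged.
 */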
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
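/*
 * Added example (not in the original header): a private anonymous mapping
 * created with mmap(..., PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, ...)
 * carries VM_WRITE but neither VM_SHARED nor VM_STACK nor VM_EXEC, so of the
 * three helpers only is_data_mapping() returns true and the region is
 * accounted as data.
 */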
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
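/*
 * Added worked example (not in the original header): for a hypothetical
 * file-backed VMA with vm_start == 0x400000 and vm_pgoff == 2, a page at
 * pgoff 5 is expected at
 *
 *	0x400000 + ((5 - 2) << PAGE_SHIFT) = 0x400000 + 0x3000 = 0x403000
 *
 * assuming a 4 KiB PAGE_SIZE (PAGE_SHIFT == 12).
 */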
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
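/*
 * Illustrative usage sketch (added; not part of the original header): a
 * caller passes a level, a subsystem prefix and a printf-style message,
 * e.g. (with hypothetical nid/nr variables):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "node %d zonelist has %d zones\n", nid, nr);
 *
 * Levels at or below MMINIT_WARNING go to pr_warn(); more verbose levels
 * are printed at KERN_DEBUG, and nothing is printed unless
 * CONFIG_DEBUG_MEMORY_INIT is enabled and level < mminit_loglevel.
 */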
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
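/*
 * Illustrative sketch (added; not part of the original header): the low two
 * bits of alloc_flags select which watermark to test, so allocator code can
 * index the zone's watermark array directly:
 *
 *	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *
 * ALLOC_WMARK_MASK is (ALLOC_NO_WATERMARKS - 1) == 0x03, covering the
 * WMARK_MIN/WMARK_LOW/WMARK_HIGH indexes.
 */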
enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
#endif	/* __MM_INTERNAL_H */