mm/internal.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
                        __GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
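/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the masks above are applied to a caller's gfp_mask with bitwise AND.
 * A path that must honour the caller's IO/FS and watermark constraints
 * while ignoring its placement hints would do roughly:
 *
 *      gfp_t reclaim_gfp = gfp_mask & GFP_RECLAIM_MASK;
 *
 * whereas GFP_SLAB_BUG_MASK is used the opposite way: any bit left over
 * after the AND marks a flag the slab allocators must reject.
 */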

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
        return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end,
                        struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
                unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
                unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read)
{
        DEFINE_READAHEAD(ractl, file, mapping, index);

        force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
        bool ret;

        /* Prevent address_space of inode and swap cache from being freed */
        rcu_read_lock();
        ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
        rcu_read_unlock();
        return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(page_ref_count(page), page);
        set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
        struct zonelist *zonelist;
        nodemask_t *nodemask;
        struct zoneref *preferred_zoneref;
        int migratetype;

        /*
         * highest_zoneidx represents highest usable zone index of
         * the allocation request. Due to the nature of the zone,
         * memory on lower zone than the highest_zoneidx will be
         * protected by lowmem_reserve[highest_zoneidx].
         *
         * highest_zoneidx is also used by reclaim/compaction to limit
         * the target zone since higher zone than this index cannot be
         * usable for this allocation request.
         */
        enum zone_type highest_zoneidx;
        bool spread_dirty_pages;
};
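
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a direct allocation fills the context once near the top of
 * __alloc_pages_nodemask() and then hands it down by const pointer,
 * along the lines of:
 *
 *      struct alloc_context ac = {
 *              .highest_zoneidx = gfp_zone(gfp_mask),
 *              .zonelist = node_zonelist(preferred_nid, gfp_mask),
 *              .nodemask = nodemask,
 *              .migratetype = gfp_migratetype(gfp_mask),
 *      };
 */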

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
        return page_pfn ^ (1 << order);
}
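
/*
 * Worked example (editor's addition): the XOR above flips exactly the
 * order bit, so for pfn 8
 *
 *      __find_buddy_pfn(8, 0) ==  9    (8 ^ 1)
 *      __find_buddy_pfn(8, 1) == 10    (8 ^ 2)
 *      __find_buddy_pfn(8, 2) == 12    (8 ^ 4)
 *
 * and clearing that bit instead gives the order O+1 parent of equation 2:
 * both 8 & ~(1 << 1) and 10 & ~(1 << 1) yield 8.
 */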

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone)
{
        if (zone->contiguous)
                return pfn_to_page(start_pfn);

        return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
                                    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
                                        unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
                                        gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned int nr_freepages;      /* Number of isolated free pages */
        unsigned int nr_migratepages;   /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
        struct zone *zone;
        unsigned long total_migrate_scanned;
        unsigned long total_free_scanned;
        unsigned short fast_search_fail;/* failures to use free list searches */
        short search_order;             /* order to start a fast search at */
        const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* migratetype of direct compactor */
        const unsigned int alloc_flags; /* alloc flags of a direct compactor */
        const int highest_zoneidx;      /* zone index of a direct compactor */
        enum migrate_mode mode;         /* Async or sync migration mode */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
        bool no_set_skip_hint;          /* Don't mark blocks for skipping */
        bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
        bool direct_compaction;         /* False from kcompactd or /proc/... */
        bool proactive_compaction;      /* kcompactd proactive compaction */
        bool whole_zone;                /* Whole zone should/has been scanned */
        bool contended;                 /* Signal lock or sched contention */
        bool rescan;                    /* Rescanning the same pageblock */
        bool alloc_contig;              /* alloc_contig_range allocation */
};
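
/*
 * Editor's note (not part of the kernel source): the two scan positions
 * above converge from opposite ends of the zone, so a run can be pictured
 * as
 *
 *      zone start                                        zone end
 *      migrate_pfn --->          . . .          <--- free_pfn
 *
 * with isolated movable pages relocated to free pages found on the right,
 * and the run finishing once free_pfn <= migrate_pfn.
 */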

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
        struct compact_control *cc;
        struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
                           unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
                        int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)        READ_ONCE(page_private(page))
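
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a lockless caller is expected to snapshot the order once and validate
 * it before acting on it, roughly:
 *
 *      if (PageBuddy(page)) {
 *              unsigned long order = buddy_order_unsafe(page);
 *
 *              if (order < MAX_ORDER)
 *                      pfn += 1UL << order;
 *      }
 *
 * Without READ_ONCE the compiler could reload page_private(page) for the
 * range test and again for the shift, defeating the validation.
 */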

static inline bool is_cow_mapping(vm_flags_t flags)
{
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
        return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
        return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
        return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
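
/*
 * Worked example (editor's addition): a PROT_READ|PROT_WRITE, MAP_PRIVATE
 * mapping carries VM_READ|VM_WRITE|VM_MAYWRITE but neither VM_SHARED nor
 * VM_STACK, so is_cow_mapping() and is_data_mapping() are both true while
 * is_exec_mapping() and is_stack_mapping() are false.
 */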

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
        if (TestClearPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);

                /* Holding pmd lock, no change in irq context: __mod is safe */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
        }
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + thp_size(page) - PAGE_SIZE;

        /* page should be within @vma mapping range */
        VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

        return max(start, vma->vm_start);
}
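
/*
 * Worked example (editor's addition): for a page at pgoff 3 in a VMA with
 * vm_start == 0x1000 and vm_pgoff == 1, __vma_address() returns
 * 0x1000 + ((3 - 1) << PAGE_SHIFT) == 0x3000 with 4K pages.  For a THP
 * whose head lies before vm_start, vma_address() clamps the result to
 * vm_start, which is why it returns max(start, vma->vm_start).
 */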

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
                                                    struct file *fpin)
{
        int flags = vmf->flags;

        if (fpin)
                return fpin;

        /*
         * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
         * anything, so we only pin the file and drop the mmap_lock if only
         * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
         */
        if (fault_flag_allow_retry_first(flags) &&
            !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
                fpin = get_file(vmf->vma->vm_file);
                mmap_read_unlock(vmf->vma->vm_mm);
        }
        return fpin;
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
        if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                return nth_page(base, offset);
        return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
                                                struct page *base, int offset)
{
        if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                unsigned long pfn = page_to_pfn(base) + offset;
                if (!pfn_valid(pfn))
                        return NULL;
                return pfn_to_page(pfn);
        }
        return iter + 1;
}
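
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * callers walk a gigantic page's subpages with the pair of helpers above,
 * in the style of:
 *
 *      struct page *p = mem_map_offset(base, 0);
 *
 *      for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
 *              ... operate on subpage p ...
 *      }
 */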

/* Memory initialisation debug and verification */
enum mminit_level {
        MMINIT_WARNING,
        MMINIT_VERIFY,
        MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
        if (level < mminit_loglevel) { \
                if (level <= MMINIT_WARNING) \
                        pr_warn("mminit::" prefix " " fmt, ##arg); \
                else \
                        printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
        } \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
                                const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
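
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * boot-time verification code reports through the macro above, e.g.
 *
 *      mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *                     "node %d zone %s\n", nid, zone->name);
 *
 * and with CONFIG_DEBUG_MEMORY_INIT unset the call collapses to the empty
 * inline stub, so such messages compile away in production builds.
 */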

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN     -2
#define NODE_RECLAIM_FULL       -1
#define NODE_RECLAIM_SOME       0
#define NODE_RECLAIM_SUCCESS    1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
                                unsigned int order)
{
        return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
                                            struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM               0x08
#else
#define ALLOC_OOM               ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
#define ALLOC_CMA               0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT        0x0
#endif
#define ALLOC_KSWAPD            0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
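
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the low ALLOC_WMARK_* bits index the zone's watermark array while the
 * higher bits act as independent toggles, so a watermark check typically
 * takes the form
 *
 *      unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 * before zone_watermark_ok() is consulted with the same alloc_flags.
 */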

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
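
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * reclaim batches the IPIs these hooks stand for by unmapping many pages
 * with TTU_BATCH_FLUSH and issuing a single flush afterwards, roughly:
 *
 *      list_for_each_entry(page, &page_list, lru)
 *              try_to_unmap(page, flags | TTU_BATCH_FLUSH);
 *      try_to_unmap_flush();
 */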

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
        return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
        return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
        int nid;                /* preferred node id */
        nodemask_t *nmask;
        gfp_t gfp_mask;
};
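
/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * migration callers bundle their allocation policy here and pass it via
 * the private argument of migrate_pages(), roughly:
 *
 *      struct migration_target_control mtc = {
 *              .nid = target_nid,
 *              .gfp_mask = GFP_USER | __GFP_MOVABLE,
 *      };
 *
 *      migrate_pages(&pagelist, alloc_migration_target, NULL,
 *                    (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */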

#endif  /* __MM_INTERNAL_H */