mm/internal.h

/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
        VM_BUG_ON(PageTail(page));
        VM_BUG_ON(atomic_read(&page->_count));
        set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
        atomic_dec(&page->_count);
}

static inline void __get_page_tail_foll(struct page *page,
                                        bool get_page_head)
{
        /*
         * If we're getting a tail page, the elevated page->_count is
         * required only in the head page and we will elevate the head
         * page->_count and tail page->_mapcount.
         *
         * We elevate page_tail->_mapcount for tail pages to force
         * page_tail->_count to be zero at all times to avoid getting
         * false positives from get_page_unless_zero() with
         * speculative page access (like in page_cache_get_speculative())
         * on tail pages.
         */
        VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
        VM_BUG_ON(atomic_read(&page->_count) != 0);
        VM_BUG_ON(page_mapcount(page) < 0);
        if (get_page_head)
                atomic_inc(&page->first_page->_count);
        atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page() and it must be called while holding the proper PT
 * lock while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
        if (unlikely(PageTail(page)))
                /*
                 * This is safe only because
                 * __split_huge_page_refcount() can't run under
                 * get_page_foll() because we hold the proper PT lock.
                 */
                __get_page_tail_foll(page, true);
        else {
                /*
                 * Getting a normal page or the head of a compound page
                 * requires the caller to already hold an elevated
                 * page->_count.
                 */
                VM_BUG_ON(atomic_read(&page->_count) <= 0);
                atomic_inc(&page->_count);
        }
}
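
/*
 * Illustrative sketch (not part of the original header): after
 * get_page_foll() pins a tail page of a compound page, the counters
 * are expected to look like
 *
 *      head->_count    == N + 1   (the pin is accounted on the head)
 *      tail->_count    == 0       (always, so get_page_unless_zero()
 *                                  on the tail keeps failing)
 *      tail->_mapcount == M + 1   (records the tail-page pin)
 *
 * and the matching put_page() on the tail is assumed to drop both the
 * head->_count and the tail->_mapcount references again.
 */
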
extern unsigned long highest_memmap_pfn; |

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page); |
extern void putback_lru_page(struct page *page); |

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order); |
extern void prep_compound_page(struct page *page, unsigned long order); |
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        bool sync;                      /* Synchronous migration */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
        bool finished_update_free;      /* True when the zone cached pfns are
                                         * no longer being updated
                                         */
        bool finished_update_migrate;

        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
        bool contended;                 /* True if a lock was contended */
};
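
/*
 * Sketch of a compaction run under the scheme described above (an
 * illustration, not part of the original header): the two scanners
 * start at opposite ends of the zone and walk toward each other,
 *
 *      zone start                                  zone end
 *        migrate_pfn --->    . . . .    <--- free_pfn
 *
 * with movable pages copied from the migrate scanner's side into free
 * pages isolated by the free scanner; the run completes once
 * free_pfn <= migrate_pfn.
 */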

unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

#endif

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
}
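
/*
 * Minimal usage sketch (a hypothetical caller, respecting the rules
 * above): only read the order with zone->lock held and after
 * PageBuddy() has been checked,
 *
 *      spin_lock_irqsave(&zone->lock, flags);
 *      if (PageBuddy(page))
 *              order = page_order(page);
 *      spin_unlock_irqrestore(&zone->lock, flags);
 */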

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU |
extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in fault path, to determine if a new page is being
 * mapped into a LOCKED vma.  If it is, mark page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
                                      struct page *page)
{
        VM_BUG_ON(PageLRU(page));

        if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                return 0;

        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
                                    hpage_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        return 1;
}
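
/*
 * Assumed caller pattern (a sketch based on the comment above, not a
 * quote of the real call site): the anonymous fault path can use the
 * result to pick the right LRU list for a newly mapped page,
 *
 *      if (mlocked_vma_newpage(vma, page))
 *              add_page_to_unevictable_list(page);
 *      else
 *              lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 */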

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
        if (TestClearPageMlocked(page)) {
                unsigned long flags;
                int nr_pages = hpage_nr_pages(page);

                local_irq_save(flags);
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
                local_irq_restore(flags);
        }
}
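
/*
 * Note on the function above: __mod_zone_page_state() is the vmstat
 * updater that must run with interrupts disabled, hence the
 * local_irq_save()/local_irq_restore() pair; clearing the flag on the
 * old page before setting it on the new one keeps NR_MLOCK balanced
 * even when the two pages belong to different zones.
 */
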
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); |

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
                                 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */ |
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p) |
{
        return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* !CONFIG_MMU */ |

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
        if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                return pfn_to_page(page_to_pfn(base) + offset);
        return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
                                        struct page *base, int offset)
{
        if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                unsigned long pfn = page_to_pfn(base) + offset;

                if (!pfn_valid(pfn))
                        return NULL;
                return pfn_to_page(pfn);
        }
        return iter + 1;
}
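
/*
 * Usage sketch (modelled on the gigantic-page clear/copy loops; the
 * names head, nr_subpages and addr are placeholders): walk every
 * subpage without assuming the mem_map is contiguous,
 *
 *      struct page *p = head;
 *      int i;
 *
 *      for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, head, i)) {
 *              cond_resched();
 *              clear_user_highpage(p, addr + i * PAGE_SIZE);
 *      }
 */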

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases.  SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
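
/*
 * For instance (an assumption about usage, not from this file), the
 * node and zone setup entry points such as free_area_init_node() in
 * mm/page_alloc.c carry this annotation:
 *
 *      void __paginginit free_area_init_node(...);
 */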

/* Memory initialisation debug and verification */
enum mminit_level {
        MMINIT_WARNING,
        MMINIT_VERIFY,
        MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
        if (level < mminit_loglevel) { \
                printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
                printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
        } \
} while (0)
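
/*
 * Usage sketch (hypothetical message; the real callers live in mm/):
 *
 *      mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *                      "initialising map node %d\n", nid);
 */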

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
                enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
                                const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
                enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */
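
/* Return codes for zone_reclaim() (assumed; its implementation lives in mm/vmscan.c) */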
#define ZONE_RECLAIM_NOSCAN     -2
#define ZONE_RECLAIM_FULL       -1
#define ZONE_RECLAIM_SOME       0
#define ZONE_RECLAIM_SUCCESS    1

extern int hwpoison_filter(struct page *p); |
extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg; |
extern u32 hwpoison_filter_enable; |

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void); |

unsigned long reclaim_clean_pages_from_list(struct zone *zone,
                                            struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
#define ALLOC_CMA               0x80 /* allow allocations from CMA areas */
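
/*
 * Usage sketch (mirroring the allocator fast path, assuming the
 * zone->watermark[] array indexed by WMARK_*):
 *
 *      mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *      if (!zone_watermark_ok(zone, order, mark, classzone_idx,
 *                             alloc_flags))
 *              ...;
 */
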
#endif /* __MM_INTERNAL_H */ |