mm/internal.h

  /* internal.h: mm/ internal definitions
   *
   * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
   * Written by David Howells (dhowells@redhat.com)
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
  #ifndef __MM_INTERNAL_H
  #define __MM_INTERNAL_H
  #include <linux/fs.h>
  #include <linux/mm.h>

  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  		unsigned long floor, unsigned long ceiling);
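
  /*
   * Note: set_page_count() below stores the value directly instead of using
   * the atomic add/sub helpers, so it is only safe on pages that nobody
   * else can be referencing concurrently, e.g. pages still private to the
   * page allocator.
   */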
  static inline void set_page_count(struct page *page, int v)
  {
  	atomic_set(&page->_count, v);
  }
  extern int __do_page_cache_readahead(struct address_space *mapping,
  		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
  		unsigned long lookahead_size);
  
  /*
   * Submit IO for the read-ahead request in file_ra_state.
   */
  static inline unsigned long ra_submit(struct file_ra_state *ra,
  		struct address_space *mapping, struct file *filp)
  {
  	return __do_page_cache_readahead(mapping, filp,
  					ra->start, ra->size, ra->async_size);
  }
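
  /*
   * Illustrative caller (not part of this header): the on-demand readahead
   * code in mm/readahead.c sizes the window and then submits it, roughly:
   *
   *	ra->start = offset;
   *	ra->size = get_next_ra_size(ra, max);
   *	ra->async_size = ra->size;
   *	return ra_submit(ra, mapping, filp);
   */
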
  /*
   * Turn a non-refcounted page (->_count == 0) into refcounted with
   * a count of one.
   */
  static inline void set_page_refcounted(struct page *page)
  {
  	VM_BUG_ON_PAGE(PageTail(page), page);
  	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
  	set_page_count(page, 1);
  }
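
  /*
   * The page allocator uses set_page_refcounted() when handing a freshly
   * prepared page to its first user, so allocations start with _count == 1.
   */
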
  static inline void __get_page_tail_foll(struct page *page,
  					bool get_page_head)
  {
  	/*
  	 * If we're getting a tail page, the elevated page->_count is
  	 * required only in the head page and we will elevate the head
  	 * page->_count and tail page->_mapcount.
  	 *
  	 * We elevate page_tail->_mapcount for tail pages to force
  	 * page_tail->_count to be zero at all times to avoid getting
  	 * false positives from get_page_unless_zero() with
  	 * speculative page access (like in
  	 * page_cache_get_speculative()) on tail pages.
  	 */
  	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
  	if (get_page_head)
  		atomic_inc(&page->first_page->_count);
  	get_huge_page_tail(page);
  }
  
  /*
   * This is meant to be called as the FOLL_GET operation of
   * follow_page() and it must be called while holding the proper PT
   * lock while the pte (or pmd_trans_huge) is still mapping the page.
   */
  static inline void get_page_foll(struct page *page)
  {
  	if (unlikely(PageTail(page)))
  		/*
  		 * This is safe only because
  		 * __split_huge_page_refcount() can't run under
  		 * get_page_foll() because we hold the proper PT lock.
  		 */
  		__get_page_tail_foll(page, true);
  	else {
  		/*
  		 * Getting a normal page or the head of a compound page
  		 * requires to already have an elevated page->_count.
  		 */
  		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
  		atomic_inc(&page->_count);
  	}
  }
  extern unsigned long highest_memmap_pfn;
  /*
   * in mm/vmscan.c:
   */
  extern int isolate_lru_page(struct page *page);
  extern void putback_lru_page(struct page *page);
  extern bool zone_reclaimable(struct zone *zone);

  /*
   * in mm/rmap.c:
   */
  extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  
  /*
   * in mm/page_alloc.c
   */
  extern void __free_pages_bootmem(struct page *page, unsigned int order);
  extern void prep_compound_page(struct page *page, unsigned long order);
  #ifdef CONFIG_MEMORY_FAILURE
  extern bool is_free_buddy_page(struct page *page);
  #endif
  extern int user_min_free_kbytes;

  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  
  /*
   * in mm/compaction.c
   */
  /*
   * compact_control is used to track pages being migrated and the free pages
   * they are being migrated to during memory compaction. The free_pfn starts
   * at the end of a zone and migrate_pfn begins at the start. Movable pages
   * are moved to the end of a zone during a compaction run and the run
   * completes when free_pfn <= migrate_pfn
   */
  struct compact_control {
  	struct list_head freepages;	/* List of free pages to migrate to */
  	struct list_head migratepages;	/* List of pages being migrated */
  	unsigned long nr_freepages;	/* Number of isolated free pages */
  	unsigned long nr_migratepages;	/* Number of pages to migrate */
  	unsigned long free_pfn;		/* isolate_freepages search base */
  	unsigned long migrate_pfn;	/* isolate_migratepages search base */
  	bool sync;			/* Synchronous migration */
  	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
  	bool finished_update_free;	/* True when the zone cached pfns are
  					 * no longer being updated
  					 */
  	bool finished_update_migrate;
  
  	int order;			/* order a direct compactor needs */
  	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
  	struct zone *zone;
  	bool contended;			/* True if a lock was contended */
  };
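
  /*
   * Roughly: the migrate scanner advances migrate_pfn upwards filling
   * cc->migratepages, the free scanner advances free_pfn downwards filling
   * cc->freepages, and pages are migrated from one list onto the other;
   * compaction of the zone is considered complete once the two scanners meet.
   */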
  
  unsigned long
  isolate_freepages_range(struct compact_control *cc,
  			unsigned long start_pfn, unsigned long end_pfn);
  unsigned long
  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
  	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
  
  #endif

  /*
   * This function returns the order of a free page in the buddy system. In
   * general, page_zone(page)->lock must be held by the caller to prevent the
   * page from being allocated in parallel and returning garbage as the order.
   * If a caller does not hold page_zone(page)->lock, it must guarantee that the
   * page cannot be allocated or merged in parallel.
   */
  static inline unsigned long page_order(struct page *page)
  {
  	/* PageBuddy() must be checked by the caller */
  	return page_private(page);
  }

  /* mm/util.c */
  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent);
  #ifdef CONFIG_MMU
  extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
  		unsigned long start, unsigned long end, int *nonblocking);
  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
  			unsigned long start, unsigned long end);
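
  /*
   * Used when an mlocked VMA is torn down (munmap or exit) so that its
   * pages can be returned to the normal LRU lists.
   */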
  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  {
  	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
  }
  /*
   * Called only in fault path, to determine if a new page is being
   * mapped into a LOCKED vma.  If it is, mark page as mlocked.
   */
  static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
  				    struct page *page)
  {
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  
  	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
  		return 0;
  	if (!TestSetPageMlocked(page)) {
  		mod_zone_page_state(page_zone(page), NR_MLOCK,
  				    hpage_nr_pages(page));
  		count_vm_event(UNEVICTABLE_PGMLOCKED);
  	}
  	return 1;
  }
  
  /*
   * must be called with vma's mmap_sem held for read or write, and page locked.
   */
  extern void mlock_vma_page(struct page *page);
  extern unsigned int munlock_vma_page(struct page *page);
  
  /*
   * Clear the page's PageMlocked().  This can be useful in a situation where
   * we want to unconditionally remove a page from the pagecache -- e.g.,
   * on truncation or freeing.
   *
   * It is legal to call this function for any page, mlocked or not.
   * If called for a page that is still mapped by mlocked vmas, all we do
   * is revert to lazy LRU behaviour -- semantics are not broken.
   */
  extern void clear_page_mlock(struct page *page);
  
  /*
   * mlock_migrate_page - called only from migrate_page_copy() to
   * migrate the Mlocked page flag; update statistics.
   */
  static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  {
  	if (TestClearPageMlocked(page)) {
  		unsigned long flags;
  		int nr_pages = hpage_nr_pages(page);
  
  		local_irq_save(flags);
  		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
  		SetPageMlocked(newpage);
  		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
  		local_irq_restore(flags);
  	}
  }
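
  /*
   * Note: __mod_zone_page_state() is the interrupt-unsafe variant, which is
   * why the NR_MLOCK updates above are wrapped in local_irq_save()/restore().
   */
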
  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  extern unsigned long vma_address(struct page *page,
  				 struct vm_area_struct *vma);
  #endif
  #else /* !CONFIG_MMU */
  static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
  {
  	return 0;
  }
  static inline void clear_page_mlock(struct page *page) { }
  static inline void mlock_vma_page(struct page *page) { }
  static inline void mlock_migrate_page(struct page *new, struct page *old) { }
  #endif /* !CONFIG_MMU */

  /*
   * Return the mem_map entry representing the 'offset' subpage within
   * the maximally aligned gigantic page 'base'.  Handle any discontiguity
   * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
   */
  static inline struct page *mem_map_offset(struct page *base, int offset)
  {
  	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
  		return pfn_to_page(page_to_pfn(base) + offset);
  	return base + offset;
  }
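
  /*
   * The struct page array is only guaranteed to be contiguous within a
   * MAX_ORDER_NR_PAGES-aligned block (e.g. with classic SPARSEMEM), hence
   * the pfn_to_page() slow path above for larger offsets.
   */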
  
  /*
   * Iterator over all subpages within the maximally aligned gigantic
   * page 'base'.  Handle any discontiguity in the mem_map.
   */
  static inline struct page *mem_map_next(struct page *iter,
  						struct page *base, int offset)
  {
  	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
  		unsigned long pfn = page_to_pfn(base) + offset;
  		if (!pfn_valid(pfn))
  			return NULL;
  		return pfn_to_page(pfn);
  	}
  	return iter + 1;
  }
  
  /*
   * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
   * so all functions starting at paging_init should be marked __init
   * in those cases. SPARSEMEM, however, allows for memory hotplug,
   * and alloc_bootmem_node is not used.
   */
  #ifdef CONFIG_SPARSEMEM
  #define __paginginit __meminit
  #else
  #define __paginginit __init
  #endif
  /* Memory initialisation debug and verification */
  enum mminit_level {
  	MMINIT_WARNING,
  	MMINIT_VERIFY,
  	MMINIT_TRACE
  };
  
  #ifdef CONFIG_DEBUG_MEMORY_INIT
  
  extern int mminit_loglevel;
  
  #define mminit_dprintk(level, prefix, fmt, arg...) \
  do { \
  	if (level < mminit_loglevel) { \
  		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
  		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
  	} \
  } while (0)
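
  /*
   * Illustrative use:
   *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
   *			"initialising map node %d\n", nid);
   * Output is emitted only when mminit_loglevel (settable via the
   * "mminit_loglevel=" early boot parameter) exceeds the message level.
   */
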
  extern void mminit_verify_pageflags_layout(void);
  extern void mminit_verify_page_links(struct page *page,
  		enum zone_type zone, unsigned long nid, unsigned long pfn);
  extern void mminit_verify_zonelist(void);

  #else
  
  static inline void mminit_dprintk(enum mminit_level level,
  				const char *prefix, const char *fmt, ...)
  {
  }
  static inline void mminit_verify_pageflags_layout(void)
  {
  }
  
  static inline void mminit_verify_page_links(struct page *page,
  		enum zone_type zone, unsigned long nid, unsigned long pfn)
  {
  }
  
  static inline void mminit_verify_zonelist(void)
  {
  }
  #endif /* CONFIG_DEBUG_MEMORY_INIT */
  
  /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
  #if defined(CONFIG_SPARSEMEM)
  extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  				unsigned long *end_pfn);
  #else
  static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  				unsigned long *end_pfn)
  {
  }
  #endif /* CONFIG_SPARSEMEM */
  #define ZONE_RECLAIM_NOSCAN	-2
  #define ZONE_RECLAIM_FULL	-1
  #define ZONE_RECLAIM_SOME	0
  #define ZONE_RECLAIM_SUCCESS	1

  extern int hwpoison_filter(struct page *p);
  extern u32 hwpoison_filter_dev_major;
  extern u32 hwpoison_filter_dev_minor;
  extern u64 hwpoison_filter_flags_mask;
  extern u64 hwpoison_filter_flags_value;
  extern u64 hwpoison_filter_memcg;
  extern u32 hwpoison_filter_enable;
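
  /*
   * hwpoison_filter() lets the hwpoison test code restrict injected errors
   * to pages matching the filters above; a non-zero return means the page
   * should be skipped.
   */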
  
  extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
          unsigned long, unsigned long,
          unsigned long, unsigned long);
  
  extern void set_pageblock_order(void);
  unsigned long reclaim_clean_pages_from_list(struct zone *zone,
  					    struct list_head *page_list);
  /* The ALLOC_WMARK bits are used as an index to zone->watermark */
  #define ALLOC_WMARK_MIN		WMARK_MIN
  #define ALLOC_WMARK_LOW		WMARK_LOW
  #define ALLOC_WMARK_HIGH	WMARK_HIGH
  #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
  
  /* Mask to get the watermark bits */
  #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
  
  #define ALLOC_HARDER		0x10 /* try to alloc harder */
  #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
  #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
  #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
  #define ALLOC_FAIR		0x100 /* fair zone allocation */
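
  /*
   * For illustration, the allocator selects its watermark with the low bits,
   * roughly:
   *	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
   * while the remaining ALLOC_* bits relax or tighten how that mark is
   * enforced.
   */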

  #endif	/* __MM_INTERNAL_H */