  /* internal.h: mm/ internal definitions
   *
   * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
   * Written by David Howells (dhowells@redhat.com)
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
  #ifndef __MM_INTERNAL_H
  #define __MM_INTERNAL_H
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/pagemap.h>
  #include <linux/tracepoint-defs.h>

  /*
   * The set of flags that only affect watermark checking and reclaim
   * behaviour. This is used by the MM to obey the caller constraints
   * about IO, FS and watermark checking while ignoring placement
   * hints such as HIGHMEM usage.
   */
  #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
  			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
  			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
  			__GFP_ATOMIC)
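
/*
 * Illustrative sketch (not part of this header): a caller that must
 * honour the reclaim constraints of an incoming gfp_mask while choosing
 * its own placement would filter the mask like so:
 *
 *	gfp_t masked = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 *
 * Only the watermark/reclaim-behaviour bits of gfp_mask survive.
 */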
  
  /* The GFP flags allowed during early boot */
  #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
  
  /* Control allocation cpuset and node placement constraints */
  #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
  
  /* Do not use these with a slab allocator */
  #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
  int do_swap_page(struct fault_env *fe, pte_t orig_pte);

  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  		unsigned long floor, unsigned long ceiling);
  void unmap_page_range(struct mmu_gather *tlb,
  			     struct vm_area_struct *vma,
  			     unsigned long addr, unsigned long end,
  			     struct zap_details *details);
  extern int __do_page_cache_readahead(struct address_space *mapping,
  		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
  		unsigned long lookahead_size);
  
  /*
   * Submit IO for the read-ahead request in file_ra_state.
   */
  static inline unsigned long ra_submit(struct file_ra_state *ra,
  		struct address_space *mapping, struct file *filp)
  {
  	return __do_page_cache_readahead(mapping, filp,
  					ra->start, ra->size, ra->async_size);
  }
  /*
   * Turn a non-refcounted page (->_refcount == 0) into refcounted with
   * a count of one.
   */
  static inline void set_page_refcounted(struct page *page)
  {
  	VM_BUG_ON_PAGE(PageTail(page), page);
  	VM_BUG_ON_PAGE(page_ref_count(page), page);
  	set_page_count(page, 1);
  }
  extern unsigned long highest_memmap_pfn;
  /*
   * in mm/vmscan.c:
   */
  extern int isolate_lru_page(struct page *page);
  extern void putback_lru_page(struct page *page);
  extern bool pgdat_reclaimable(struct pglist_data *pgdat);

  /*
   * in mm/rmap.c:
   */
  extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  
  /*
   * in mm/page_alloc.c
   */
  
  /*
   * Structure for holding the mostly immutable allocation parameters passed
   * between functions involved in allocations, including the alloc_pages*
   * family of functions.
   *
   * nodemask, migratetype and high_zoneidx are initialized only once in
   * __alloc_pages_nodemask() and then never change.
   *
   * zonelist, preferred_zone and classzone_idx are set first in
   * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
   * by a const pointer.
   */
  struct alloc_context {
  	struct zonelist *zonelist;
  	nodemask_t *nodemask;
  	struct zoneref *preferred_zoneref;
  	int migratetype;
  	enum zone_type high_zoneidx;
  	bool spread_dirty_pages;
  };
  #define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)
  /*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
   *
   * 1) Any buddy B1 will have an order O twin B2 which satisfies
   * the following equation:
   *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order-1
 * buddy (B2) is #10:
   *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
   *
   * 2) Any buddy B will have an order O+1 parent P which
   * satisfies the following equation:
   *     P = B & ~(1 << O)
   *
   * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
   */
  static inline unsigned long
  __find_buddy_index(unsigned long page_idx, unsigned int order)
  {
  	return page_idx ^ (1 << order);
  }
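
/*
 * Worked example for rule 2) above (illustrative): the order-1 buddies
 * #8 and #10 share the order-2 parent #8:
 *
 *	P = 10 & ~(1 << 1) = 10 & ~2 = 8
 */
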
  extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
  				unsigned long end_pfn, struct zone *zone);
  
  static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
  				unsigned long end_pfn, struct zone *zone)
  {
  	if (zone->contiguous)
  		return pfn_to_page(start_pfn);
  
  	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
  }
  extern int __isolate_free_page(struct page *page, unsigned int order);
  extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
  					unsigned int order);
  extern void prep_compound_page(struct page *page, unsigned int order);
  extern void post_alloc_hook(struct page *page, unsigned int order,
  					gfp_t gfp_flags);
  extern int user_min_free_kbytes;

  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  
  /*
   * in mm/compaction.c
   */
  /*
   * compact_control is used to track pages being migrated and the free pages
   * they are being migrated to during memory compaction. The free_pfn starts
   * at the end of a zone and migrate_pfn begins at the start. Movable pages
   * are moved to the end of a zone during a compaction run and the run
   * completes when free_pfn <= migrate_pfn
   */
  struct compact_control {
  	struct list_head freepages;	/* List of free pages to migrate to */
  	struct list_head migratepages;	/* List of pages being migrated */
  	unsigned long nr_freepages;	/* Number of isolated free pages */
  	unsigned long nr_migratepages;	/* Number of pages to migrate */
  	unsigned long free_pfn;		/* isolate_freepages search base */
  	unsigned long migrate_pfn;	/* isolate_migratepages search base */
  	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
  	enum migrate_mode mode;		/* Async or sync migration mode */
  	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
  	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
  	bool direct_compaction;		/* False from kcompactd or /proc/... */
  	bool whole_zone;		/* Whole zone should/has been scanned */
  	int order;			/* order a direct compactor needs */
  	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
  	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
  	const int classzone_idx;	/* zone index of a direct compactor */
  	struct zone *zone;
  	bool contended;			/* Signal lock or sched contention */
  };
  
  unsigned long
  isolate_freepages_range(struct compact_control *cc,
  			unsigned long start_pfn, unsigned long end_pfn);
  unsigned long
  isolate_migratepages_range(struct compact_control *cc,
  			   unsigned long low_pfn, unsigned long end_pfn);
  int find_suitable_fallback(struct free_area *area, unsigned int order,
  			int migratetype, bool only_stealable, bool *can_steal);
  
  #endif

  /*
   * This function returns the order of a free page in the buddy system. In
   * general, page_zone(page)->lock must be held by the caller to prevent the
   * page from being allocated in parallel and returning garbage as the order.
   * If a caller does not hold page_zone(page)->lock, it must guarantee that the
   * page cannot be allocated or merged in parallel. Alternatively, it must
   * handle invalid values gracefully, and use page_order_unsafe() below.
   */
  static inline unsigned int page_order(struct page *page)
  {
  	/* PageBuddy() must be checked by the caller */
  	return page_private(page);
  }

  /*
   * Like page_order(), but for callers who cannot afford to hold the zone lock.
   * PageBuddy() should be checked first by the caller to minimize race window,
   * and invalid values must be handled gracefully.
   *
   * READ_ONCE is used so that if the caller assigns the result into a local
   * variable and e.g. tests it for valid range before using, the compiler cannot
   * decide to remove the variable and inline the page_private(page) multiple
   * times, potentially observing different values in the tests and the actual
   * use of the result.
   */
  #define page_order_unsafe(page)		READ_ONCE(page_private(page))
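
/*
 * Typical usage pattern (an illustrative sketch, per the comment above):
 * snapshot the value once, then range-check the local copy before use:
 *
 *	unsigned int order = page_order_unsafe(page);
 *
 *	if (order < MAX_ORDER)
 *		... order may still be stale, but is at least in range ...
 */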

  static inline bool is_cow_mapping(vm_flags_t flags)
  {
  	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  }
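
/*
 * Example (illustrative): a MAP_PRIVATE mapping of a file opened for
 * writing has VM_MAYWRITE set but VM_SHARED clear, so it is a COW
 * mapping; any MAP_SHARED mapping has VM_SHARED set and is not.
 */
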
  /*
 * These three helpers classify VMAs for virtual memory accounting.
   */
  
  /*
   * Executable code area - executable, not writable, not stack
   */
  static inline bool is_exec_mapping(vm_flags_t flags)
  {
  	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
  }
  /*
 * Stack area - automatically grows in one direction
   *
   * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
   * do_mmap() forbids all other combinations.
   */
  static inline bool is_stack_mapping(vm_flags_t flags)
  {
  	return (flags & VM_STACK) == VM_STACK;
  }
  /*
   * Data area - private, writable, not stack
   */
  static inline bool is_data_mapping(vm_flags_t flags)
  {
  	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
  }
  /* mm/util.c */
  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent);
  #ifdef CONFIG_MMU
  extern long populate_vma_page_range(struct vm_area_struct *vma,
  		unsigned long start, unsigned long end, int *nonblocking);
  extern void munlock_vma_pages_range(struct vm_area_struct *vma,
  			unsigned long start, unsigned long end);
  static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  {
  	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
  }
  /*
 * Must be called with the vma's mmap_sem held for read or write, and the page locked.
   */
  extern void mlock_vma_page(struct page *page);
  extern unsigned int munlock_vma_page(struct page *page);
  
  /*
   * Clear the page's PageMlocked().  This can be useful in a situation where
   * we want to unconditionally remove a page from the pagecache -- e.g.,
   * on truncation or freeing.
   *
   * It is legal to call this function for any page, mlocked or not.
   * If called for a page that is still mapped by mlocked vmas, all we do
   * is revert to lazy LRU behaviour -- semantics are not broken.
   */
  extern void clear_page_mlock(struct page *page);
  
  /*
   * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
   * (because that does not go through the full procedure of migration ptes):
   * to migrate the Mlocked page flag; update statistics.
   */
  static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  {
  	if (TestClearPageMlocked(page)) {
  		int nr_pages = hpage_nr_pages(page);

  		/* Holding pmd lock, no change in irq context: __mod is safe */
  		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
  		SetPageMlocked(newpage);
  		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
  	}
  }
  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  /*
   * At what user virtual address is page expected in @vma?
   */
  static inline unsigned long
  __vma_address(struct page *page, struct vm_area_struct *vma)
  {
  	pgoff_t pgoff = page_to_pgoff(page);
  	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
  }
  
  static inline unsigned long
  vma_address(struct page *page, struct vm_area_struct *vma)
  {
  	unsigned long address = __vma_address(page, vma);
  
  	/* page should be within @vma mapping range */
  	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
  
  	return address;
  }
  #else /* !CONFIG_MMU */
  static inline void clear_page_mlock(struct page *page) { }
  static inline void mlock_vma_page(struct page *page) { }
  static inline void mlock_migrate_page(struct page *new, struct page *old) { }
  #endif /* !CONFIG_MMU */

  /*
   * Return the mem_map entry representing the 'offset' subpage within
   * the maximally aligned gigantic page 'base'.  Handle any discontiguity
   * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
   */
  static inline struct page *mem_map_offset(struct page *base, int offset)
  {
  	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
  		return nth_page(base, offset);
  	return base + offset;
  }
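
/*
 * Illustrative: with the default MAX_ORDER of 11, MAX_ORDER_NR_PAGES is
 * 1024, so mem_map_offset(base, 1500) must take the nth_page() path
 * rather than assume base + 1500 lies in the same contiguous mem_map
 * chunk as base.
 */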
  
  /*
   * Iterator over all subpages within the maximally aligned gigantic
   * page 'base'.  Handle any discontiguity in the mem_map.
   */
  static inline struct page *mem_map_next(struct page *iter,
  						struct page *base, int offset)
  {
  	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
  		unsigned long pfn = page_to_pfn(base) + offset;
  		if (!pfn_valid(pfn))
  			return NULL;
  		return pfn_to_page(pfn);
  	}
  	return iter + 1;
  }
  
  /*
   * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
   * so all functions starting at paging_init should be marked __init
   * in those cases. SPARSEMEM, however, allows for memory hotplug,
   * and alloc_bootmem_node is not used.
   */
  #ifdef CONFIG_SPARSEMEM
  #define __paginginit __meminit
  #else
  #define __paginginit __init
  #endif
  /* Memory initialisation debug and verification */
  enum mminit_level {
  	MMINIT_WARNING,
  	MMINIT_VERIFY,
  	MMINIT_TRACE
  };
  
  #ifdef CONFIG_DEBUG_MEMORY_INIT
  
  extern int mminit_loglevel;
  
  #define mminit_dprintk(level, prefix, fmt, arg...) \
  do { \
  	if (level < mminit_loglevel) { \
  		if (level <= MMINIT_WARNING) \
  			pr_warn("mminit::" prefix " " fmt, ##arg);	\
  		else \
  			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
  	} \
  } while (0)
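
/*
 * Illustrative call (hypothetical prefix and message): output is only
 * emitted while level < mminit_loglevel:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "node %d zonelist sanity\n", nid);
 */
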
  extern void mminit_verify_pageflags_layout(void);
  extern void mminit_verify_zonelist(void);
  #else
  
  static inline void mminit_dprintk(enum mminit_level level,
  				const char *prefix, const char *fmt, ...)
  {
  }
  static inline void mminit_verify_pageflags_layout(void)
  {
  }
  static inline void mminit_verify_zonelist(void)
  {
  }
  #endif /* CONFIG_DEBUG_MEMORY_INIT */
  
  /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
  #if defined(CONFIG_SPARSEMEM)
  extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  				unsigned long *end_pfn);
  #else
  static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  				unsigned long *end_pfn)
  {
  }
  #endif /* CONFIG_SPARSEMEM */
  #define NODE_RECLAIM_NOSCAN	-2
  #define NODE_RECLAIM_FULL	-1
  #define NODE_RECLAIM_SOME	0
  #define NODE_RECLAIM_SUCCESS	1

  extern int hwpoison_filter(struct page *p);
  extern u32 hwpoison_filter_dev_major;
  extern u32 hwpoison_filter_dev_minor;
  extern u64 hwpoison_filter_flags_mask;
  extern u64 hwpoison_filter_flags_value;
  extern u64 hwpoison_filter_memcg;
  extern u32 hwpoison_filter_enable;

  extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
          unsigned long, unsigned long,
          unsigned long, unsigned long);
  
  extern void set_pageblock_order(void);
  unsigned long reclaim_clean_pages_from_list(struct zone *zone,
  					    struct list_head *page_list);
  /* The ALLOC_WMARK bits are used as an index to zone->watermark */
  #define ALLOC_WMARK_MIN		WMARK_MIN
  #define ALLOC_WMARK_LOW		WMARK_LOW
  #define ALLOC_WMARK_HIGH	WMARK_HIGH
  #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
  
  /* Mask to get the watermark bits */
  #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
  
  #define ALLOC_HARDER		0x10 /* try to alloc harder */
  #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
  #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
  #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
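
/*
 * Illustrative sketch of how these are consumed: the low bits index
 * zone->watermark directly, unless ALLOC_NO_WATERMARKS suppresses the
 * check entirely:
 *
 *	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 */
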
  enum ttu_flags;
  struct tlbflush_unmap_batch;
  
  #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
  void try_to_unmap_flush(void);
  void try_to_unmap_flush_dirty(void);
  #else
  static inline void try_to_unmap_flush(void)
  {
  }
  static inline void try_to_unmap_flush_dirty(void)
  {
  }
  
  #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
  
  extern const struct trace_print_flags pageflag_names[];
  extern const struct trace_print_flags vmaflag_names[];
  extern const struct trace_print_flags gfpflag_names[];
  #endif	/* __MM_INTERNAL_H */