  /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Macros for manipulating and testing page->flags
   */
  
  #ifndef PAGE_FLAGS_H
  #define PAGE_FLAGS_H
  #include <linux/types.h>
  #include <linux/bug.h>
  #include <linux/mmdebug.h>
  #ifndef __GENERATING_BOUNDS_H
  #include <linux/mm_types.h>
  #include <generated/bounds.h>
  #endif /* !__GENERATING_BOUNDS_H */

  /*
   * Various page->flags bits:
   *
   * PG_reserved is set for special pages, which can never be swapped out. Some
   * of them might not even exist (eg empty_bad_page)...
   *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
   *
   * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
   * and cleared when writeback _starts_ or when read _completes_. PG_writeback
   * is set before writeback starts and cleared when it finishes.
   *
   * PG_locked also pins a page in pagecache, and blocks truncation of the file
   * while it is held.
   *
   * page_waitqueue(page) is a wait queue of all tasks waiting for the page
   * to become unlocked.
   *
 * PG_uptodate tells whether the page's contents are valid.  When a read
   * completes, the page becomes uptodate, unless a disk I/O error happened.
   *
   * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
   * file-backed pagecache (see mm/vmscan.c).
   *
   * PG_error is set to indicate that an I/O error occurred on this page.
   *
   * PG_arch_1 is an architecture specific page state bit.  The generic code
   * guarantees that this bit is cleared for a page when it first is entered into
   * the page cache.
   *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing IO on the pages.  The
 * struct page (which holds these flag bits) is always mapped into the kernel
 * address space...
   *
   * PG_hwpoison indicates that a page got corrupted in hardware and contains
   * data with incorrect ECC bits that triggered a machine check. Accessing is
   * not safe since it may cause another machine check. Don't touch!
   */
  
  /*
   * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
   * locked- and dirty-page accounting.
   *
   * The page flags field is split into two parts, the main flags area
   * which extends from the low bits upwards, and the fields area which
   * extends from the high bits downwards.
   *
   *  | FIELD | ... | FLAGS |
   *  N-1           ^       0
   *               (NR_PAGEFLAGS)
   *
   * The fields area is reserved for fields mapping zone, node (for NUMA) and
   * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
   * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
   */
  enum pageflags {
  	PG_locked,		/* Page is locked. Don't touch. */
  	PG_error,
  	PG_referenced,
  	PG_uptodate,
  	PG_dirty,
  	PG_lru,
  	PG_active,
  	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
  	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
  	PG_arch_1,
  	PG_reserved,
  	PG_private,		/* If pagecache, has fs-private data */
  	PG_private_2,		/* If pagecache, has fs aux data */
  	PG_writeback,		/* Page is under writeback */
  	PG_head,		/* A head page */
  	PG_mappedtodisk,	/* Has blocks allocated on-disk */
  	PG_reclaim,		/* To be reclaimed asap */
  	PG_swapbacked,		/* Page is backed by RAM/swap */
  	PG_unevictable,		/* Page is "unevictable"  */
  #ifdef CONFIG_MMU
  	PG_mlocked,		/* Page is vma mlocked */
  #endif
  #ifdef CONFIG_ARCH_USES_PG_UNCACHED
  	PG_uncached,		/* Page has been mapped as uncached */
  #endif
  #ifdef CONFIG_MEMORY_FAILURE
  	PG_hwpoison,		/* hardware poisoned page. Don't touch */
  #endif
  #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
  	PG_young,
  	PG_idle,
  #endif
  	__NR_PAGEFLAGS,
  
  	/* Filesystems */
  	PG_checked = PG_owner_priv_1,
  	/* SwapBacked */
  	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
  	/* Two page bits are conscripted by FS-Cache to maintain local caching
  	 * state.  These bits are set on pages belonging to the netfs's inodes
  	 * when those inodes are being locally cached.
  	 */
  	PG_fscache = PG_private_2,	/* page backed by cache */
  	/* XEN */
  	/* Pinned in Xen as a read-only pagetable page. */
  	PG_pinned = PG_owner_priv_1,
  	/* Pinned as part of domain save (see xen_mm_pin_all()). */
  	PG_savepinned = PG_dirty,
  	/* Has a grant mapping of another (foreign) domain's page. */
  	PG_foreign = PG_owner_priv_1,

  	/* SLOB */
  	PG_slob_free = PG_private,
  
  	/* Compound pages. Stored in first tail page's flags */
  	PG_double_map = PG_private_2,
  
  	/* non-lru isolated movable page */
  	PG_isolated = PG_reclaim,
  };

  #ifndef __GENERATING_BOUNDS_H
  struct page;	/* forward declaration */
  
  static inline struct page *compound_head(struct page *page)
  {
  	unsigned long head = READ_ONCE(page->compound_head);
  
  	if (unlikely(head & 1))
  		return (struct page *) (head - 1);
  	return page;
  }
  static __always_inline int PageTail(struct page *page)
  {
  	return READ_ONCE(page->compound_head) & 1;
  }
  static __always_inline int PageCompound(struct page *page)
  {
  	return test_bit(PG_head, &page->flags) || PageTail(page);
  }
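
/*
 * Illustrative sketch of the encoding above: a tail page stores a pointer to
 * its head page in page->compound_head with bit 0 set, which is why
 * compound_head() subtracts one; for head and order-0 pages bit 0 is clear
 * and the page itself is returned.  Assuming "page" is a tail page of a
 * compound allocation:
 *
 *	struct page *head = compound_head(page);
 *
 *	VM_BUG_ON_PAGE(!PageHead(head), head);
 *	VM_BUG_ON_PAGE(!PageTail(page) || !PageCompound(page), page);
 */
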
  /*
   * Page flags policies wrt compound pages
   *
   * PF_ANY:
   *     the page flag is relevant for small, head and tail pages.
   *
   * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
   *
   * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
   *
   * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
   *
   * PF_NO_COMPOUND:
   *     the page flag is not relevant for compound pages.
   */
  #define PF_ANY(page, enforce)	page
  #define PF_HEAD(page, enforce)	compound_head(page)
  #define PF_ONLY_HEAD(page, enforce) ({					\
  		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
  		page;})
  #define PF_NO_TAIL(page, enforce) ({					\
  		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
  		compound_head(page);})
  #define PF_NO_COMPOUND(page, enforce) ({				\
  		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
  		page;})
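
/*
 * Illustrative sketch of the "enforce" argument: the generators below pass 1
 * for modifying operations (Set/Clear/TestSet/TestClear) and 0 for plain
 * tests, so a PF_NO_TAIL flag such as PG_mlocked behaves roughly like this
 * (the VM_BUG_ON_PGFLAGS() check only fires with CONFIG_DEBUG_VM_PGFLAGS):
 *
 *	PF_NO_TAIL(page, 1)	// BUG on a tail page, else compound_head(page)
 *	PF_NO_TAIL(page, 0)	// always redirects to compound_head(page)
 *
 * i.e. SetPageMlocked() on a tail page is a bug, while PageMlocked() on a
 * tail page silently tests the head page.
 */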
  
  /*
   * Macros to create function definitions for page flags
   */
  #define TESTPAGEFLAG(uname, lname, policy)				\
  static __always_inline int Page##uname(struct page *page)		\
  	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

  #define SETPAGEFLAG(uname, lname, policy)				\
  static __always_inline void SetPage##uname(struct page *page)		\
  	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

  #define CLEARPAGEFLAG(uname, lname, policy)				\
  static __always_inline void ClearPage##uname(struct page *page)		\
  	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

  #define __SETPAGEFLAG(uname, lname, policy)				\
  static __always_inline void __SetPage##uname(struct page *page)		\
  	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

  #define __CLEARPAGEFLAG(uname, lname, policy)				\
  static __always_inline void __ClearPage##uname(struct page *page)	\
  	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

  #define TESTSETFLAG(uname, lname, policy)				\
  static __always_inline int TestSetPage##uname(struct page *page)	\
  	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

  #define TESTCLEARFLAG(uname, lname, policy)				\
  static __always_inline int TestClearPage##uname(struct page *page)	\
  	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

  #define PAGEFLAG(uname, lname, policy)					\
  	TESTPAGEFLAG(uname, lname, policy)				\
  	SETPAGEFLAG(uname, lname, policy)				\
  	CLEARPAGEFLAG(uname, lname, policy)

  #define __PAGEFLAG(uname, lname, policy)				\
  	TESTPAGEFLAG(uname, lname, policy)				\
  	__SETPAGEFLAG(uname, lname, policy)				\
  	__CLEARPAGEFLAG(uname, lname, policy)

  #define TESTSCFLAG(uname, lname, policy)				\
  	TESTSETFLAG(uname, lname, policy)				\
  	TESTCLEARFLAG(uname, lname, policy)
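
/*
 * Illustrative sketch: PAGEFLAG(Dirty, dirty, PF_HEAD), used further down in
 * this file, expands to roughly
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * so callers get one accessor family per flag with the compound-page policy
 * baked in.
 */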

  #define TESTPAGEFLAG_FALSE(uname)					\
  static inline int Page##uname(const struct page *page) { return 0; }
  #define SETPAGEFLAG_NOOP(uname)						\
  static inline void SetPage##uname(struct page *page) {  }
  
  #define CLEARPAGEFLAG_NOOP(uname)					\
  static inline void ClearPage##uname(struct page *page) {  }
  
  #define __CLEARPAGEFLAG_NOOP(uname)					\
  static inline void __ClearPage##uname(struct page *page) {  }
  #define TESTSETFLAG_FALSE(uname)					\
  static inline int TestSetPage##uname(struct page *page) { return 0; }
  #define TESTCLEARFLAG_FALSE(uname)					\
  static inline int TestClearPage##uname(struct page *page) { return 0; }
  #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
  	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
  
  #define TESTSCFLAG_FALSE(uname)						\
  	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
  __PAGEFLAG(Locked, locked, PF_NO_TAIL)
  PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
  PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
  PAGEFLAG(Referenced, referenced, PF_HEAD)
  	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
  	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
  PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
  	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
  PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
  PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
  	TESTCLEARFLAG(Active, active, PF_HEAD)
  __PAGEFLAG(Slab, slab, PF_NO_TAIL)
  __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
  PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */
  
  /* Xen */
  PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
  	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
  PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
  PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
  PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
  	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
  PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
  	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
  	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

  /*
   * Private page markings that may be used by the filesystem that owns the page
   * for its own purposes.
   * - PG_private and PG_private_2 cause releasepage() and co to be invoked
   */
  PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
  	__CLEARPAGEFLAG(Private, private, PF_ANY)
  PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
  PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
  	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
  
  /*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
   */
  TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
  	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
  PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

  /* PG_readahead is only used for reads; PG_reclaim is only for writes */
  PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
  	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
  PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
  	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
  
  #ifdef CONFIG_HIGHMEM
  /*
   * Must use a macro here due to header dependency issues. page_zone() is not
   * available at this point.
   */
  #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
  #else
  PAGEFLAG_FALSE(HighMem)
  #endif
  
  #ifdef CONFIG_SWAP
  static __always_inline int PageSwapCache(struct page *page)
  {
  #ifdef CONFIG_THP_SWAP
  	page = compound_head(page);
  #endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
  SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
  CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
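
/*
 * Illustrative sketch: PG_swapcache aliases PG_owner_priv_1, so
 * PageSwapCache() above additionally requires PageSwapBacked() to avoid
 * mistaking a filesystem's use of the owner bit for a swap cache page.
 * Assuming "page" is an anonymous page just added to the swap cache:
 *
 *	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 *	SetPageSwapCache(page);
 *	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
 */
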
  #else
  PAGEFLAG_FALSE(SwapCache)
  #endif
  PAGEFLAG(Unevictable, unevictable, PF_HEAD)
  	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
  	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

  #ifdef CONFIG_MMU
  PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
  	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
  	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
  #else
  PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
  	TESTSCFLAG_FALSE(Mlocked)
  #endif
  #ifdef CONFIG_ARCH_USES_PG_UNCACHED
  PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
  #else
  PAGEFLAG_FALSE(Uncached)
  #endif

  #ifdef CONFIG_MEMORY_FAILURE
  PAGEFLAG(HWPoison, hwpoison, PF_ANY)
  TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
  #define __PG_HWPOISON (1UL << PG_hwpoison)
  #else
  PAGEFLAG_FALSE(HWPoison)
  #define __PG_HWPOISON 0
  #endif
  #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
  TESTPAGEFLAG(Young, young, PF_ANY)
  SETPAGEFLAG(Young, young, PF_ANY)
  TESTCLEARFLAG(Young, young, PF_ANY)
  PAGEFLAG(Idle, idle, PF_ANY)
  #endif
  /*
   * On an anonymous page mapped into a user virtual memory area,
   * page->mapping points to its anon_vma, not to a struct address_space;
   * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
   *
   * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
   * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
   * bit; and then page->mapping points, not to an anon_vma, but to a private
   * structure which KSM associates with that merged page.  See ksm.h.
   *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru
 * movable page, and then page->mapping points to a struct address_space.
   *
   * Please note that, confusingly, "page_mapping" refers to the inode
   * address_space which maps the page from disk; whereas "page_mapped"
   * refers to user virtual address space into which the page is mapped.
   */
  #define PAGE_MAPPING_ANON	0x1
  #define PAGE_MAPPING_MOVABLE	0x2
  #define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
  #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

  static __always_inline int PageMappingFlags(struct page *page)
  {
  	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
  }
  static __always_inline int PageAnon(struct page *page)
  {
  	page = compound_head(page);
  	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
  }
  
  static __always_inline int __PageMovable(struct page *page)
  {
  	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
  				PAGE_MAPPING_MOVABLE;
  }
  
  #ifdef CONFIG_KSM
  /*
   * A KSM page is one of those write-protected "shared pages" or "merged pages"
   * which KSM maps into multiple mms, wherever identical anonymous page content
   * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
   * anon_vma, but to that page's node of the stable tree.
   */
  static __always_inline int PageKsm(struct page *page)
  {
  	page = compound_head(page);
  	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
  				PAGE_MAPPING_KSM;
  }
  #else
  TESTPAGEFLAG_FALSE(Ksm)
  #endif
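
/*
 * Illustrative sketch of how the PAGE_MAPPING_* low bits of page->mapping
 * are decoded by the helpers above (m is the mapping value of a head page):
 *
 *	(m & PAGE_MAPPING_FLAGS) == 0			regular pagecache page,
 *							mapping is an address_space
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON	PageAnon(), mapping is an
 *							anon_vma
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM	PageKsm() (also PageAnon()),
 *							mapping is a stable tree node
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE __PageMovable(), non-lru
 *							movable page
 */
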
  u64 stable_page_flags(struct page *page);
  static inline int PageUptodate(struct page *page)
  {
  	int ret;
  	page = compound_head(page);
  	ret = test_bit(PG_uptodate, &(page)->flags);
  	/*
  	 * Must ensure that the data we read out of the page is loaded
  	 * _after_ we've loaded page->flags to check for PageUptodate.
  	 * We can skip the barrier if the page is not uptodate, because
  	 * we wouldn't be reading anything from it.
  	 *
  	 * See SetPageUptodate() for the other side of the story.
  	 */
  	if (ret)
  		smp_rmb();
  
  	return ret;
  }
  static __always_inline void __SetPageUptodate(struct page *page)
  {
  	VM_BUG_ON_PAGE(PageTail(page), page);
  	smp_wmb();
  	__set_bit(PG_uptodate, &page->flags);
  }
  static __always_inline void SetPageUptodate(struct page *page)
  {
  	VM_BUG_ON_PAGE(PageTail(page), page);
  	/*
  	 * Memory barrier must be issued before setting the PG_uptodate bit,
  	 * so that all previous stores issued in order to bring the page
  	 * uptodate are actually visible before PageUptodate becomes true.
  	 */
  	smp_wmb();
  	set_bit(PG_uptodate, &page->flags);
  }
  CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
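
/*
 * Illustrative sketch of the barrier pairing above, with a hypothetical
 * producer filling the page and a consumer reading it:
 *
 *	// producer, e.g. after a read completes
 *	memcpy(page_address(page), src, PAGE_SIZE);
 *	SetPageUptodate(page);			// smp_wmb(), then set_bit()
 *
 *	// consumer
 *	if (PageUptodate(page))			// test_bit(), then smp_rmb()
 *		consume(page_address(page));	// "consume" is hypothetical
 */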

  int test_clear_page_writeback(struct page *page);
  int __test_set_page_writeback(struct page *page, bool keep_write);
  
  #define test_set_page_writeback(page)			\
  	__test_set_page_writeback(page, false)
  #define test_set_page_writeback_keepwrite(page)	\
  	__test_set_page_writeback(page, true)
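
/*
 * Illustrative sketch: __test_set_page_writeback() (mm/page-writeback.c)
 * returns the old PG_writeback value, so a hypothetical caller can tell
 * whether writeback just started or was already in progress:
 *
 *	if (!test_set_page_writeback(page))
 *		account_writeback_started(page);	// hypothetical hook
 */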

  static inline void set_page_writeback(struct page *page)
  {
  	test_set_page_writeback(page);
  }

  static inline void set_page_writeback_keepwrite(struct page *page)
  {
  	test_set_page_writeback_keepwrite(page);
  }
  __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

  static __always_inline void set_compound_head(struct page *page, struct page *head)
  {
  	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
  }
  static __always_inline void clear_compound_head(struct page *page)
  {
  	WRITE_ONCE(page->compound_head, 0);
  }

  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  static inline void ClearPageCompound(struct page *page)
  {
  	BUG_ON(!PageHead(page));
  	ClearPageHead(page);
  }
  #endif
  #define PG_head_mask ((1UL << PG_head))

  #ifdef CONFIG_HUGETLB_PAGE
  int PageHuge(struct page *page);
  int PageHeadHuge(struct page *page);
  bool page_huge_active(struct page *page);
  #else
  TESTPAGEFLAG_FALSE(Huge)
  TESTPAGEFLAG_FALSE(HeadHuge)
  
  static inline bool page_huge_active(struct page *page)
  {
  	return 0;
  }
  #endif

  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  /*
   * PageHuge() only returns true for hugetlbfs pages, but not for
   * normal or transparent huge pages.
   *
   * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
   */
  static inline int PageTransHuge(struct page *page)
  {
  	VM_BUG_ON_PAGE(PageTail(page), page);
  	return PageHead(page);
  }
  /*
   * PageTransCompound returns true for both transparent huge pages
   * and hugetlbfs pages, so it should only be called when it's known
   * that hugetlbfs pages aren't involved.
   */
  static inline int PageTransCompound(struct page *page)
  {
  	return PageCompound(page);
  }

  /*
   * PageTransCompoundMap is the same as PageTransCompound, but it also
   * guarantees the primary MMU has the entire compound page mapped
   * through pmd_trans_huge, which in turn guarantees the secondary MMUs
   * can also map the entire compound page. This allows the secondary
   * MMUs to call get_user_pages() only once for each compound page and
   * to immediately map the entire compound page with a single secondary
   * MMU fault. If there will be a pmd split later, the secondary MMUs
   * will get an update through the MMU notifier invalidation through
   * split_huge_pmd().
   *
 * Unlike PageTransCompound, this is only safe to call while split_huge_pmd()
 * cannot run from under us, e.g. if protected by the MMU notifier; otherwise
 * it may result in page->_mapcount < 0 false positives.
   */
  static inline int PageTransCompoundMap(struct page *page)
  {
  	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
  }
  
  /*
   * PageTransTail returns true for both transparent huge pages
   * and hugetlbfs pages, so it should only be called when it's known
   * that hugetlbfs pages aren't involved.
   */
  static inline int PageTransTail(struct page *page)
  {
  	return PageTail(page);
  }
  /*
   * PageDoubleMap indicates that the compound page is mapped with PTEs as well
   * as PMDs.
   *
   * This is required for optimization of rmap operations for THP: we can postpone
   * per small page mapcount accounting (and its overhead from atomic operations)
   * until the first PMD split.
   *
 * For a PageDoubleMap page, ->_mapcount in all sub-pages is offset up by one.
 * This extra reference goes away with the last compound_mapcount.
   *
   * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
   */
  static inline int PageDoubleMap(struct page *page)
  {
  	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
  }
  static inline void SetPageDoubleMap(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageHead(page), page);
  	set_bit(PG_double_map, &page[1].flags);
  }
  
  static inline void ClearPageDoubleMap(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageHead(page), page);
  	clear_bit(PG_double_map, &page[1].flags);
  }
  static inline int TestSetPageDoubleMap(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageHead(page), page);
  	return test_and_set_bit(PG_double_map, &page[1].flags);
  }
  
  static inline int TestClearPageDoubleMap(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageHead(page), page);
  	return test_and_clear_bit(PG_double_map, &page[1].flags);
  }
  #else
  TESTPAGEFLAG_FALSE(TransHuge)
  TESTPAGEFLAG_FALSE(TransCompound)
  TESTPAGEFLAG_FALSE(TransCompoundMap)
  TESTPAGEFLAG_FALSE(TransTail)
  PAGEFLAG_FALSE(DoubleMap)
  	TESTSETFLAG_FALSE(DoubleMap)
  	TESTCLEARFLAG_FALSE(DoubleMap)
  #endif
  /*
 * For pages that are never mapped to userspace, page->_mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better to start not too close
   * to -2 so that an underflow of the page_mapcount() won't be mistaken
   * for a special page.
   */
  #define PAGE_MAPCOUNT_OPS(uname, lname)					\
  static __always_inline int Page##uname(struct page *page)		\
  {									\
  	return atomic_read(&page->_mapcount) ==				\
  				PAGE_##lname##_MAPCOUNT_VALUE;		\
  }									\
  static __always_inline void __SetPage##uname(struct page *page)		\
  {									\
  	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
  	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
  }									\
  static __always_inline void __ClearPage##uname(struct page *page)	\
  {									\
  	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
  	atomic_set(&page->_mapcount, -1);				\
  }
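
/*
 * Illustrative sketch: PAGE_MAPCOUNT_OPS(Buddy, BUDDY) below generates
 * PageBuddy(), __SetPageBuddy() and __ClearPageBuddy(), which keep the page
 * type in _mapcount rather than consuming a page flag:
 *
 *	__SetPageBuddy(page);	// _mapcount: -1 -> PAGE_BUDDY_MAPCOUNT_VALUE
 *	PageBuddy(page);	// true while _mapcount == -128
 *	__ClearPageBuddy(page);	// _mapcount back to -1 (no user mappings)
 */
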
  /*
 * PageBuddy() indicates that the page is free and in the buddy system
   * (see mm/page_alloc.c).
   */
  #define PAGE_BUDDY_MAPCOUNT_VALUE		(-128)
  PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

  /*
   * PageBalloon() is set on pages that are on the balloon page list
   * (see mm/balloon_compaction.c).
   */
  #define PAGE_BALLOON_MAPCOUNT_VALUE		(-256)
  PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

  /*
   * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
   * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
   */
  #define PAGE_KMEMCG_MAPCOUNT_VALUE		(-512)
  PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
  extern bool is_free_buddy_page(struct page *page);
  __PAGEFLAG(Isolated, isolated, PF_ANY);
  /*
   * If network-based swap is enabled, sl*b must keep track of whether pages
   * were allocated from pfmemalloc reserves.
   */
  static inline int PageSlabPfmemalloc(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageSlab(page), page);
  	return PageActive(page);
  }
  
  static inline void SetPageSlabPfmemalloc(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageSlab(page), page);
  	SetPageActive(page);
  }
  
  static inline void __ClearPageSlabPfmemalloc(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageSlab(page), page);
  	__ClearPageActive(page);
  }
  
  static inline void ClearPageSlabPfmemalloc(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageSlab(page), page);
  	ClearPageActive(page);
  }
  #ifdef CONFIG_MMU
  #define __PG_MLOCKED		(1UL << PG_mlocked)
  #else
  #define __PG_MLOCKED		0
  #endif
  /*
   * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
   */
  #define PAGE_FLAGS_CHECK_AT_FREE				\
  	(1UL << PG_lru		| 1UL << PG_locked	|	\
  	 1UL << PG_private	| 1UL << PG_private_2	|	\
  	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
  	 1UL << PG_slab		| 1UL << PG_active 	|	\
  	 1UL << PG_unevictable	| __PG_MLOCKED)
  
  /*
   * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
   * there has been a kernel bug or struct page corruption.
   *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
   */
  #define PAGE_FLAGS_CHECK_AT_PREP	\
  	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

  #define PAGE_FLAGS_PRIVATE				\
  	(1UL << PG_private | 1UL << PG_private_2)
  /**
   * page_has_private - Determine if page has private stuff
   * @page: The page to be checked
   *
   * Determine if a page has private stuff, indicating that release routines
   * should be invoked upon it.
   */
  static inline int page_has_private(struct page *page)
  {
  	return !!(page->flags & PAGE_FLAGS_PRIVATE);
  }
  #undef PF_ANY
  #undef PF_HEAD
  #undef PF_ONLY_HEAD
  #undef PF_NO_TAIL
  #undef PF_NO_COMPOUND
  #endif /* !__GENERATING_BOUNDS_H */

  #endif	/* PAGE_FLAGS_H */