Blame view
include/linux/page-flags.h
14.3 KB
1da177e4c
|
1 2 3 4 5 6 |
/* * Macros for manipulating and testing page->flags */ #ifndef PAGE_FLAGS_H #define PAGE_FLAGS_H |
f886ed443
|
7 |
#include <linux/types.h> |
9223b4190
|
8 |
#ifndef __GENERATING_BOUNDS_H |
6d7779538
|
9 |
#include <linux/mm_types.h> |
01fc0ac19
|
10 |
#include <generated/bounds.h> |
9223b4190
|
11 |
#endif /* !__GENERATING_BOUNDS_H */ |
f886ed443
|
12 |
|
1da177e4c
|
13 14 15 16 17 18 |
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents is valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing IO on the pages.  The
 * struct page (these bits with information) are always mapped into kernel
 * address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
e26831814
|
71 72 73 74 75 76 77 78 79 80 |
enum pageflags { PG_locked, /* Page is locked. Don't touch. */ PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, PG_slab, PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ |
e26831814
|
81 82 83 |
PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ |
266cf658e
|
84 |
PG_private_2, /* If pagecache, has fs aux data */ |
e26831814
|
85 |
PG_writeback, /* Page is under writeback */ |
e20b8cca7
|
86 87 88 89 |
#ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ PG_tail, /* A tail page */ #else |
e26831814
|
90 |
PG_compound, /* A compound page */ |
e20b8cca7
|
91 |
#endif |
e26831814
|
92 93 94 |
PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ |
b2e185384
|
95 |
PG_swapbacked, /* Page is backed by RAM/swap */ |
894bc3104
|
96 |
PG_unevictable, /* Page is "unevictable" */ |
af8e3354b
|
97 |
#ifdef CONFIG_MMU |
b291f0003
|
98 |
PG_mlocked, /* Page is vma mlocked */ |
894bc3104
|
99 |
#endif |
46cf98cda
|
100 |
#ifdef CONFIG_ARCH_USES_PG_UNCACHED |
602c4d112
|
101 |
PG_uncached, /* Page has been mapped as uncached */ |
f886ed443
|
102 |
#endif |
d466f2fcb
|
103 104 105 |
#ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif |
e9da73d67
|
106 107 108 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE PG_compound_lock, #endif |
0cad47cf1
|
109 110 111 112 |
__NR_PAGEFLAGS, /* Filesystems */ PG_checked = PG_owner_priv_1, |
266cf658e
|
113 114 115 116 117 |
/* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes * when those inodes are being locally cached. */ PG_fscache = PG_private_2, /* page backed by cache */ |
0cad47cf1
|
118 119 120 |
/* XEN */ PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, |
8a38082d2
|
121 |
|
9023cb7e8
|
122 |
/* SLOB */ |
9023cb7e8
|
123 |
PG_slob_free = PG_private, |
e26831814
|
124 |
}; |
1da177e4c
|
125 |
|
9223b4190
|
126 |
#ifndef __GENERATING_BOUNDS_H |
1da177e4c
|
127 |
/* |
f94a62e91
|
128 129 130 |
* Macros to create function definitions for page flags */ #define TESTPAGEFLAG(uname, lname) \ |
67db392d1
|
131 |
static inline int Page##uname(const struct page *page) \ |
f94a62e91
|
132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 |
{ return test_bit(PG_##lname, &page->flags); } #define SETPAGEFLAG(uname, lname) \ static inline void SetPage##uname(struct page *page) \ { set_bit(PG_##lname, &page->flags); } #define CLEARPAGEFLAG(uname, lname) \ static inline void ClearPage##uname(struct page *page) \ { clear_bit(PG_##lname, &page->flags); } #define __SETPAGEFLAG(uname, lname) \ static inline void __SetPage##uname(struct page *page) \ { __set_bit(PG_##lname, &page->flags); } #define __CLEARPAGEFLAG(uname, lname) \ static inline void __ClearPage##uname(struct page *page) \ { __clear_bit(PG_##lname, &page->flags); } #define TESTSETFLAG(uname, lname) \ static inline int TestSetPage##uname(struct page *page) \ { return test_and_set_bit(PG_##lname, &page->flags); } #define TESTCLEARFLAG(uname, lname) \ static inline int TestClearPage##uname(struct page *page) \ { return test_and_clear_bit(PG_##lname, &page->flags); } |
451ea25da
|
157 158 159 |
#define __TESTCLEARFLAG(uname, lname) \ static inline int __TestClearPage##uname(struct page *page) \ { return __test_and_clear_bit(PG_##lname, &page->flags); } |
f94a62e91
|
160 161 162 163 164 165 |
#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) #define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) |
ec7cade8c
|
166 |
#define PAGEFLAG_FALSE(uname) \ |
67db392d1
|
167 |
static inline int Page##uname(const struct page *page) \ |
ec7cade8c
|
168 |
{ return 0; } |
f94a62e91
|
169 170 |
#define TESTSCFLAG(uname, lname) \ TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) |
8a7a8544a
|
171 172 173 174 175 176 177 178 179 180 181 |
#define SETPAGEFLAG_NOOP(uname) \ static inline void SetPage##uname(struct page *page) { } #define CLEARPAGEFLAG_NOOP(uname) \ static inline void ClearPage##uname(struct page *page) { } #define __CLEARPAGEFLAG_NOOP(uname) \ static inline void __ClearPage##uname(struct page *page) { } #define TESTCLEARFLAG_FALSE(uname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } |
451ea25da
|
182 183 |
#define __TESTCLEARFLAG_FALSE(uname) \ static inline int __TestClearPage##uname(struct page *page) { return 0; } |
6a1e7f777
|
184 |
struct page; /* forward declaration */ |
cb240452b
|
185 |
TESTPAGEFLAG(Locked, locked) |
212260aa0
|
186 |
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) |
6a1e7f777
|
187 188 189 190 |
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) |
894bc3104
|
191 |
TESTCLEARFLAG(Active, active) |
6a1e7f777
|
192 |
__PAGEFLAG(Slab, slab) |
0cad47cf1
|
193 194 195 |
PAGEFLAG(Checked, checked) /* Used by some filesystems */ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ PAGEFLAG(SavePinned, savepinned); /* Xen */ |
6a1e7f777
|
196 |
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) |
b2e185384
|
197 |
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) |
6a1e7f777
|
198 |
|
9023cb7e8
|
199 |
__PAGEFLAG(SlobFree, slob_free) |
6a1e7f777
|
200 |
/* |
266cf658e
|
201 202 203 204 205 206 207 208 209 210 |
* Private page markings that may be used by the filesystem that owns the page * for its own purposes. * - PG_private and PG_private_2 cause releasepage() and co to be invoked */ PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) /* |
6a1e7f777
|
211 212 213 214 |
* Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) |
6a1e7f777
|
215 216 217 218 |
PAGEFLAG(MappedToDisk, mappedtodisk) /* PG_readahead is only used for file reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) |
0a128b2b1
|
219 |
PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ |
6a1e7f777
|
220 221 |
#ifdef CONFIG_HIGHMEM |
f94a62e91
|
222 |
/* |
6a1e7f777
|
223 224 |
* Must use a macro here due to header dependency issues. page_zone() is not * available at this point. |
1da177e4c
|
225 |
*/ |
0a128b2b1
|
226 |
#define PageHighMem(__p) is_highmem(page_zone(__p)) |
6a1e7f777
|
227 |
#else |
ec7cade8c
|
228 |
PAGEFLAG_FALSE(HighMem) |
6a1e7f777
|
229 230 231 232 233 |
#endif #ifdef CONFIG_SWAP PAGEFLAG(SwapCache, swapcache) #else |
ec7cade8c
|
234 |
PAGEFLAG_FALSE(SwapCache) |
6d91add09
|
235 |
SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) |
6a1e7f777
|
236 |
#endif |
894bc3104
|
237 238 |
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) TESTCLEARFLAG(Unevictable, unevictable) |
b291f0003
|
239 |
|
af8e3354b
|
240 |
#ifdef CONFIG_MMU |
b291f0003
|
241 |
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) |
451ea25da
|
242 |
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) |
894bc3104
|
243 |
#else |
451ea25da
|
244 245 |
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) |
894bc3104
|
246 |
#endif |
46cf98cda
|
247 |
#ifdef CONFIG_ARCH_USES_PG_UNCACHED |
6a1e7f777
|
248 |
PAGEFLAG(Uncached, uncached) |
602c4d112
|
249 |
#else |
ec7cade8c
|
250 |
PAGEFLAG_FALSE(Uncached) |
6a1e7f777
|
251 |
#endif |
1da177e4c
|
252 |
|
d466f2fcb
|
253 254 |
#ifdef CONFIG_MEMORY_FAILURE PAGEFLAG(HWPoison, hwpoison) |
847ce401d
|
255 |
TESTSCFLAG(HWPoison, hwpoison) |
d466f2fcb
|
256 257 258 259 260 |
#define __PG_HWPOISON (1UL << PG_hwpoison) #else PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif |
1a9b5b7fe
|
261 |
u64 stable_page_flags(struct page *page); |
0ed361dec
|
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 |
static inline int PageUptodate(struct page *page) { int ret = test_bit(PG_uptodate, &(page)->flags); /* * Must ensure that the data we read out of the page is loaded * _after_ we've loaded page->flags to check for PageUptodate. * We can skip the barrier if the page is not uptodate, because * we wouldn't be reading anything from it. * * See SetPageUptodate() for the other side of the story. */ if (ret) smp_rmb(); return ret; } static inline void __SetPageUptodate(struct page *page) { smp_wmb(); __set_bit(PG_uptodate, &(page)->flags); |
0ed361dec
|
284 |
} |
2dcea57ae
|
285 286 |
static inline void SetPageUptodate(struct page *page) { |
0ed361dec
|
287 |
#ifdef CONFIG_S390 |
2dcea57ae
|
288 |
if (!test_and_set_bit(PG_uptodate, &page->flags)) |
a43a9d93d
|
289 |
page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0); |
f6ac2354d
|
290 |
#else |
0ed361dec
|
291 292 293 294 295 296 297 298 299 300 |
/* * Memory barrier must be issued before setting the PG_uptodate bit, * so that all previous stores issued in order to bring the page * uptodate are actually visible before PageUptodate becomes true. * * s390 doesn't need an explicit smp_wmb here because the test and * set bit already provides full barriers. */ smp_wmb(); set_bit(PG_uptodate, &(page)->flags); |
1da177e4c
|
301 |
#endif |
0ed361dec
|
302 |
} |
6a1e7f777
|
303 |
CLEARPAGEFLAG(Uptodate, uptodate) |
1da177e4c
|
304 |
|
6a1e7f777
|
305 |
extern void cancel_dirty_page(struct page *page, unsigned int account_size); |
d77c2d7cc
|
306 |
|
6a1e7f777
|
307 308 |
int test_clear_page_writeback(struct page *page); int test_set_page_writeback(struct page *page); |
1da177e4c
|
309 |
|
6a1e7f777
|
310 311 312 313 |
static inline void set_page_writeback(struct page *page) { test_set_page_writeback(page); } |
1da177e4c
|
314 |
|
e20b8cca7
|
315 316 317 318 319 320 321 |
#ifdef CONFIG_PAGEFLAGS_EXTENDED /* * System with lots of page flags available. This allows separate * flags for PageHead() and PageTail() checks of compound pages so that bit * tests can be used in performance sensitive paths. PageCompound is * generally not used in hot code paths. */ |
4e6af67e9
|
322 |
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) |
e20b8cca7
|
323 324 325 326 327 328 329 |
__PAGEFLAG(Tail, tail) static inline int PageCompound(struct page *page) { return page->flags & ((1L << PG_head) | (1L << PG_tail)); } |
4e6af67e9
|
330 331 332 333 334 335 336 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { BUG_ON(!PageHead(page)); ClearPageHead(page); } #endif |
e20b8cca7
|
337 338 339 340 341 342 343 |
#else /* * Reduce page flag use as much as possible by overlapping * compound page flags with the flags used for page cache pages. Possible * because PageCompound is always set for compound pages and not for * pages on the LRU and/or pagecache. */ |
6a1e7f777
|
344 345 |
TESTPAGEFLAG(Compound, compound) __PAGEFLAG(Head, compound) |
1da177e4c
|
346 |
|
d85f33855
|
347 |
/* |
6d7779538
|
348 |
* PG_reclaim is used in combination with PG_compound to mark the |
6a1e7f777
|
349 350 351 352 |
* head and tail of a compound page. This saves one page flag * but makes it impossible to use compound pages for the page cache. * The PG_reclaim bit would have to be used for reclaim or readahead * if compound pages enter the page cache. |
6d7779538
|
353 354 355 |
* * PG_compound & PG_reclaim => Tail page * PG_compound & ~PG_reclaim => Head page |
d85f33855
|
356 |
*/ |
6d7779538
|
357 |
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) |
6a1e7f777
|
358 359 360 361 |
static inline int PageTail(struct page *page) { return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); } |
6d7779538
|
362 363 364 365 366 367 368 369 370 371 |
static inline void __SetPageTail(struct page *page) { page->flags |= PG_head_tail_mask; } static inline void __ClearPageTail(struct page *page) { page->flags &= ~PG_head_tail_mask; } |
4e6af67e9
|
372 373 374 375 376 377 378 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); clear_bit(PG_compound, &page->flags); } #endif |
e20b8cca7
|
379 |
#endif /* !PAGEFLAGS_EXTENDED */ |
dfa7e20cc
|
380 |
|
936a5fe6e
|
381 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
71e3aac07
|
382 383 384 385 386 387 388 389 390 391 392 393 394 |
/* * PageHuge() only returns true for hugetlbfs pages, but not for * normal or transparent huge pages. * * PageTransHuge() returns true for both transparent huge and * hugetlbfs pages, but not normal pages. PageTransHuge() can only be * called only in the core VM paths where hugetlbfs pages can't exist. */ static inline int PageTransHuge(struct page *page) { VM_BUG_ON(PageTail(page)); return PageHead(page); } |
936a5fe6e
|
395 396 397 398 |
static inline int PageTransCompound(struct page *page) { return PageCompound(page); } |
71e3aac07
|
399 |
|
936a5fe6e
|
400 |
#else |
71e3aac07
|
401 402 403 404 405 |
static inline int PageTransHuge(struct page *page) { return 0; } |
936a5fe6e
|
406 407 408 409 410 |
static inline int PageTransCompound(struct page *page) { return 0; } #endif |
af8e3354b
|
411 |
#ifdef CONFIG_MMU |
33925b25d
|
412 413 |
#define __PG_MLOCKED (1 << PG_mlocked) #else |
b291f0003
|
414 |
#define __PG_MLOCKED 0 |
894bc3104
|
415 |
#endif |
e9da73d67
|
416 417 418 419 420 |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __PG_COMPOUND_LOCK (1 << PG_compound_lock) #else #define __PG_COMPOUND_LOCK 0 #endif |
dfa7e20cc
|
421 422 423 424 |
/* * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. */ |
79f4b7bf3
|
425 |
#define PAGE_FLAGS_CHECK_AT_FREE \ |
266cf658e
|
426 427 |
(1 << PG_lru | 1 << PG_locked | \ 1 << PG_private | 1 << PG_private_2 | \ |
5f24ce5fd
|
428 |
1 << PG_writeback | 1 << PG_reserved | \ |
266cf658e
|
429 |
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ |
e9da73d67
|
430 431 |
1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ __PG_COMPOUND_LOCK) |
dfa7e20cc
|
432 433 434 |
/* * Flags checked when a page is prepped for return by the page allocator. |
79f4b7bf3
|
435 436 |
* Pages being prepped should not have any flags set. It they are set, * there has been a kernel bug or struct page corruption. |
dfa7e20cc
|
437 |
*/ |
79f4b7bf3
|
438 |
#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) |
dfa7e20cc
|
439 |
|
edcf4748c
|
440 441 |
#define PAGE_FLAGS_PRIVATE \ (1 << PG_private | 1 << PG_private_2) |
266cf658e
|
442 443 444 445 446 447 448 |
/** * page_has_private - Determine if page has private stuff * @page: The page to be checked * * Determine if a page has private stuff, indicating that release routines * should be invoked upon it. */ |
edcf4748c
|
449 450 451 452 453 454 |
static inline int page_has_private(struct page *page) { return !!(page->flags & PAGE_FLAGS_PRIVATE); } #endif /* !__GENERATING_BOUNDS_H */ |
266cf658e
|
455 |
|
1da177e4c
|
456 |
#endif /* PAGE_FLAGS_H */ |