fs/proc/page.c
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
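/*
 * Added descriptive comment: copy kernel page-flag bit 'kbit' out of
 * 'kflags' and place it at user-visible bit position 'ubit'.
 */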
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

	return u;
};
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
module_init(proc_page_init);
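Both /proc/kpagecount and /proc/kpageflags are flat arrays of u64 entries indexed by page frame number, so userspace reads the entry for a PFN by seeking to pfn * 8 and reading 8 bytes. The sketch below is illustrative only and not part of this file: it assumes root privileges, the KPF_* bit numbers are taken from Documentation/vm/pagemap.txt, and read_u64() is a hypothetical helper name.

/*
 * Illustrative userspace sketch (not part of fs/proc/page.c): print the
 * map count and a few decoded flags for one physical page frame number.
 * Assumes root; KPF_* bit numbers per Documentation/vm/pagemap.txt.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define KPF_ANON   12
#define KPF_HUGE   17
#define KPF_NOPAGE 20

static int read_u64(const char *path, unsigned long pfn, uint64_t *val)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* each entry is one u64, so the entry for 'pfn' lives at offset pfn * 8 */
	ssize_t n = pread(fd, val, sizeof(*val), (off_t)pfn * sizeof(*val));
	close(fd);
	return n == sizeof(*val) ? 0 : -1;
}

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t count, flags;

	if (read_u64("/proc/kpagecount", pfn, &count) ||
	    read_u64("/proc/kpageflags", pfn, &flags)) {
		perror("reading /proc/kpage*");
		return 1;
	}

	printf("pfn %lu: mapcount %llu flags 0x%llx%s%s%s\n", pfn,
	       (unsigned long long)count, (unsigned long long)flags,
	       (flags >> KPF_NOPAGE) & 1 ? " nopage" : "",
	       (flags >> KPF_ANON) & 1 ? " anon" : "",
	       (flags >> KPF_HUGE) & 1 ? " huge" : "");
	return 0;
}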