Commit 6d80e53f0056178c63fa8fbf3e8de40fb4df5f50
1 parent: 97ce5d6dcb
Exists in master and in 7 other branches
proc: move pagecount stuff to fs/proc/page.c
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Showing 3 changed files with 148 additions and 135 deletions
fs/proc/Makefile
fs/proc/page.c
+#include <linux/bootmem.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+#define KPMSIZE sizeof(u64)
+#define KPMMASK (KPMSIZE - 1)
+/* /proc/kpagecount - an array exposing page counts
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page count.
+ */
+static ssize_t kpagecount_read(struct file *file, char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+        u64 __user *out = (u64 __user *)buf;
+        struct page *ppage;
+        unsigned long src = *ppos;
+        unsigned long pfn;
+        ssize_t ret = 0;
+        u64 pcount;
+
+        pfn = src / KPMSIZE;
+        count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
+        if (src & KPMMASK || count & KPMMASK)
+                return -EINVAL;
+
+        while (count > 0) {
+                ppage = NULL;
+                if (pfn_valid(pfn))
+                        ppage = pfn_to_page(pfn);
+                pfn++;
+                if (!ppage)
+                        pcount = 0;
+                else
+                        pcount = page_mapcount(ppage);
+
+                if (put_user(pcount, out++)) {
+                        ret = -EFAULT;
+                        break;
+                }
+
+                count -= KPMSIZE;
+        }
+
+        *ppos += (char __user *)out - buf;
+        if (!ret)
+                ret = (char __user *)out - buf;
+        return ret;
+}
+
+static const struct file_operations proc_kpagecount_operations = {
+        .llseek = mem_lseek,
+        .read = kpagecount_read,
+};
+
+/* /proc/kpageflags - an array exposing page flags
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page flags.
+ */
+
+/* These macros are used to decouple internal flags from exported ones */
+
+#define KPF_LOCKED 0
+#define KPF_ERROR 1
+#define KPF_REFERENCED 2
+#define KPF_UPTODATE 3
+#define KPF_DIRTY 4
+#define KPF_LRU 5
+#define KPF_ACTIVE 6
+#define KPF_SLAB 7
+#define KPF_WRITEBACK 8
+#define KPF_RECLAIM 9
+#define KPF_BUDDY 10
+
+#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+
+static ssize_t kpageflags_read(struct file *file, char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+        u64 __user *out = (u64 __user *)buf;
+        struct page *ppage;
+        unsigned long src = *ppos;
+        unsigned long pfn;
+        ssize_t ret = 0;
+        u64 kflags, uflags;
+
+        pfn = src / KPMSIZE;
+        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
+        if (src & KPMMASK || count & KPMMASK)
+                return -EINVAL;
+
+        while (count > 0) {
+                ppage = NULL;
+                if (pfn_valid(pfn))
+                        ppage = pfn_to_page(pfn);
+                pfn++;
+                if (!ppage)
+                        kflags = 0;
+                else
+                        kflags = ppage->flags;
+
+                uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
+                        kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
+                        kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
+                        kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
+                        kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
+                        kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
+                        kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
+                        kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
+                        kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
+                        kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
+                        kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
+
+                if (put_user(uflags, out++)) {
+                        ret = -EFAULT;
+                        break;
+                }
+
+                count -= KPMSIZE;
+        }
+
+        *ppos += (char __user *)out - buf;
+        if (!ret)
+                ret = (char __user *)out - buf;
+        return ret;
+}
+
+static const struct file_operations proc_kpageflags_operations = {
+        .llseek = mem_lseek,
+        .read = kpageflags_read,
+};
+
+static int __init proc_page_init(void)
+{
+        proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
+        proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
+        return 0;
+}
+module_init(proc_page_init);
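The two interfaces added above are flat, root-readable (S_IRUSR) arrays of u64 entries indexed by page frame number, so userspace samples them by seeking to pfn * 8 and reading 8 bytes. A minimal userspace sketch of the kpagecount side follows; it is not part of this commit, the program and its error handling are purely illustrative, and it assumes pread() is usable on the file (an lseek() + read() pair works the same way).

/* Illustrative only: read the map count for one physical page
 * from /proc/kpagecount, which the code above exposes as an
 * array of u64s indexed by PFN.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned long pfn;
        uint64_t pcount;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
                return 1;
        }
        pfn = strtoul(argv[1], NULL, 0);

        fd = open("/proc/kpagecount", O_RDONLY);
        if (fd < 0) {
                perror("open /proc/kpagecount");
                return 1;
        }
        /* Each entry is sizeof(u64) == 8 bytes; seek to the PFN's slot. */
        if (pread(fd, &pcount, sizeof(pcount),
                  (off_t)pfn * sizeof(pcount)) != sizeof(pcount)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("pfn %lu: mapcount %llu\n", pfn, (unsigned long long)pcount);
        close(fd);
        return 0;
}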
fs/proc/proc_misc.c
@@ -57,146 +57,11 @@
 #include <asm/div64.h>
 #include "internal.h"
 
-#ifdef CONFIG_PROC_PAGE_MONITOR
-#define KPMSIZE sizeof(u64)
-#define KPMMASK (KPMSIZE - 1)
-/* /proc/kpagecount - an array exposing page counts
- *
- * Each entry is a u64 representing the corresponding
- * physical page count.
- */
-static ssize_t kpagecount_read(struct file *file, char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-        u64 __user *out = (u64 __user *)buf;
-        struct page *ppage;
-        unsigned long src = *ppos;
-        unsigned long pfn;
-        ssize_t ret = 0;
-        u64 pcount;
-
-        pfn = src / KPMSIZE;
-        count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
-        if (src & KPMMASK || count & KPMMASK)
-                return -EINVAL;
-
-        while (count > 0) {
-                ppage = NULL;
-                if (pfn_valid(pfn))
-                        ppage = pfn_to_page(pfn);
-                pfn++;
-                if (!ppage)
-                        pcount = 0;
-                else
-                        pcount = page_mapcount(ppage);
-
-                if (put_user(pcount, out++)) {
-                        ret = -EFAULT;
-                        break;
-                }
-
-                count -= KPMSIZE;
-        }
-
-        *ppos += (char __user *)out - buf;
-        if (!ret)
-                ret = (char __user *)out - buf;
-        return ret;
-}
-
-static struct file_operations proc_kpagecount_operations = {
-        .llseek = mem_lseek,
-        .read = kpagecount_read,
-};
-
-/* /proc/kpageflags - an array exposing page flags
- *
- * Each entry is a u64 representing the corresponding
- * physical page flags.
- */
-
-/* These macros are used to decouple internal flags from exported ones */
-
-#define KPF_LOCKED 0
-#define KPF_ERROR 1
-#define KPF_REFERENCED 2
-#define KPF_UPTODATE 3
-#define KPF_DIRTY 4
-#define KPF_LRU 5
-#define KPF_ACTIVE 6
-#define KPF_SLAB 7
-#define KPF_WRITEBACK 8
-#define KPF_RECLAIM 9
-#define KPF_BUDDY 10
-
-#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
-
-static ssize_t kpageflags_read(struct file *file, char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-        u64 __user *out = (u64 __user *)buf;
-        struct page *ppage;
-        unsigned long src = *ppos;
-        unsigned long pfn;
-        ssize_t ret = 0;
-        u64 kflags, uflags;
-
-        pfn = src / KPMSIZE;
-        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
-        if (src & KPMMASK || count & KPMMASK)
-                return -EINVAL;
-
-        while (count > 0) {
-                ppage = NULL;
-                if (pfn_valid(pfn))
-                        ppage = pfn_to_page(pfn);
-                pfn++;
-                if (!ppage)
-                        kflags = 0;
-                else
-                        kflags = ppage->flags;
-
-                uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
-                        kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
-                        kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
-                        kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
-                        kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
-                        kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
-                        kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
-                        kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
-                        kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
-                        kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
-                        kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
-
-                if (put_user(uflags, out++)) {
-                        ret = -EFAULT;
-                        break;
-                }
-
-                count -= KPMSIZE;
-        }
-
-        *ppos += (char __user *)out - buf;
-        if (!ret)
-                ret = (char __user *)out - buf;
-        return ret;
-}
-
-static struct file_operations proc_kpageflags_operations = {
-        .llseek = mem_lseek,
-        .read = kpageflags_read,
-};
-#endif /* CONFIG_PROC_PAGE_MONITOR */
-
 void __init proc_misc_init(void)
 {
         proc_symlink("mounts", NULL, "self/mounts");
 
         /* And now for trickier ones */
-#ifdef CONFIG_PROC_PAGE_MONITOR
-        proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
-        proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
-#endif
 #ifdef CONFIG_PROC_VMCORE
         proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
 #endif
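The kpageflags entries removed here (and re-added in fs/proc/page.c above) use the exported KPF_* bit positions rather than the kernel-internal PG_* values; kpf_copy_bit() copies the bit at PG_x in page->flags into bit KPF_X of the exported word, so the ABI stays stable even if the kernel renumbers its own flags. A hypothetical userspace decoder for a single entry, again only a sketch and not part of the commit, might look like:

/* Illustrative only: decode a few exported flag bits for one PFN
 * from /proc/kpageflags. The KPF_* values mirror the defines in
 * the patch above; only a subset is checked here.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_LOCKED 0
#define KPF_DIRTY 4
#define KPF_LRU 5
#define KPF_ACTIVE 6
#define KPF_SLAB 7
#define KPF_BUDDY 10

int main(int argc, char **argv)
{
        unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
        uint64_t flags;
        int fd = open("/proc/kpageflags", O_RDONLY);

        if (fd < 0) {
                perror("open /proc/kpageflags");
                return 1;
        }
        if (pread(fd, &flags, sizeof(flags),
                  (off_t)pfn * sizeof(flags)) != sizeof(flags)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("pfn %lu:%s%s%s%s%s%s\n", pfn,
               flags & (1ULL << KPF_LOCKED) ? " locked" : "",
               flags & (1ULL << KPF_DIRTY)  ? " dirty"  : "",
               flags & (1ULL << KPF_LRU)    ? " lru"    : "",
               flags & (1ULL << KPF_ACTIVE) ? " active" : "",
               flags & (1ULL << KPF_SLAB)   ? " slab"   : "",
               flags & (1ULL << KPF_BUDDY)  ? " buddy"  : "");
        close(fd);
        return 0;
}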