Blame view
fs/proc/task_mmu.c
27.1 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 |
#include <linux/mm.h> #include <linux/hugetlb.h> |
22e057c59 smaps: teach smap... |
3 |
#include <linux/huge_mm.h> |
1da177e4c Linux-2.6.12-rc2 |
4 5 |
#include <linux/mount.h> #include <linux/seq_file.h> |
e070ad49f [PATCH] add /proc... |
6 |
#include <linux/highmem.h> |
5096add84 proc: maps protec... |
7 |
#include <linux/ptrace.h> |
5a0e3ad6a include cleanup: ... |
8 |
#include <linux/slab.h> |
6e21c8f14 [PATCH] /proc/<pi... |
9 10 |
#include <linux/pagemap.h> #include <linux/mempolicy.h> |
22e057c59 smaps: teach smap... |
11 |
#include <linux/rmap.h> |
85863e475 maps4: add /proc/... |
12 13 |
#include <linux/swap.h> #include <linux/swapops.h> |
e070ad49f [PATCH] add /proc... |
14 |
|
1da177e4c Linux-2.6.12-rc2 |
15 16 |
#include <asm/elf.h> #include <asm/uaccess.h> |
e070ad49f [PATCH] add /proc... |
17 |
#include <asm/tlbflush.h> |
1da177e4c Linux-2.6.12-rc2 |
18 |
#include "internal.h" |
df5f8314c proc: seqfile con... |
19 |
void task_mem(struct seq_file *m, struct mm_struct *mm) |
1da177e4c Linux-2.6.12-rc2 |
20 |
{ |
b084d4353 mm: count swap usage |
21 |
unsigned long data, text, lib, swap; |
365e9c87a [PATCH] mm: updat... |
22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; /* * Note: to minimize their overhead, mm maintains hiwater_vm and * hiwater_rss only when about to *lower* total_vm or rss. Any * collector of these hiwater stats must therefore get total_vm * and rss too, which will usually be the higher. Barriers? not * worth the effort, such snapshots can always be inconsistent. */ hiwater_vm = total_vm = mm->total_vm; if (hiwater_vm < mm->hiwater_vm) hiwater_vm = mm->hiwater_vm; hiwater_rss = total_rss = get_mm_rss(mm); if (hiwater_rss < mm->hiwater_rss) hiwater_rss = mm->hiwater_rss; |
1da177e4c Linux-2.6.12-rc2 |
37 38 39 40 |
data = mm->total_vm - mm->shared_vm - mm->stack_vm; text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; |
b084d4353 mm: count swap usage |
41 |
swap = get_mm_counter(mm, MM_SWAPENTS); |
df5f8314c proc: seqfile con... |
42 |
seq_printf(m, |
365e9c87a [PATCH] mm: updat... |
43 44 |
"VmPeak:\t%8lu kB " |
1da177e4c Linux-2.6.12-rc2 |
45 46 47 48 |
"VmSize:\t%8lu kB " "VmLck:\t%8lu kB " |
bc3e53f68 mm: distinguish b... |
49 50 |
"VmPin:\t%8lu kB " |
365e9c87a [PATCH] mm: updat... |
51 52 |
"VmHWM:\t%8lu kB " |
1da177e4c Linux-2.6.12-rc2 |
53 54 55 56 57 58 59 60 61 62 |
"VmRSS:\t%8lu kB " "VmData:\t%8lu kB " "VmStk:\t%8lu kB " "VmExe:\t%8lu kB " "VmLib:\t%8lu kB " |
b084d4353 mm: count swap usage |
63 64 65 66 |
"VmPTE:\t%8lu kB " "VmSwap:\t%8lu kB ", |
365e9c87a [PATCH] mm: updat... |
67 68 |
hiwater_vm << (PAGE_SHIFT-10), (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), |
1da177e4c Linux-2.6.12-rc2 |
69 |
mm->locked_vm << (PAGE_SHIFT-10), |
bc3e53f68 mm: distinguish b... |
70 |
mm->pinned_vm << (PAGE_SHIFT-10), |
365e9c87a [PATCH] mm: updat... |
71 72 |
hiwater_rss << (PAGE_SHIFT-10), total_rss << (PAGE_SHIFT-10), |
1da177e4c Linux-2.6.12-rc2 |
73 74 |
data << (PAGE_SHIFT-10), mm->stack_vm << (PAGE_SHIFT-10), text, lib, |
b084d4353 mm: count swap usage |
75 76 |
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10, swap << (PAGE_SHIFT-10)); |
1da177e4c Linux-2.6.12-rc2 |
77 78 79 80 81 82 |
} unsigned long task_vsize(struct mm_struct *mm) { return PAGE_SIZE * mm->total_vm; } |
a2ade7b6c proc: use unsigne... |
83 84 85 |
unsigned long task_statm(struct mm_struct *mm, unsigned long *shared, unsigned long *text, unsigned long *data, unsigned long *resident) |
1da177e4c Linux-2.6.12-rc2 |
86 |
{ |
d559db086 mm: clean up mm_c... |
87 |
*shared = get_mm_counter(mm, MM_FILEPAGES); |
1da177e4c Linux-2.6.12-rc2 |
88 89 90 |
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT; *data = mm->total_vm - mm->shared_vm; |
d559db086 mm: clean up mm_c... |
91 |
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES); |
1da177e4c Linux-2.6.12-rc2 |
92 93 |
return mm->total_vm; } |
1da177e4c Linux-2.6.12-rc2 |
94 95 96 97 98 99 100 |
static void pad_len_spaces(struct seq_file *m, int len) { len = 25 + sizeof(void*) * 6 - len; if (len < 1) len = 1; seq_printf(m, "%*c", len, ' '); } |
a6198797c maps4: regroup ta... |
101 102 103 104 105 106 107 108 |
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma) { if (vma && vma != priv->tail_vma) { struct mm_struct *mm = vma->vm_mm; up_read(&mm->mmap_sem); mmput(mm); } } |
ec4dd3eb3 maps4: add propor... |
109 |
|
a6198797c maps4: regroup ta... |
110 |
static void *m_start(struct seq_file *m, loff_t *pos) |
e070ad49f [PATCH] add /proc... |
111 |
{ |
a6198797c maps4: regroup ta... |
112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 |
struct proc_maps_private *priv = m->private; unsigned long last_addr = m->version; struct mm_struct *mm; struct vm_area_struct *vma, *tail_vma = NULL; loff_t l = *pos; /* Clear the per syscall fields in priv */ priv->task = NULL; priv->tail_vma = NULL; /* * We remember last_addr rather than next_addr to hit with * mmap_cache most of the time. We have zero last_addr at * the beginning and also after lseek. We will have -1 last_addr * after the end of the vmas. */ if (last_addr == -1UL) return NULL; priv->task = get_pid_task(priv->pid, PIDTYPE_PID); if (!priv->task) |
ec6fd8a43 report errors in ... |
134 |
return ERR_PTR(-ESRCH); |
a6198797c maps4: regroup ta... |
135 136 |
mm = mm_for_maps(priv->task); |
ec6fd8a43 report errors in ... |
137 138 |
if (!mm || IS_ERR(mm)) return mm; |
00f89d218 mm_for_maps: shif... |
139 |
down_read(&mm->mmap_sem); |
a6198797c maps4: regroup ta... |
140 |
|
31db58b3a mm: arch: make ge... |
141 |
tail_vma = get_gate_vma(priv->task->mm); |
a6198797c maps4: regroup ta... |
142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 |
priv->tail_vma = tail_vma; /* Start with last addr hint */ vma = find_vma(mm, last_addr); if (last_addr && vma) { vma = vma->vm_next; goto out; } /* * Check the vma index is within the range and do * sequential scan until m_index. */ vma = NULL; if ((unsigned long)l < mm->map_count) { vma = mm->mmap; while (l-- && vma) vma = vma->vm_next; goto out; } if (l != mm->map_count) tail_vma = NULL; /* After gate vma */ out: if (vma) return vma; /* End of vmas has been reached */ m->version = (tail_vma != NULL)? 0: -1UL; up_read(&mm->mmap_sem); mmput(mm); return tail_vma; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; struct vm_area_struct *tail_vma = priv->tail_vma; (*pos)++; if (vma && (vma != tail_vma) && vma->vm_next) return vma->vm_next; vma_stop(priv, vma); return (vma != tail_vma)? tail_vma: NULL; } static void m_stop(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; |
76597cd31 proc: fix oops on... |
194 195 |
if (!IS_ERR(vma)) vma_stop(priv, vma); |
a6198797c maps4: regroup ta... |
196 197 198 199 200 |
if (priv->task) put_task_struct(priv->task); } static int do_maps_open(struct inode *inode, struct file *file, |
03a44825b procfs: constify ... |
201 |
const struct seq_operations *ops) |
a6198797c maps4: regroup ta... |
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 |
{ struct proc_maps_private *priv; int ret = -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { priv->pid = proc_pid(inode); ret = seq_open(file, ops); if (!ret) { struct seq_file *m = file->private_data; m->private = priv; } else { kfree(priv); } } return ret; } |
e070ad49f [PATCH] add /proc... |
218 |
|
7c88db0cb proc: fix vma dis... |
219 |
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) |
1da177e4c Linux-2.6.12-rc2 |
220 |
{ |
e070ad49f [PATCH] add /proc... |
221 222 |
struct mm_struct *mm = vma->vm_mm; struct file *file = vma->vm_file; |
ca16d140a mm: don't access ... |
223 |
vm_flags_t flags = vma->vm_flags; |
1da177e4c Linux-2.6.12-rc2 |
224 |
unsigned long ino = 0; |
6260a4b05 /proc/pid/maps: d... |
225 |
unsigned long long pgoff = 0; |
a09a79f66 Don't lock guardp... |
226 |
unsigned long start, end; |
1da177e4c Linux-2.6.12-rc2 |
227 228 229 230 |
dev_t dev = 0; int len; if (file) { |
2fddfeefe [PATCH] proc: cha... |
231 |
struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
1da177e4c Linux-2.6.12-rc2 |
232 233 |
dev = inode->i_sb->s_dev; ino = inode->i_ino; |
6260a4b05 /proc/pid/maps: d... |
234 |
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; |
1da177e4c Linux-2.6.12-rc2 |
235 |
} |
d7824370e mm: fix up some u... |
236 237 |
/* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; |
a09a79f66 Don't lock guardp... |
238 239 240 241 242 |
if (stack_guard_page_start(vma, start)) start += PAGE_SIZE; end = vma->vm_end; if (stack_guard_page_end(vma, end)) end -= PAGE_SIZE; |
d7824370e mm: fix up some u... |
243 |
|
1804dc6e1 /proc/self/maps d... |
244 |
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", |
d7824370e mm: fix up some u... |
245 |
start, |
a09a79f66 Don't lock guardp... |
246 |
end, |
1da177e4c Linux-2.6.12-rc2 |
247 248 249 250 |
flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 'x' : '-', flags & VM_MAYSHARE ? 's' : 'p', |
6260a4b05 /proc/pid/maps: d... |
251 |
pgoff, |
1da177e4c Linux-2.6.12-rc2 |
252 253 254 255 256 257 |
MAJOR(dev), MINOR(dev), ino, &len); /* * Print the dentry name for named mappings, and a * special [heap] marker for the heap: */ |
e070ad49f [PATCH] add /proc... |
258 |
if (file) { |
1da177e4c Linux-2.6.12-rc2 |
259 |
pad_len_spaces(m, len); |
c32c2f63a d_path: Make seq_... |
260 261 |
seq_path(m, &file->f_path, " "); |
1da177e4c Linux-2.6.12-rc2 |
262 |
} else { |
e6e5494cb [PATCH] vdso: ran... |
263 264 265 |
const char *name = arch_vma_name(vma); if (!name) { if (mm) { |
0db0c01b5 procfs: fix /proc... |
266 267 |
if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { |
e6e5494cb [PATCH] vdso: ran... |
268 269 270 271 |
name = "[heap]"; } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { name = "[stack]"; |
1da177e4c Linux-2.6.12-rc2 |
272 |
} |
e6e5494cb [PATCH] vdso: ran... |
273 274 |
} else { name = "[vdso]"; |
1da177e4c Linux-2.6.12-rc2 |
275 |
} |
e6e5494cb [PATCH] vdso: ran... |
276 277 |
} if (name) { |
1da177e4c Linux-2.6.12-rc2 |
278 |
pad_len_spaces(m, len); |
e6e5494cb [PATCH] vdso: ran... |
279 |
seq_puts(m, name); |
1da177e4c Linux-2.6.12-rc2 |
280 281 282 283 |
} } seq_putc(m, ' '); |
7c88db0cb proc: fix vma dis... |
284 285 286 287 288 289 290 291 292 |
} static int show_map(struct seq_file *m, void *v) { struct vm_area_struct *vma = v; struct proc_maps_private *priv = m->private; struct task_struct *task = priv->task; show_map_vma(m, vma); |
e070ad49f [PATCH] add /proc... |
293 |
|
e070ad49f [PATCH] add /proc... |
294 |
if (m->count < m->size) /* vma is copied successfully */ |
31db58b3a mm: arch: make ge... |
295 296 |
m->version = (vma != get_gate_vma(task->mm)) ? vma->vm_start : 0; |
1da177e4c Linux-2.6.12-rc2 |
297 298 |
return 0; } |
03a44825b procfs: constify ... |
299 |
static const struct seq_operations proc_pid_maps_op = { |
a6198797c maps4: regroup ta... |
300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 |
.start = m_start, .next = m_next, .stop = m_stop, .show = show_map }; static int maps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_maps_op); } const struct file_operations proc_maps_operations = { .open = maps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; /* * Proportional Set Size(PSS): my share of RSS. * * PSS of a process is the count of pages it has in memory, where each * page is divided by the number of processes sharing it. So if a * process has 1000 pages all to itself, and 1000 shared with one other * process, its PSS will be 1500. * * To keep (accumulated) division errors low, we adopt a 64bit * fixed-point pss counter to minimize division errors. So (pss >> * PSS_SHIFT) would be the real byte count. * * A shift of 12 before division means (assuming 4K page size): * - 1M 3-user-pages add up to 8KB errors; * - supports mapcount up to 2^24, or 16M; * - supports PSS up to 2^52 bytes, or 4PB. */ #define PSS_SHIFT 12 |
1e8832811 maps4: make page ... |
336 |
#ifdef CONFIG_PROC_PAGE_MONITOR |
214e471ff smaps: account sw... |
337 |
struct mem_size_stats { |
a6198797c maps4: regroup ta... |
338 339 340 341 342 343 344 |
struct vm_area_struct *vma; unsigned long resident; unsigned long shared_clean; unsigned long shared_dirty; unsigned long private_clean; unsigned long private_dirty; unsigned long referenced; |
b40d4f84b /proc/pid/smaps: ... |
345 |
unsigned long anonymous; |
4031a219d smaps: have smaps... |
346 |
unsigned long anonymous_thp; |
214e471ff smaps: account sw... |
347 |
unsigned long swap; |
a6198797c maps4: regroup ta... |
348 349 |
u64 pss; }; |
ae11c4d9f smaps: break out ... |
350 351 |
/*
 * Account a single pte (or a huge-pmd treated as one entry of size
 * ptent_size) into the mem_size_stats in walk->private: swap, resident,
 * anonymous, referenced, shared/private dirty/clean, and fixed-point PSS
 * (entry size divided by the page's mapcount).
 */
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}
b3ae5acbb maps4: use pagewa... |
393 |
/*
 * pagewalk ->pmd_entry for smaps.  A stable transparent huge pmd is
 * accounted as one HPAGE_PMD_SIZE entry under page_table_lock; a
 * splitting one is waited out and then (like the normal case) the pte
 * range is walked entry by entry under the pte lock.
 */
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			smaps_pte_entry(*(pte_t *)pmd, addr,
					HPAGE_PMD_SIZE, walk);
			spin_unlock(&walk->mm->page_table_lock);
			mss->anonymous_thp += HPAGE_PMD_SIZE;
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
e070ad49f [PATCH] add /proc... |
428 429 |
static int show_smap(struct seq_file *m, void *v) { |
7c88db0cb proc: fix vma dis... |
430 431 |
struct proc_maps_private *priv = m->private; struct task_struct *task = priv->task; |
e070ad49f [PATCH] add /proc... |
432 |
struct vm_area_struct *vma = v; |
e070ad49f [PATCH] add /proc... |
433 |
struct mem_size_stats mss; |
2165009bd pagemap: pass mm ... |
434 435 436 437 438 |
struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range, .mm = vma->vm_mm, .private = &mss, }; |
e070ad49f [PATCH] add /proc... |
439 440 |
memset(&mss, 0, sizeof mss); |
b3ae5acbb maps4: use pagewa... |
441 |
mss.vma = vma; |
d82ef020c proc: pagemap: Ho... |
442 |
/* mmap_sem is held in m_start */ |
5ddfae16b [PATCH] smaps: hu... |
443 |
if (vma->vm_mm && !is_vm_hugetlb_page(vma)) |
2165009bd pagemap: pass mm ... |
444 |
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); |
4752c3697 maps4: simplify i... |
445 |
|
7c88db0cb proc: fix vma dis... |
446 |
show_map_vma(m, vma); |
4752c3697 maps4: simplify i... |
447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 |
seq_printf(m, "Size: %8lu kB " "Rss: %8lu kB " "Pss: %8lu kB " "Shared_Clean: %8lu kB " "Shared_Dirty: %8lu kB " "Private_Clean: %8lu kB " "Private_Dirty: %8lu kB " |
214e471ff smaps: account sw... |
463 464 |
"Referenced: %8lu kB " |
b40d4f84b /proc/pid/smaps: ... |
465 466 |
"Anonymous: %8lu kB " |
4031a219d smaps: have smaps... |
467 468 |
"AnonHugePages: %8lu kB " |
08fba6998 mm: report the pa... |
469 470 |
"Swap: %8lu kB " |
3340289dd mm: report the MM... |
471 472 |
"KernelPageSize: %8lu kB " |
2d90508f6 mm: smaps: export... |
473 474 475 476 |
"MMUPageSize: %8lu kB " "Locked: %8lu kB ", |
4752c3697 maps4: simplify i... |
477 478 479 480 481 482 483 |
(vma->vm_end - vma->vm_start) >> 10, mss.resident >> 10, (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), mss.shared_clean >> 10, mss.shared_dirty >> 10, mss.private_clean >> 10, mss.private_dirty >> 10, |
214e471ff smaps: account sw... |
484 |
mss.referenced >> 10, |
b40d4f84b /proc/pid/smaps: ... |
485 |
mss.anonymous >> 10, |
4031a219d smaps: have smaps... |
486 |
mss.anonymous_thp >> 10, |
08fba6998 mm: report the pa... |
487 |
mss.swap >> 10, |
3340289dd mm: report the MM... |
488 |
vma_kernel_pagesize(vma) >> 10, |
2d90508f6 mm: smaps: export... |
489 490 491 |
vma_mmu_pagesize(vma) >> 10, (vma->vm_flags & VM_LOCKED) ? (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); |
4752c3697 maps4: simplify i... |
492 |
|
7c88db0cb proc: fix vma dis... |
493 |
if (m->count < m->size) /* vma is copied successfully */ |
31db58b3a mm: arch: make ge... |
494 495 |
m->version = (vma != get_gate_vma(task->mm)) ? vma->vm_start : 0; |
7c88db0cb proc: fix vma dis... |
496 |
return 0; |
e070ad49f [PATCH] add /proc... |
497 |
} |
03a44825b procfs: constify ... |
498 |
static const struct seq_operations proc_pid_smaps_op = { |
a6198797c maps4: regroup ta... |
499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 |
.start = m_start, .next = m_next, .stop = m_stop, .show = show_smap }; static int smaps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_smaps_op); } const struct file_operations proc_smaps_operations = { .open = smaps_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, |
2165009bd pagemap: pass mm ... |
518 |
unsigned long end, struct mm_walk *walk) |
a6198797c maps4: regroup ta... |
519 |
{ |
2165009bd pagemap: pass mm ... |
520 |
struct vm_area_struct *vma = walk->private; |
a6198797c maps4: regroup ta... |
521 522 523 |
pte_t *pte, ptent; spinlock_t *ptl; struct page *page; |
033193275 pagewalk: only sp... |
524 |
split_huge_page_pmd(walk->mm, pmd); |
a6198797c maps4: regroup ta... |
525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 |
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; if (!pte_present(ptent)) continue; page = vm_normal_page(vma, addr, ptent); if (!page) continue; /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; } |
398499d5f pagemap clear_ref... |
543 544 545 |
#define CLEAR_REFS_ALL 1 #define CLEAR_REFS_ANON 2 #define CLEAR_REFS_MAPPED 3 |
f248dcb34 maps4: move clear... |
546 547 |
static ssize_t clear_refs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) |
b813e931b smaps: add clear_... |
548 |
{ |
f248dcb34 maps4: move clear... |
549 |
struct task_struct *task; |
fb92a4b06 fs/proc/task_mmu.... |
550 |
char buffer[PROC_NUMBUF]; |
f248dcb34 maps4: move clear... |
551 |
struct mm_struct *mm; |
b813e931b smaps: add clear_... |
552 |
struct vm_area_struct *vma; |
0a8cb8e34 fs/proc: convert ... |
553 554 |
int type; int rv; |
b813e931b smaps: add clear_... |
555 |
|
f248dcb34 maps4: move clear... |
556 557 558 559 560 |
memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; |
0a8cb8e34 fs/proc: convert ... |
561 562 563 |
rv = kstrtoint(strstrip(buffer), 10, &type); if (rv < 0) return rv; |
398499d5f pagemap clear_ref... |
564 |
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) |
f248dcb34 maps4: move clear... |
565 |
return -EINVAL; |
f248dcb34 maps4: move clear... |
566 567 568 569 570 |
task = get_proc_task(file->f_path.dentry->d_inode); if (!task) return -ESRCH; mm = get_task_mm(task); if (mm) { |
20cbc9726 Fix clear_refs_wr... |
571 572 573 574 |
struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range, .mm = mm, }; |
f248dcb34 maps4: move clear... |
575 |
down_read(&mm->mmap_sem); |
2165009bd pagemap: pass mm ... |
576 577 |
for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_walk.private = vma; |
398499d5f pagemap clear_ref... |
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 |
if (is_vm_hugetlb_page(vma)) continue; /* * Writing 1 to /proc/pid/clear_refs affects all pages. * * Writing 2 to /proc/pid/clear_refs only affects * Anonymous pages. * * Writing 3 to /proc/pid/clear_refs only affects file * mapped pages. */ if (type == CLEAR_REFS_ANON && vma->vm_file) continue; if (type == CLEAR_REFS_MAPPED && !vma->vm_file) continue; walk_page_range(vma->vm_start, vma->vm_end, &clear_refs_walk); |
2165009bd pagemap: pass mm ... |
595 |
} |
f248dcb34 maps4: move clear... |
596 597 598 599 600 |
flush_tlb_mm(mm); up_read(&mm->mmap_sem); mmput(mm); } put_task_struct(task); |
fb92a4b06 fs/proc/task_mmu.... |
601 602 |
return count; |
b813e931b smaps: add clear_... |
603 |
} |
f248dcb34 maps4: move clear... |
604 605 |
const struct file_operations proc_clear_refs_operations = { .write = clear_refs_write, |
6038f373a llseek: automatic... |
606 |
.llseek = noop_llseek, |
f248dcb34 maps4: move clear... |
607 |
}; |
85863e475 maps4: add /proc/... |
608 |
struct pagemapread { |
d82ef020c proc: pagemap: Ho... |
609 610 |
int pos, len; u64 *buffer; |
85863e475 maps4: add /proc/... |
611 |
}; |
f16278c67 Change pagemap ou... |
612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 |
#define PM_ENTRY_BYTES sizeof(u64) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) #define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) |
85863e475 maps4: add /proc/... |
627 628 629 630 631 |
#define PM_END_OF_BUFFER 1 static int add_to_pagemap(unsigned long addr, u64 pfn, struct pagemapread *pm) { |
d82ef020c proc: pagemap: Ho... |
632 633 |
pm->buffer[pm->pos++] = pfn; if (pm->pos >= pm->len) |
aae8679b0 pagemap: fix bug ... |
634 |
return PM_END_OF_BUFFER; |
85863e475 maps4: add /proc/... |
635 636 637 638 |
return 0; } static int pagemap_pte_hole(unsigned long start, unsigned long end, |
2165009bd pagemap: pass mm ... |
639 |
struct mm_walk *walk) |
85863e475 maps4: add /proc/... |
640 |
{ |
2165009bd pagemap: pass mm ... |
641 |
struct pagemapread *pm = walk->private; |
85863e475 maps4: add /proc/... |
642 643 644 645 646 647 648 649 650 |
unsigned long addr; int err = 0; for (addr = start; addr < end; addr += PAGE_SIZE) { err = add_to_pagemap(addr, PM_NOT_PRESENT, pm); if (err) break; } return err; } |
9d02dbc81 make swap_pte_to_... |
651 |
/* Pack a swap pte's type and offset into the pagemap PFN field. */
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

/*
 * Encode a pte as a 64-bit pagemap entry: swapped, present, or
 * (neither flag set) not present at all.
 */
static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;

	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}
85863e475 maps4: add /proc/... |
667 |
/*
 * pagewalk ->pmd_entry for pagemap_read(): emit one entry per page in
 * [addr, end).  Huge pmds are split first; addresses not covered by a
 * (non-hugetlb) vma are reported as PM_NOT_PRESENT.
 */
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	split_huge_page_pmd(walk->mm, pmd);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
1a5cb8146 pagemap: add #ifd... |
703 |
#ifdef CONFIG_HUGETLB_PAGE
/* Encode one page of a present huge pte; offset is in pages within it. */
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;

	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
5dc37642c mm hugetlb: add h... |
734 |
|
85863e475 maps4: add /proc/... |
735 736 737 |
/* * /proc/pid/pagemap - an array mapping virtual pages to pfns * |
f16278c67 Change pagemap ou... |
738 739 740 741 742 743 744 745 746 747 748 749 750 751 |
* For each page in the address space, this file contains one 64-bit entry * consisting of the following: * * Bits 0-55 page frame number (PFN) if present * Bits 0-4 swap type if swapped * Bits 5-55 swap offset if swapped * Bits 55-60 page shift (page size = 1<<page shift) * Bit 61 reserved for future use * Bit 62 page swapped * Bit 63 page present * * If the page is not present but in swap, then the PFN contains an * encoding of the swap file number and the page's offset into the * swap. Unmapped pages return a null PFN. This allows determining |
85863e475 maps4: add /proc/... |
752 753 754 755 756 757 758 |
* precisely which pages are mapped (or in swap) and comparing mapped * pages between processes. * * Efficient users of this interface will use /proc/pid/maps to * determine which areas of memory are actually mapped and llseek to * skip over unmapped regions. */ |
d82ef020c proc: pagemap: Ho... |
759 |
#define PAGEMAP_WALK_SIZE (PMD_SIZE) |
ea251c1d5 pagemap: set page... |
760 |
#define PAGEMAP_WALK_MASK (PMD_MASK) |
85863e475 maps4: add /proc/... |
761 762 763 764 |
/*
 * Read handler for /proc/<pid>/pagemap.
 *
 * Walks the target task's address space in PAGEMAP_WALK_SIZE chunks via
 * walk_page_range(), staging PM_ENTRY_BYTES-sized entries in pm.buffer and
 * copying each chunk out to the user buffer.  Both *ppos and count must be
 * multiples of PM_ENTRY_BYTES (the file is indexed by virtual pfn).
 *
 * Returns the number of bytes copied, 0 at/after the end of the address
 * space, or a negative errno.
 */
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;		/* kernel-side staging buffer state */
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;		/* byte offset requested by userspace */
	unsigned long svpfn;		/* starting virtual pfn from *ppos */
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	/* staging buffer holds one PAGEMAP_WALK_SIZE chunk's worth of entries */
	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);	/* PTR_ERR(NULL) == 0, i.e. EOF when no mm */
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		/* pm.pos entries were produced by the walk callbacks */
		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	/* a partial copy followed by an error still reports the copied bytes */
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
1e8832811 maps4: make page ... |
860 |
#endif /* CONFIG_PROC_PAGE_MONITOR */ |
85863e475 maps4: add /proc/... |
861 |
|
6e21c8f14 [PATCH] /proc/<pi... |
862 |
#ifdef CONFIG_NUMA |
6e21c8f14 [PATCH] /proc/<pi... |
863 |
|
f69ff943d mm: proc: move sh... |
864 865 866 867 868 869 870 871 872 873 874 |
/*
 * Per-VMA statistics accumulated while producing /proc/<pid>/numa_maps.
 * All counters are in units of base pages (huge mappings are accounted
 * as multiple base pages by gather_stats()).
 */
struct numa_maps {
	struct vm_area_struct *vma;	/* VMA currently being accounted */
	unsigned long pages;		/* total pages counted */
	unsigned long anon;		/* PageAnon pages */
	unsigned long active;		/* active or unevictable pages */
	unsigned long writeback;	/* pages under writeback */
	unsigned long mapcount_max;	/* highest page_mapcount() seen */
	unsigned long dirty;		/* pte-dirty or PageDirty pages */
	unsigned long swapcache;	/* pages in the swap cache */
	unsigned long node[MAX_NUMNODES];	/* per-node page counts */
};

/* seq_file private data: generic maps iterator state + stats accumulator. */
struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
eb4866d00 make /proc/$pid/n... |
879 880 |
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) |
f69ff943d mm: proc: move sh... |
881 882 |
{ int count = page_mapcount(page); |
eb4866d00 make /proc/$pid/n... |
883 |
md->pages += nr_pages; |
f69ff943d mm: proc: move sh... |
884 |
if (pte_dirty || PageDirty(page)) |
eb4866d00 make /proc/$pid/n... |
885 |
md->dirty += nr_pages; |
f69ff943d mm: proc: move sh... |
886 887 |
if (PageSwapCache(page)) |
eb4866d00 make /proc/$pid/n... |
888 |
md->swapcache += nr_pages; |
f69ff943d mm: proc: move sh... |
889 890 |
if (PageActive(page) || PageUnevictable(page)) |
eb4866d00 make /proc/$pid/n... |
891 |
md->active += nr_pages; |
f69ff943d mm: proc: move sh... |
892 893 |
if (PageWriteback(page)) |
eb4866d00 make /proc/$pid/n... |
894 |
md->writeback += nr_pages; |
f69ff943d mm: proc: move sh... |
895 896 |
if (PageAnon(page)) |
eb4866d00 make /proc/$pid/n... |
897 |
md->anon += nr_pages; |
f69ff943d mm: proc: move sh... |
898 899 900 |
if (count > md->mapcount_max) md->mapcount_max = count; |
eb4866d00 make /proc/$pid/n... |
901 |
md->node[page_to_nid(page)] += nr_pages; |
f69ff943d mm: proc: move sh... |
902 |
} |
3200a8aaa break out numa_ma... |
903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 |
/*
 * Decide whether a pte maps a page we should account in numa_maps.
 * Returns the backing page when it is present, normal, not reserved,
 * and lives on a node with (high) memory; NULL otherwise.
 */
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page || PageReserved(page))
		return NULL;

	if (!node_isset(page_to_nid(page), node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}
f69ff943d mm: proc: move sh... |
925 926 927 928 929 930 931 932 933 |
static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md; spinlock_t *ptl; pte_t *orig_pte; pte_t *pte; md = walk->private; |
32ef43848 teach /proc/$pid/... |
934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 |
spin_lock(&walk->mm->page_table_lock); if (pmd_trans_huge(*pmd)) { if (pmd_trans_splitting(*pmd)) { spin_unlock(&walk->mm->page_table_lock); wait_split_huge_page(md->vma->anon_vma, pmd); } else { pte_t huge_pte = *(pte_t *)pmd; struct page *page; page = can_gather_numa_stats(huge_pte, md->vma, addr); if (page) gather_stats(page, md, pte_dirty(huge_pte), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(&walk->mm->page_table_lock); return 0; } } else { spin_unlock(&walk->mm->page_table_lock); } |
f69ff943d mm: proc: move sh... |
953 954 |
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { |
3200a8aaa break out numa_ma... |
955 |
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); |
f69ff943d mm: proc: move sh... |
956 957 |
if (!page) continue; |
eb4866d00 make /proc/$pid/n... |
958 |
gather_stats(page, md, pte_dirty(*pte), 1); |
f69ff943d mm: proc: move sh... |
959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 |
} while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return 0; } #ifdef CONFIG_HUGETLB_PAGE static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md; struct page *page; if (pte_none(*pte)) return 0; page = pte_page(*pte); if (!page) return 0; md = walk->private; |
eb4866d00 make /proc/$pid/n... |
979 |
gather_stats(page, md, pte_dirty(*pte), 1); |
f69ff943d mm: proc: move sh... |
980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 |
return 0; } #else static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; } #endif /* * Display pages allocated per node and memory policy via /proc. */ static int show_numa_map(struct seq_file *m, void *v) { |
5b52fc890 proc: allocate st... |
996 997 |
struct numa_maps_private *numa_priv = m->private; struct proc_maps_private *proc_priv = &numa_priv->proc_maps; |
f69ff943d mm: proc: move sh... |
998 |
struct vm_area_struct *vma = v; |
5b52fc890 proc: allocate st... |
999 |
struct numa_maps *md = &numa_priv->md; |
f69ff943d mm: proc: move sh... |
1000 1001 1002 1003 1004 1005 1006 1007 1008 |
struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct mm_walk walk = {}; struct mempolicy *pol; int n; char buffer[50]; if (!mm) return 0; |
5b52fc890 proc: allocate st... |
1009 1010 |
/* Ensure we start with an empty set of numa_maps statistics. */ memset(md, 0, sizeof(*md)); |
f69ff943d mm: proc: move sh... |
1011 1012 1013 1014 1015 1016 1017 |
md->vma = vma; walk.hugetlb_entry = gather_hugetbl_stats; walk.pmd_entry = gather_pte_stats; walk.private = md; walk.mm = mm; |
5b52fc890 proc: allocate st... |
1018 |
pol = get_vma_policy(proc_priv->task, vma, vma->vm_start); |
f69ff943d mm: proc: move sh... |
1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 |
mpol_to_str(buffer, sizeof(buffer), pol, 0); mpol_cond_put(pol); seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { seq_printf(m, " file="); seq_path(m, &file->f_path, " \t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_printf(m, " heap"); } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { seq_printf(m, " stack"); } |
fc360bd9c /proc/self/numa_m... |
1034 1035 |
if (is_vm_hugetlb_page(vma)) seq_printf(m, " huge"); |
f69ff943d mm: proc: move sh... |
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 |
walk_page_range(vma->vm_start, vma->vm_end, &walk); if (!md->pages) goto out; if (md->anon) seq_printf(m, " anon=%lu", md->anon); if (md->dirty) seq_printf(m, " dirty=%lu", md->dirty); if (md->pages != md->anon && md->pages != md->dirty) seq_printf(m, " mapped=%lu", md->pages); if (md->mapcount_max > 1) seq_printf(m, " mapmax=%lu", md->mapcount_max); if (md->swapcache) seq_printf(m, " swapcache=%lu", md->swapcache); if (md->active < md->pages && !is_vm_hugetlb_page(vma)) seq_printf(m, " active=%lu", md->active); if (md->writeback) seq_printf(m, " writeback=%lu", md->writeback); for_each_node_state(n, N_HIGH_MEMORY) if (md->node[n]) seq_printf(m, " N%d=%lu", n, md->node[n]); out: seq_putc(m, ' '); |
f69ff943d mm: proc: move sh... |
1068 1069 |
if (m->count < m->size) |
5b52fc890 proc: allocate st... |
1070 |
m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0; |
f69ff943d mm: proc: move sh... |
1071 1072 |
return 0; } |
5b52fc890 proc: allocate st... |
1073 |
|
03a44825b procfs: constify ... |
1074 |
/* seq_file iterator for /proc/<pid>/numa_maps: reuses this file's generic
 * m_start/m_next/m_stop VMA iteration, formatting with show_numa_map(). */
static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_numa_map,
};
662795deb [PATCH] proc: Mov... |
1080 1081 1082 |
static int numa_maps_open(struct inode *inode, struct file *file) { |
5b52fc890 proc: allocate st... |
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 |
struct numa_maps_private *priv; int ret = -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { priv->proc_maps.pid = proc_pid(inode); ret = seq_open(file, &proc_pid_numa_maps_op); if (!ret) { struct seq_file *m = file->private_data; m->private = priv; } else { kfree(priv); } } return ret; |
662795deb [PATCH] proc: Mov... |
1097 |
} |
00977a59b [PATCH] mark stru... |
1098 |
/* file_operations for /proc/<pid>/numa_maps.
 * NOTE(review): seq_release_private presumably frees the m->private set up
 * by numa_maps_open() — verify against the seq_file implementation. */
const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
f69ff943d mm: proc: move sh... |
1104 |
#endif /* CONFIG_NUMA */ |