fs/proc/task_mmu.c

#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"RssAnon:\t%8lu kB\n"
		"RssFile:\t%8lu kB\n"
		"RssShmem:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		anon << (PAGE_SHIFT-10),
		file << (PAGE_SHIFT-10),
		shmem << (PAGE_SHIFT-10),
		mm->data_vm << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
	hugetlb_report_usage(m, mm);
}
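
/*
 * Example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * the counters above are kept in pages, so "x << (PAGE_SHIFT-10)"
 * converts a page count to kB.  300 resident pages print as
 * 300 << 2 = 1200 kB; the VmPTE/VmPMD byte counts use ">> 10" instead.
 */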
  
  unsigned long task_vsize(struct mm_struct *mm)
  {
  	return PAGE_SIZE * mm->total_vm;
  }

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
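
/*
 * Illustrative note (assuming the classic /proc/PID/statm layout):
 * the seven fields are size, resident, shared, text, lib, data and dt,
 * all in pages; task_statm() above supplies size (the return value),
 * resident, shared, text and data, while lib and dt read as 0.
 */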

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
  	mpol_get(priv->task_mempolicy);
  	task_unlock(task);
  }
  static void release_task_mempolicy(struct proc_maps_private *priv)
  {
  	mpol_put(priv->task_mempolicy);
  }
  #else
  static void hold_task_mempolicy(struct proc_maps_private *priv)
  {
  }
  static void release_task_mempolicy(struct proc_maps_private *priv)
  {
  }
  #endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}
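
/*
 * Illustrative note: m->version is used as a resume cursor here.  Once a
 * VMA has been fully copied to the seq_file buffer, m_cache_vma() stores
 * its vm_end (or -1UL at the end of the list), so the next read() can
 * restart the walk with find_vma() in m_start() instead of rescanning
 * the whole mm->mmap list from the beginning.
 */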

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr - 1);
		if (vma && vma->vm_start <= last_addr)
			vma = m_next_vma(priv, vma);
		if (vma)
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}
  
  static void *m_next(struct seq_file *m, void *v, loff_t *pos)
  {
  	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}
  
  static void m_stop(struct seq_file *m, void *v)
  {
  	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in multithreaded languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	end = vma->vm_end;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(priv, vma))
			name = "[stack]";
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
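
/*
 * Example output line (illustrative; field widths vary by arch):
 *
 *   00400000-0040c000 r-xp 00000000 08:01 1048602    /bin/cat
 *
 * i.e. start-end, r/w/x plus p(rivate) or s(hared), file offset,
 * device major:minor, inode number and, padded to the width set above,
 * the path or a [heap]/[stack]/[vdso] marker.
 */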

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

  static int show_pid_map(struct seq_file *m, void *v)
  {
  	return show_map(m, v, 1);
  }
  
  static int show_tid_map(struct seq_file *m, void *v)
  {
  	return show_map(m, v, 0);
  }

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
  
  const struct file_operations proc_tid_maps_operations = {
  	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
  };
  
  /*
   * Proportional Set Size(PSS): my share of RSS.
   *
   * PSS of a process is the count of pages it has in memory, where each
   * page is divided by the number of processes sharing it.  So if a
   * process has 1000 pages all to itself, and 1000 shared with one other
   * process, its PSS will be 1500.
   *
   * To keep (accumulated) division errors low, we adopt a 64bit
   * fixed-point pss counter to minimize division errors. So (pss >>
   * PSS_SHIFT) would be the real byte count.
   *
   * A shift of 12 before division means (assuming 4K page size):
   * 	- 1M 3-user-pages add up to 8KB errors;
   * 	- supports mapcount up to 2^24, or 16M;
   * 	- supports PSS up to 2^52 bytes, or 4PB.
   */
  #define PSS_SHIFT 12
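
/*
 * Worked example (illustrative, assuming 4 KiB pages): a page mapped by
 * three processes contributes (4096 << PSS_SHIFT) / 3 = 5592405 to pss,
 * and "pss >> PSS_SHIFT" later reports that as 1365 bytes versus the
 * exact 1365.33, so the per-page rounding error stays below one byte.
 */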

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 swap_pss;
	bool check_shmem_swap;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty)
{
	int i, nr = compound ? 1 << compound_order(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page mapped with PTE it would elevate
	 * page_count().
	 */
	if (page_count(page) == 1) {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
		return;
	}

	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2) {
			if (dirty || PageDirty(page))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (dirty || PageDirty(page))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += PAGE_SIZE << PSS_SHIFT;
		}
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = find_get_entry(vma->vm_file->f_mapping,
						linear_page_index(vma, addr));
		if (!page)
			return;

		if (radix_tree_exceptional_entry(page))
			mss->swap += PAGE_SIZE;
		else
			put_page(page);

		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		VM_BUG_ON_PAGE(1, page);
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
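
/*
 * Example output (illustrative): a plain private read-write anonymous
 * mapping typically shows up in /proc/PID/smaps as
 *
 *   VmFlags: rd wr mr mw me ac
 *
 * using the two-letter mnemonics from the table above.
 */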

#ifdef CONFIG_HUGETLB_PAGE
  static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
  				 unsigned long addr, unsigned long end,
  				 struct mm_walk *walk)
  {
  	struct mem_size_stats *mss = walk->private;
  	struct vm_area_struct *vma = walk->vma;
  	struct page *page = NULL;
  
  	if (pte_present(*pte)) {
  		page = vm_normal_page(vma, addr, *pte);
  	} else if (is_swap_pte(*pte)) {
  		swp_entry_t swpent = pte_to_swp_entry(*pte);
  
  		if (is_migration_entry(swpent))
  			page = migration_entry_to_page(swpent);
  	}
  	if (page) {
  		int mapcount = page_mapcount(page);
  
  		if (mapcount >= 2)
  			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
  		else
  			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
  	}
  	return 0;
  }
  #endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
  {
  }

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss.swap = shmem_swapped;
		} else {
			mss.check_shmem_swap = true;
			smaps_walk.pte_hole = smaps_pte_hole;
		}
	}
#endif
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "ShmemPmdMapped: %8lu kB\n"
		   "Shared_Hugetlb: %8lu kB\n"
		   "Private_Hugetlb: %7lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.shmem_thp >> 10,
		   mss.shared_hugetlb >> 10,
		   mss.private_hugetlb >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	arch_show_smap(m, vma);
	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
  {
  	return show_smap(m, v, 1);
  }
  
  static int show_tid_smap(struct seq_file *m, void *v)
  {
  	return show_smap(m, v, 0);
  }

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
  
  const struct file_operations proc_tid_smaps_operations = {
  	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
  #else
  static inline void clear_soft_dirty(struct vm_area_struct *vma,
  		unsigned long addr, pte_t *pte)
  {
  }
  #endif
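
/*
 * Illustrative usage of the soft-dirty machinery above: a checkpointing
 * tool writes "4" to /proc/<pid>/clear_refs, which write-protects the
 * PTEs and drops their soft-dirty bits; any later store faults and sets
 * the bit again, so re-reading /proc/<pid>/pagemap (bit 55 of each
 * entry) shows exactly the pages touched since the clear.
 */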

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	/* See comment in change_huge_pmd() */
	pmdp_invalidate(vma, addr, pmdp);
	if (pmd_dirty(*pmdp))
		pmd = pmd_mkdirty(pmd);
	if (pmd_young(*pmdp))
		pmd = pmd_mkyoung(pmd);

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
  static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
  		unsigned long addr, pmd_t *pmdp)
  {
  }
  #endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}
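
/*
 * Usage example (illustrative): from userspace the walk above is driven
 * by writes such as "echo 1 > /proc/<pid>/clear_refs" (all pages),
 * "echo 2" (anonymous only), "echo 3" (file-backed only), "echo 4"
 * (clear soft-dirty bits) or "echo 5" (reset the VmHWM peak RSS).
 */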

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			if (down_write_killable(&mm->mmap_sem)) {
				count = -EINTR;
				goto out_mm;
			}

			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				if (down_write_killable(&mm->mmap_sem)) {
					count = -EINTR;
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER    1
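
/*
 * Illustrative sketch of how a userspace reader could decode one 64-bit
 * entry from /proc/<pid>/pagemap, assuming the bit layout defined above
 * (PFN in the low 55 bits, only reported to privileged readers):
 *
 *	uint64_t pme;                          // one 8-byte entry per page
 *	int present    = (pme >> 63) & 1;      // PM_PRESENT
 *	int swapped    = (pme >> 62) & 1;      // PM_SWAP
 *	int file_shared = (pme >> 61) & 1;     // PM_FILE
 *	int exclusive  = (pme >> 56) & 1;      // PM_MMAP_EXCLUSIVE
 *	int soft_dirty = (pme >> 55) & 1;      // PM_SOFT_DIRTY
 *	uint64_t pfn   = pme & ((1ULL << 55) - 1); // valid only if present
 */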

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
  
  static int pagemap_pte_hole(unsigned long start, unsigned long end,
2165009bd   Dave Hansen   pagemap: pass mm ...
1102
  				struct mm_walk *walk)
85863e475   Matt Mackall   maps4: add /proc/...
1103
  {
2165009bd   Dave Hansen   pagemap: pass mm ...
1104
  	struct pagemapread *pm = walk->private;
68b5a6524   Peter Feiner   mm: softdirty: re...
1105
  	unsigned long addr = start;
85863e475   Matt Mackall   maps4: add /proc/...
1106
  	int err = 0;
092b50bac   Naoya Horiguchi   pagemap: introduc...
1107

68b5a6524   Peter Feiner   mm: softdirty: re...
1108
1109
  	while (addr < end) {
  		struct vm_area_struct *vma = find_vma(walk->mm, addr);
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1110
  		pagemap_entry_t pme = make_pme(0, 0);
87e6d49a0   Peter Feiner   mm: softdirty: ad...
1111
1112
  		/* End of address space hole, which we mark as non-present. */
  		unsigned long hole_end;
68b5a6524   Peter Feiner   mm: softdirty: re...
1113

87e6d49a0   Peter Feiner   mm: softdirty: ad...
1114
1115
1116
1117
1118
1119
1120
1121
1122
  		if (vma)
  			hole_end = min(end, vma->vm_start);
  		else
  			hole_end = end;
  
  		for (; addr < hole_end; addr += PAGE_SIZE) {
  			err = add_to_pagemap(addr, &pme, pm);
  			if (err)
  				goto out;
68b5a6524   Peter Feiner   mm: softdirty: re...
1123
  		}
87e6d49a0   Peter Feiner   mm: softdirty: ad...
1124
1125
1126
1127
1128
  		if (!vma)
  			break;
  
  		/* Addresses in the VMA. */
  		if (vma->vm_flags & VM_SOFTDIRTY)
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1129
  			pme = make_pme(0, PM_SOFT_DIRTY);
87e6d49a0   Peter Feiner   mm: softdirty: ad...
1130
  		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
68b5a6524   Peter Feiner   mm: softdirty: re...
1131
1132
1133
1134
  			err = add_to_pagemap(addr, &pme, pm);
  			if (err)
  				goto out;
  		}
85863e475   Matt Mackall   maps4: add /proc/...
1135
  	}
68b5a6524   Peter Feiner   mm: softdirty: re...
1136
  out:
85863e475   Matt Mackall   maps4: add /proc/...
1137
1138
  	return err;
  }
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1139
  static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
052fb0d63   Konstantin Khlebnikov   proc: report file...
1140
  		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
85863e475   Matt Mackall   maps4: add /proc/...
1141
  {
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1142
  	u64 frame = 0, flags = 0;
052fb0d63   Konstantin Khlebnikov   proc: report file...
1143
  	struct page *page = NULL;
85863e475   Matt Mackall   maps4: add /proc/...
1144

052fb0d63   Konstantin Khlebnikov   proc: report file...
1145
  	if (pte_present(pte)) {
1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1146
1147
  		if (pm->show_pfn)
  			frame = pte_pfn(pte);
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1148
  		flags |= PM_PRESENT;
052fb0d63   Konstantin Khlebnikov   proc: report file...
1149
  		page = vm_normal_page(vma, addr, pte);
e9cdd6e77   Cyrill Gorcunov   mm: /proc/pid/pag...
1150
  		if (pte_soft_dirty(pte))
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1151
  			flags |= PM_SOFT_DIRTY;
052fb0d63   Konstantin Khlebnikov   proc: report file...
1152
  	} else if (is_swap_pte(pte)) {
179ef71cb   Cyrill Gorcunov   mm: save soft-dir...
1153
1154
  		swp_entry_t entry;
  		if (pte_swp_soft_dirty(pte))
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1155
  			flags |= PM_SOFT_DIRTY;
179ef71cb   Cyrill Gorcunov   mm: save soft-dir...
1156
  		entry = pte_to_swp_entry(pte);
052fb0d63   Konstantin Khlebnikov   proc: report file...
1157
1158
  		frame = swp_type(entry) |
  			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1159
  		flags |= PM_SWAP;
052fb0d63   Konstantin Khlebnikov   proc: report file...
1160
1161
  		if (is_migration_entry(entry))
  			page = migration_entry_to_page(entry);
052fb0d63   Konstantin Khlebnikov   proc: report file...
1162
1163
1164
1165
  	}
  
  	if (page && !PageAnon(page))
  		flags |= PM_FILE;
77bb499bb   Konstantin Khlebnikov   pagemap: add mmap...
1166
1167
  	if (page && page_mapcount(page) == 1)
  		flags |= PM_MMAP_EXCLUSIVE;
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1168
1169
  	if (vma->vm_flags & VM_SOFTDIRTY)
  		flags |= PM_SOFT_DIRTY;
052fb0d63   Konstantin Khlebnikov   proc: report file...
1170

deb945441   Konstantin Khlebnikov   pagemap: switch t...
1171
  	return make_pme(frame, flags);
bcf8039ed   Dave Hansen   pagemap: fix larg...
1172
  }
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1173
  static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
2165009bd   Dave Hansen   pagemap: pass mm ...
1174
  			     struct mm_walk *walk)
85863e475   Matt Mackall   maps4: add /proc/...
1175
  {
f995ece24   Naoya Horiguchi   pagemap: use walk...
1176
  	struct vm_area_struct *vma = walk->vma;
2165009bd   Dave Hansen   pagemap: pass mm ...
1177
  	struct pagemapread *pm = walk->private;
bf929152e   Kirill A. Shutemov   mm, thp: change p...
1178
  	spinlock_t *ptl;
05fbf357d   Konstantin Khlebnikov   proc/pagemap: wal...
1179
  	pte_t *pte, *orig_pte;
85863e475   Matt Mackall   maps4: add /proc/...
1180
  	int err = 0;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1181
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
b6ec57f4b   Kirill A. Shutemov   thp: change pmd_t...
1182
1183
  	ptl = pmd_trans_huge_lock(pmdp, vma);
  	if (ptl) {
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1184
1185
  		u64 flags = 0, frame = 0;
  		pmd_t pmd = *pmdp;
0f8975ec4   Pavel Emelyanov   mm: soft-dirty bi...
1186

356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1187
  		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1188
  			flags |= PM_SOFT_DIRTY;
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1189

356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1190
1191
1192
1193
1194
1195
1196
  		/*
  		 * Currently a pmd for thp is always present because thp
  		 * cannot be swapped out, migrated, or HWPOISONed
  		 * (it is split in such cases instead).
  		 * This if-check is just to prepare for a future implementation.
  		 */
  		if (pmd_present(pmd)) {
77bb499bb   Konstantin Khlebnikov   pagemap: add mmap...
1197
1198
1199
1200
  			struct page *page = pmd_page(pmd);
  
  			if (page_mapcount(page) == 1)
  				flags |= PM_MMAP_EXCLUSIVE;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1201
  			flags |= PM_PRESENT;
1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1202
1203
1204
  			if (pm->show_pfn)
  				frame = pmd_pfn(pmd) +
  					((addr & ~PMD_MASK) >> PAGE_SHIFT);
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1205
  		}
025c5b245   Naoya Horiguchi   thp: optimize awa...
1206
  		for (; addr != end; addr += PAGE_SIZE) {
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1207
  			pagemap_entry_t pme = make_pme(frame, flags);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1208

092b50bac   Naoya Horiguchi   pagemap: introduc...
1209
  			err = add_to_pagemap(addr, &pme, pm);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1210
1211
  			if (err)
  				break;
1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1212
  			if (pm->show_pfn && (flags & PM_PRESENT))
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1213
  				frame++;
5aaabe831   Naoya Horiguchi   pagemap: avoid sp...
1214
  		}
bf929152e   Kirill A. Shutemov   mm, thp: change p...
1215
  		spin_unlock(ptl);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1216
  		return err;
5aaabe831   Naoya Horiguchi   pagemap: avoid sp...
1217
  	}
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1218
  	if (pmd_trans_unstable(pmdp))
45f83cefe   Andrea Arcangeli   mm: thp: fix up p...
1219
  		return 0;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1220
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
81d0fa623   Peter Feiner   mm: softdirty: un...
1221

f995ece24   Naoya Horiguchi   pagemap: use walk...
1222
1223
1224
1225
  	/*
  	 * We can assume that @vma always points to a valid one and @end never
  	 * goes beyond vma->vm_end.
  	 */
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1226
  	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
f995ece24   Naoya Horiguchi   pagemap: use walk...
1227
1228
  	for (; addr < end; pte++, addr += PAGE_SIZE) {
  		pagemap_entry_t pme;
05fbf357d   Konstantin Khlebnikov   proc/pagemap: wal...
1229

deb945441   Konstantin Khlebnikov   pagemap: switch t...
1230
  		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
f995ece24   Naoya Horiguchi   pagemap: use walk...
1231
  		err = add_to_pagemap(addr, &pme, pm);
05fbf357d   Konstantin Khlebnikov   proc/pagemap: wal...
1232
  		if (err)
81d0fa623   Peter Feiner   mm: softdirty: un...
1233
  			break;
85863e475   Matt Mackall   maps4: add /proc/...
1234
  	}
f995ece24   Naoya Horiguchi   pagemap: use walk...
1235
  	pte_unmap_unlock(orig_pte, ptl);
85863e475   Matt Mackall   maps4: add /proc/...
1236
1237
1238
1239
1240
  
  	cond_resched();
  
  	return err;
  }
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1241
  #ifdef CONFIG_HUGETLB_PAGE
116354d17   Naoya Horiguchi   pagemap: fix pfn ...
1242
  /* This function walks within one hugetlb entry in a single call */
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1243
  static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
116354d17   Naoya Horiguchi   pagemap: fix pfn ...
1244
1245
  				 unsigned long addr, unsigned long end,
  				 struct mm_walk *walk)
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1246
  {
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1247
  	struct pagemapread *pm = walk->private;
f995ece24   Naoya Horiguchi   pagemap: use walk...
1248
  	struct vm_area_struct *vma = walk->vma;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1249
  	u64 flags = 0, frame = 0;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1250
  	int err = 0;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1251
  	pte_t pte;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1252

f995ece24   Naoya Horiguchi   pagemap: use walk...
1253
  	if (vma->vm_flags & VM_SOFTDIRTY)
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1254
  		flags |= PM_SOFT_DIRTY;
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1255

356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1256
1257
1258
1259
1260
1261
  	pte = huge_ptep_get(ptep);
  	if (pte_present(pte)) {
  		struct page *page = pte_page(pte);
  
  		if (!PageAnon(page))
  			flags |= PM_FILE;
77bb499bb   Konstantin Khlebnikov   pagemap: add mmap...
1262
1263
  		if (page_mapcount(page) == 1)
  			flags |= PM_MMAP_EXCLUSIVE;
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1264
  		flags |= PM_PRESENT;
1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1265
1266
1267
  		if (pm->show_pfn)
  			frame = pte_pfn(pte) +
  				((addr & ~hmask) >> PAGE_SHIFT);
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1268
  	}
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1269
  	for (; addr != end; addr += PAGE_SIZE) {
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1270
  		pagemap_entry_t pme = make_pme(frame, flags);
092b50bac   Naoya Horiguchi   pagemap: introduc...
1271
  		err = add_to_pagemap(addr, &pme, pm);
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1272
1273
  		if (err)
  			return err;
1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1274
  		if (pm->show_pfn && (flags & PM_PRESENT))
356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1275
  			frame++;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1276
1277
1278
1279
1280
1281
  	}
  
  	cond_resched();
  
  	return err;
  }
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1282
  #endif /* CONFIG_HUGETLB_PAGE */
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1283

85863e475   Matt Mackall   maps4: add /proc/...
1284
1285
1286
  /*
   * /proc/pid/pagemap - an array mapping virtual pages to pfns
   *
f16278c67   Hans Rosenfeld   Change pagemap ou...
1287
1288
1289
   * For each page in the address space, this file contains one 64-bit entry
   * consisting of the following:
   *
052fb0d63   Konstantin Khlebnikov   proc: report file...
1290
   * Bits 0-54  page frame number (PFN) if present
f16278c67   Hans Rosenfeld   Change pagemap ou...
1291
   * Bits 0-4   swap type if swapped
052fb0d63   Konstantin Khlebnikov   proc: report file...
1292
   * Bits 5-54  swap offset if swapped
deb945441   Konstantin Khlebnikov   pagemap: switch t...
1293
   * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
77bb499bb   Konstantin Khlebnikov   pagemap: add mmap...
1294
1295
   * Bit  56    page exclusively mapped
   * Bits 57-60 zero
052fb0d63   Konstantin Khlebnikov   proc: report file...
1296
   * Bit  61    page is file-page or shared-anon
f16278c67   Hans Rosenfeld   Change pagemap ou...
1297
1298
1299
1300
1301
1302
   * Bit  62    page swapped
   * Bit  63    page present
   *
   * If the page is not present but in swap, then the PFN contains an
   * encoding of the swap file number and the page's offset into the
   * swap. Unmapped pages return a null PFN. This allows determining
85863e475   Matt Mackall   maps4: add /proc/...
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
   * precisely which pages are mapped (or in swap) and comparing mapped
   * pages between processes.
   *
   * Efficient users of this interface will use /proc/pid/maps to
   * determine which areas of memory are actually mapped and llseek to
   * skip over unmapped regions.
   */
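As an illustrative sketch only (not part of this file), the layout documented above can be consumed from userspace by seeking to the 64-bit entry for one virtual page and masking out the fields; the file name below is an assumption for the example, the calls are plain POSIX, and the bit positions mirror the comment above:

  /* pagemap_demo.c - userspace sketch for decoding one pagemap entry. */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
  	long page_size = sysconf(_SC_PAGESIZE);
  	char *buf = malloc(page_size);
  	uint64_t entry;
  	int fd;

  	memset(buf, 1, page_size);	/* fault the page in so it is present */

  	fd = open("/proc/self/pagemap", O_RDONLY);
  	if (fd < 0)
  		return 1;

  	/* One 64-bit entry per virtual page: seek straight to buf's entry. */
  	off_t off = (off_t)((uintptr_t)buf / page_size) * sizeof(entry);
  	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
  		return 1;

  	printf("present    %d\n", (int)((entry >> 63) & 1));
  	printf("swapped    %d\n", (int)((entry >> 62) & 1));
  	printf("file/shm   %d\n", (int)((entry >> 61) & 1));
  	printf("exclusive  %d\n", (int)((entry >> 56) & 1));
  	printf("soft-dirty %d\n", (int)((entry >> 55) & 1));
  	/* Bits 0-54: PFN; reads back as 0 without CAP_SYS_ADMIN (pm.show_pfn below). */
  	printf("pfn        0x%llx\n", (unsigned long long)(entry & ((1ULL << 55) - 1)));

  	close(fd);
  	free(buf);
  	return 0;
  }

Pairing this with /proc/self/maps and lseek(), as the comment suggests, avoids scanning unmapped holes.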
  static ssize_t pagemap_read(struct file *file, char __user *buf,
  			    size_t count, loff_t *ppos)
  {
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1313
  	struct mm_struct *mm = file->private_data;
85863e475   Matt Mackall   maps4: add /proc/...
1314
  	struct pagemapread pm;
ee1e6ab60   Alexey Dobriyan   proc: fix /proc/*...
1315
  	struct mm_walk pagemap_walk = {};
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1316
1317
1318
1319
  	unsigned long src;
  	unsigned long svpfn;
  	unsigned long start_vaddr;
  	unsigned long end_vaddr;
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1320
  	int ret = 0, copied = 0;
85863e475   Matt Mackall   maps4: add /proc/...
1321

a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1322
  	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
85863e475   Matt Mackall   maps4: add /proc/...
1323
  		goto out;
85863e475   Matt Mackall   maps4: add /proc/...
1324
1325
  	ret = -EINVAL;
  	/* file position must be aligned */
aae8679b0   Thomas Tuttle   pagemap: fix bug ...
1326
  	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1327
  		goto out_mm;
85863e475   Matt Mackall   maps4: add /proc/...
1328
1329
  
  	ret = 0;
081617863   Vitaly Mayatskikh   pagemap: require ...
1330
  	if (!count)
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1331
  		goto out_mm;
081617863   Vitaly Mayatskikh   pagemap: require ...
1332

1c90308e7   Konstantin Khlebnikov   pagemap: hide phy...
1333
1334
  	/* do not disclose physical addresses: attack vector */
  	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
8c8296223   yonghua zheng   fs/proc/task_mmu....
1335
1336
  	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
  	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1337
  	ret = -ENOMEM;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1338
  	if (!pm.buffer)
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1339
  		goto out_mm;
85863e475   Matt Mackall   maps4: add /proc/...
1340

356515e7b   Konstantin Khlebnikov   pagemap: rework h...
1341
  	pagemap_walk.pmd_entry = pagemap_pmd_range;
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1342
  	pagemap_walk.pte_hole = pagemap_pte_hole;
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1343
  #ifdef CONFIG_HUGETLB_PAGE
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1344
  	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1345
  #endif
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1346
1347
1348
1349
1350
1351
  	pagemap_walk.mm = mm;
  	pagemap_walk.private = &pm;
  
  	src = *ppos;
  	svpfn = src / PM_ENTRY_BYTES;
  	start_vaddr = svpfn << PAGE_SHIFT;
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1352
  	end_vaddr = mm->task_size;
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1353
1354
  
  	/* watch out for wraparound */
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1355
  	if (svpfn > mm->task_size >> PAGE_SHIFT)
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1356
1357
1358
1359
1360
1361
1362
1363
  		start_vaddr = end_vaddr;
  
  	/*
  	 * The odds are that this will stop walking way
  	 * before end_vaddr, because the length of the
  	 * user buffer is tracked in "pm", and the walk
  	 * will stop when we hit the end of the buffer.
  	 */
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1364
1365
1366
1367
1368
1369
  	ret = 0;
  	while (count && (start_vaddr < end_vaddr)) {
  		int len;
  		unsigned long end;
  
  		pm.pos = 0;
ea251c1d5   Naoya Horiguchi   pagemap: set page...
1370
  		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1371
1372
1373
1374
1375
1376
1377
1378
1379
  		/* overflow ? */
  		if (end < start_vaddr || end > end_vaddr)
  			end = end_vaddr;
  		down_read(&mm->mmap_sem);
  		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
  		up_read(&mm->mmap_sem);
  		start_vaddr = end;
  
  		len = min(count, PM_ENTRY_BYTES * pm.pos);
309361e09   Dan Carpenter   proc: copy_to_use...
1380
  		if (copy_to_user(buf, pm.buffer, len)) {
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1381
  			ret = -EFAULT;
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1382
  			goto out_free;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1383
1384
1385
1386
  		}
  		copied += len;
  		buf += len;
  		count -= len;
85863e475   Matt Mackall   maps4: add /proc/...
1387
  	}
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1388
1389
1390
  	*ppos += copied;
  	if (!ret || ret == PM_END_OF_BUFFER)
  		ret = copied;
98bc93e50   KOSAKI Motohiro   proc: fix pagemap...
1391
1392
  out_free:
  	kfree(pm.buffer);
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1393
1394
  out_mm:
  	mmput(mm);
85863e475   Matt Mackall   maps4: add /proc/...
1395
1396
1397
  out:
  	return ret;
  }
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1398
1399
  static int pagemap_open(struct inode *inode, struct file *file)
  {
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1400
  	struct mm_struct *mm;
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
  	mm = proc_mem_open(inode, PTRACE_MODE_READ);
  	if (IS_ERR(mm))
  		return PTR_ERR(mm);
  	file->private_data = mm;
  	return 0;
  }
  
  static int pagemap_release(struct inode *inode, struct file *file)
  {
  	struct mm_struct *mm = file->private_data;
  
  	if (mm)
  		mmdrop(mm);
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1414
1415
  	return 0;
  }
85863e475   Matt Mackall   maps4: add /proc/...
1416
1417
1418
  const struct file_operations proc_pagemap_operations = {
  	.llseek		= mem_lseek, /* borrow this */
  	.read		= pagemap_read,
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1419
  	.open		= pagemap_open,
a06db751c   Konstantin Khlebnikov   pagemap: check pe...
1420
  	.release	= pagemap_release,
85863e475   Matt Mackall   maps4: add /proc/...
1421
  };
1e8832811   Matt Mackall   maps4: make page ...
1422
  #endif /* CONFIG_PROC_PAGE_MONITOR */
85863e475   Matt Mackall   maps4: add /proc/...
1423

6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1424
  #ifdef CONFIG_NUMA
6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1425

f69ff943d   Stephen Wilson   mm: proc: move sh...
1426
  struct numa_maps {
f69ff943d   Stephen Wilson   mm: proc: move sh...
1427
1428
1429
1430
1431
1432
1433
1434
1435
  	unsigned long pages;
  	unsigned long anon;
  	unsigned long active;
  	unsigned long writeback;
  	unsigned long mapcount_max;
  	unsigned long dirty;
  	unsigned long swapcache;
  	unsigned long node[MAX_NUMNODES];
  };
5b52fc890   Stephen Wilson   proc: allocate st...
1436
1437
1438
1439
  struct numa_maps_private {
  	struct proc_maps_private proc_maps;
  	struct numa_maps md;
  };
eb4866d00   Dave Hansen   make /proc/$pid/n...
1440
1441
  static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
  			unsigned long nr_pages)
f69ff943d   Stephen Wilson   mm: proc: move sh...
1442
1443
  {
  	int count = page_mapcount(page);
eb4866d00   Dave Hansen   make /proc/$pid/n...
1444
  	md->pages += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1445
  	if (pte_dirty || PageDirty(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1446
  		md->dirty += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1447
1448
  
  	if (PageSwapCache(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1449
  		md->swapcache += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1450
1451
  
  	if (PageActive(page) || PageUnevictable(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1452
  		md->active += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1453
1454
  
  	if (PageWriteback(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1455
  		md->writeback += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1456
1457
  
  	if (PageAnon(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1458
  		md->anon += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1459
1460
1461
  
  	if (count > md->mapcount_max)
  		md->mapcount_max = count;
eb4866d00   Dave Hansen   make /proc/$pid/n...
1462
  	md->node[page_to_nid(page)] += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1463
  }
3200a8aaa   Dave Hansen   break out numa_ma...
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
  static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
  		unsigned long addr)
  {
  	struct page *page;
  	int nid;
  
  	if (!pte_present(pte))
  		return NULL;
  
  	page = vm_normal_page(vma, addr, pte);
  	if (!page)
  		return NULL;
  
  	if (PageReserved(page))
  		return NULL;
  
  	nid = page_to_nid(page);
4ff1b2c29   Lai Jiangshan   procfs: use N_MEM...
1481
  	if (!node_isset(nid, node_states[N_MEMORY]))
3200a8aaa   Dave Hansen   break out numa_ma...
1482
1483
1484
1485
  		return NULL;
  
  	return page;
  }
28093f9f3   Gerald Schaefer   numa: fix /proc/<...
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
  					      struct vm_area_struct *vma,
  					      unsigned long addr)
  {
  	struct page *page;
  	int nid;
  
  	if (!pmd_present(pmd))
  		return NULL;
  
  	page = vm_normal_page_pmd(vma, addr, pmd);
  	if (!page)
  		return NULL;
  
  	if (PageReserved(page))
  		return NULL;
  
  	nid = page_to_nid(page);
  	if (!node_isset(nid, node_states[N_MEMORY]))
  		return NULL;
  
  	return page;
  }
  #endif
f69ff943d   Stephen Wilson   mm: proc: move sh...
1511
1512
1513
  static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
  		unsigned long end, struct mm_walk *walk)
  {
d85f4d6d3   Naoya Horiguchi   numa_maps: remove...
1514
1515
  	struct numa_maps *md = walk->private;
  	struct vm_area_struct *vma = walk->vma;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1516
1517
1518
  	spinlock_t *ptl;
  	pte_t *orig_pte;
  	pte_t *pte;
28093f9f3   Gerald Schaefer   numa: fix /proc/<...
1519
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
b6ec57f4b   Kirill A. Shutemov   thp: change pmd_t...
1520
1521
  	ptl = pmd_trans_huge_lock(pmd, vma);
  	if (ptl) {
025c5b245   Naoya Horiguchi   thp: optimize awa...
1522
  		struct page *page;
28093f9f3   Gerald Schaefer   numa: fix /proc/<...
1523
  		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1524
  		if (page)
28093f9f3   Gerald Schaefer   numa: fix /proc/<...
1525
  			gather_stats(page, md, pmd_dirty(*pmd),
025c5b245   Naoya Horiguchi   thp: optimize awa...
1526
  				     HPAGE_PMD_SIZE/PAGE_SIZE);
bf929152e   Kirill A. Shutemov   mm, thp: change p...
1527
  		spin_unlock(ptl);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1528
  		return 0;
32ef43848   Dave Hansen   teach /proc/$pid/...
1529
  	}
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1530
1531
  	if (pmd_trans_unstable(pmd))
  		return 0;
28093f9f3   Gerald Schaefer   numa: fix /proc/<...
1532
  #endif
f69ff943d   Stephen Wilson   mm: proc: move sh...
1533
1534
  	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
  	do {
d85f4d6d3   Naoya Horiguchi   numa_maps: remove...
1535
  		struct page *page = can_gather_numa_stats(*pte, vma, addr);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1536
1537
  		if (!page)
  			continue;
eb4866d00   Dave Hansen   make /proc/$pid/n...
1538
  		gather_stats(page, md, pte_dirty(*pte), 1);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1539
1540
1541
1542
1543
1544
  
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	pte_unmap_unlock(orig_pte, ptl);
  	return 0;
  }
  #ifdef CONFIG_HUGETLB_PAGE
632fd60fe   Naoya Horiguchi   numa_maps: fix ty...
1545
  static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
f69ff943d   Stephen Wilson   mm: proc: move sh...
1546
1547
  		unsigned long addr, unsigned long end, struct mm_walk *walk)
  {
5c2ff95e4   Michael Holzheu   numa: fix /proc/<...
1548
  	pte_t huge_pte = huge_ptep_get(pte);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1549
1550
  	struct numa_maps *md;
  	struct page *page;
5c2ff95e4   Michael Holzheu   numa: fix /proc/<...
1551
  	if (!pte_present(huge_pte))
f69ff943d   Stephen Wilson   mm: proc: move sh...
1552
  		return 0;
5c2ff95e4   Michael Holzheu   numa: fix /proc/<...
1553
  	page = pte_page(huge_pte);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1554
1555
1556
1557
  	if (!page)
  		return 0;
  
  	md = walk->private;
5c2ff95e4   Michael Holzheu   numa: fix /proc/<...
1558
  	gather_stats(page, md, pte_dirty(huge_pte), 1);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1559
1560
1561
1562
  	return 0;
  }
  
  #else
632fd60fe   Naoya Horiguchi   numa_maps: fix ty...
1563
  static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
f69ff943d   Stephen Wilson   mm: proc: move sh...
1564
1565
1566
1567
1568
1569
1570
1571
1572
  		unsigned long addr, unsigned long end, struct mm_walk *walk)
  {
  	return 0;
  }
  #endif
  
  /*
   * Display pages allocated per node and memory policy via /proc.
   */
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1573
  static int show_numa_map(struct seq_file *m, void *v, int is_pid)
f69ff943d   Stephen Wilson   mm: proc: move sh...
1574
  {
5b52fc890   Stephen Wilson   proc: allocate st...
1575
1576
  	struct numa_maps_private *numa_priv = m->private;
  	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1577
  	struct vm_area_struct *vma = v;
5b52fc890   Stephen Wilson   proc: allocate st...
1578
  	struct numa_maps *md = &numa_priv->md;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1579
1580
  	struct file *file = vma->vm_file;
  	struct mm_struct *mm = vma->vm_mm;
d85f4d6d3   Naoya Horiguchi   numa_maps: remove...
1581
1582
1583
1584
1585
1586
  	struct mm_walk walk = {
  		.hugetlb_entry = gather_hugetlb_stats,
  		.pmd_entry = gather_pte_stats,
  		.private = md,
  		.mm = mm,
  	};
f69ff943d   Stephen Wilson   mm: proc: move sh...
1587
  	struct mempolicy *pol;
948927ee9   David Rientjes   mm, mempolicy: ma...
1588
1589
  	char buffer[64];
  	int nid;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1590
1591
1592
  
  	if (!mm)
  		return 0;
5b52fc890   Stephen Wilson   proc: allocate st...
1593
1594
  	/* Ensure we start with an empty set of numa_maps statistics. */
  	memset(md, 0, sizeof(*md));
f69ff943d   Stephen Wilson   mm: proc: move sh...
1595

498f23717   Oleg Nesterov   mempolicy: fix sh...
1596
1597
1598
1599
1600
1601
1602
  	pol = __get_vma_policy(vma, vma->vm_start);
  	if (pol) {
  		mpol_to_str(buffer, sizeof(buffer), pol);
  		mpol_cond_put(pol);
  	} else {
  		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
  	}
f69ff943d   Stephen Wilson   mm: proc: move sh...
1603
1604
1605
1606
  
  	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
  
  	if (file) {
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1607
  		seq_puts(m, " file=");
2726d5662   Miklos Szeredi   vfs: add seq_file...
1608
1609
  		seq_file_path(m, file, "\n\t= ");
f69ff943d   Stephen Wilson   mm: proc: move sh...
1610
  	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1611
  		seq_puts(m, " heap");
b18cb64ea   Andy Lutomirski   fs/proc: Stop try...
1612
  	} else if (is_stack(proc_priv, vma)) {
65376df58   Johannes Weiner   proc: revert /pro...
1613
  		seq_puts(m, " stack");
f69ff943d   Stephen Wilson   mm: proc: move sh...
1614
  	}
fc360bd9c   Andrew Morton   /proc/self/numa_m...
1615
  	if (is_vm_hugetlb_page(vma))
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1616
  		seq_puts(m, " huge");
fc360bd9c   Andrew Morton   /proc/self/numa_m...
1617

d85f4d6d3   Naoya Horiguchi   numa_maps: remove...
1618
1619
  	/* mmap_sem is held by m_start */
  	walk_page_vma(vma, &walk);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
  
  	if (!md->pages)
  		goto out;
  
  	if (md->anon)
  		seq_printf(m, " anon=%lu", md->anon);
  
  	if (md->dirty)
  		seq_printf(m, " dirty=%lu", md->dirty);
  
  	if (md->pages != md->anon && md->pages != md->dirty)
  		seq_printf(m, " mapped=%lu", md->pages);
  
  	if (md->mapcount_max > 1)
  		seq_printf(m, " mapmax=%lu", md->mapcount_max);
  
  	if (md->swapcache)
  		seq_printf(m, " swapcache=%lu", md->swapcache);
  
  	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
  		seq_printf(m, " active=%lu", md->active);
  
  	if (md->writeback)
  		seq_printf(m, " writeback=%lu", md->writeback);
948927ee9   David Rientjes   mm, mempolicy: ma...
1644
1645
1646
  	for_each_node_state(nid, N_MEMORY)
  		if (md->node[nid])
  			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
198d1597c   Rafael Aquini   fs: proc: task_mm...
1647
1648
  
  	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1649
1650
1651
  out:
  	seq_putc(m, '\n');
b8c20a9b8   Oleg Nesterov   fs/proc/task_mmu....
1652
  	m_cache_vma(m, vma);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1653
1654
  	return 0;
  }
5b52fc890   Stephen Wilson   proc: allocate st...
1655

b76437579   Siddhesh Poyarekar   procfs: mark thre...
1656
1657
1658
1659
1660
1661
1662
1663
1664
  static int show_pid_numa_map(struct seq_file *m, void *v)
  {
  	return show_numa_map(m, v, 1);
  }
  
  static int show_tid_numa_map(struct seq_file *m, void *v)
  {
  	return show_numa_map(m, v, 0);
  }
03a44825b   Jan Engelhardt   procfs: constify ...
1665
  static const struct seq_operations proc_pid_numa_maps_op = {
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1666
1667
1668
1669
  	.start  = m_start,
  	.next   = m_next,
  	.stop   = m_stop,
  	.show   = show_pid_numa_map,
6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1670
  };
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1671

b76437579   Siddhesh Poyarekar   procfs: mark thre...
1672
1673
1674
1675
1676
1677
1678
1679
1680
  static const struct seq_operations proc_tid_numa_maps_op = {
  	.start  = m_start,
  	.next   = m_next,
  	.stop   = m_stop,
  	.show   = show_tid_numa_map,
  };
  
  static int numa_maps_open(struct inode *inode, struct file *file,
  			  const struct seq_operations *ops)
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1681
  {
4db7d0ee1   Oleg Nesterov   fs/proc/task_mmu....
1682
1683
  	return proc_maps_open(inode, file, ops,
  				sizeof(struct numa_maps_private));
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1684
  }
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
  static int pid_numa_maps_open(struct inode *inode, struct file *file)
  {
  	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
  }
  
  static int tid_numa_maps_open(struct inode *inode, struct file *file)
  {
  	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
  }
  
  const struct file_operations proc_pid_numa_maps_operations = {
  	.open		= pid_numa_maps_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
29a40ace8   Oleg Nesterov   fs/proc/task_mmu....
1699
  	.release	= proc_map_release,
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1700
1701
1702
1703
  };
  
  const struct file_operations proc_tid_numa_maps_operations = {
  	.open		= tid_numa_maps_open,
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1704
1705
  	.read		= seq_read,
  	.llseek		= seq_lseek,
29a40ace8   Oleg Nesterov   fs/proc/task_mmu....
1706
  	.release	= proc_map_release,
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1707
  };
f69ff943d   Stephen Wilson   mm: proc: move sh...
1708
  #endif /* CONFIG_NUMA */