fs/proc/task_mmu.c
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
  	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  
  	/*
  	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  	 * collector of these hiwater stats must therefore get total_vm
  	 * and rss too, which will usually be the higher.  Barriers? not
  	 * worth the effort, such snapshots can always be inconsistent.
  	 */
  	hiwater_vm = total_vm = mm->total_vm;
  	if (hiwater_vm < mm->hiwater_vm)
  		hiwater_vm = mm->hiwater_vm;
  	hiwater_rss = total_rss = get_mm_rss(mm);
  	if (hiwater_rss < mm->hiwater_rss)
  		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE * sizeof(pte_t) *
		 atomic_long_read(&mm->nr_ptes)) >> 10,
		swap << (PAGE_SHIFT-10));
  }
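
/*
 * Illustrative only (added in this write-up, not part of the original file):
 * the fields printed by task_mem() above appear in /proc/<pid>/status.
 * With made-up values, an excerpt would look roughly like:
 *
 *	VmPeak:	    8480 kB
 *	VmSize:	    8480 kB
 *	VmLck:	       0 kB
 *	VmPin:	       0 kB
 *	VmHWM:	     736 kB
 *	VmRSS:	     736 kB
 *	VmSwap:	       0 kB
 */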
  
  unsigned long task_vsize(struct mm_struct *mm)
  {
  	return PAGE_SIZE * mm->total_vm;
  }

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
  	mpol_get(priv->task_mempolicy);
  	task_unlock(task);
  }
  static void release_task_mempolicy(struct proc_maps_private *priv)
  {
  	mpol_put(priv->task_mempolicy);
  }
  #else
  static void hold_task_mempolicy(struct proc_maps_private *priv)
  {
  }
  static void release_task_mempolicy(struct proc_maps_private *priv)
  {
  }
  #endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}
  static struct vm_area_struct *
  m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
  {
  	if (vma == priv->tail_vma)
  		return NULL;
  	return vma->vm_next ?: priv->tail_vma;
  }
  static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
  {
  	if (m->count < m->size)	/* vma is copied successfully */
  		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
  }
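
/*
 * Illustrative note (added in this write-up, not part of the original file):
 * the m->version caching above lets a later read(2) resume without rewalking
 * the VMA list from the head.  For example, if the seq_file buffer fills
 * right after the VMA starting at 0x00400000 was copied, m->version is set
 * to 0x00400000; the next m_start() call does find_vma(mm, 0x00400000) and
 * steps forward via m_next_vma(), while -1UL records that the last VMA has
 * already been emitted.
 */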

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}
  
  static void *m_next(struct seq_file *m, void *v, loff_t *pos)
  {
  	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
  }
  
  static void m_stop(struct seq_file *m, void *v)
  {
  	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}
  static int proc_maps_open(struct inode *inode, struct file *file,
  			const struct seq_operations *ops, int psize)
  {
  	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
  
  	if (!priv)
  		return -ENOMEM;
  	priv->inode = inode;
  	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
  	if (IS_ERR(priv->mm)) {
  		int err = PTR_ERR(priv->mm);
  
  		seq_release_private(inode, file);
  		return err;
  	}
  	return 0;
  }
  static int proc_map_release(struct inode *inode, struct file *file)
  {
  	struct seq_file *seq = file->private_data;
  	struct proc_maps_private *priv = seq->private;
  
  	if (priv->mm)
  		mmdrop(priv->mm);
  
  	return seq_release_private(inode, file);
  }

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}
  static pid_t pid_of_stack(struct proc_maps_private *priv,
  				struct vm_area_struct *vma, bool is_pid)
  {
  	struct inode *inode = priv->inode;
  	struct task_struct *task;
  	pid_t ret = 0;
  
  	rcu_read_lock();
  	task = pid_task(proc_pid(inode), PIDTYPE_PID);
  	if (task) {
  		task = task_of_stack(task, vma, is_pid);
  		if (task)
  			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
  	}
  	rcu_read_unlock();
  
  	return ret;
  }

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
  }
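
/*
 * Illustrative only (added in this write-up, not part of the original file):
 * one /proc/<pid>/maps line emitted by show_map_vma(), with hypothetical
 * addresses and a hypothetical file, would look roughly like:
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1234567            /bin/cat
 */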

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}
  static int show_pid_map(struct seq_file *m, void *v)
  {
  	return show_map(m, v, 1);
  }
  
  static int show_tid_map(struct seq_file *m, void *v)
  {
  	return show_map(m, v, 0);
  }

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}
  
  const struct file_operations proc_pid_maps_operations = {
  	.open		= pid_maps_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= proc_map_release,
  };
  
  const struct file_operations proc_tid_maps_operations = {
  	.open		= tid_maps_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= proc_map_release,
  };
  
  /*
   * Proportional Set Size(PSS): my share of RSS.
   *
   * PSS of a process is the count of pages it has in memory, where each
   * page is divided by the number of processes sharing it.  So if a
   * process has 1000 pages all to itself, and 1000 shared with one other
   * process, its PSS will be 1500.
   *
   * To keep (accumulated) division errors low, we adopt a 64bit
   * fixed-point pss counter to minimize division errors. So (pss >>
   * PSS_SHIFT) would be the real byte count.
   *
   * A shift of 12 before division means (assuming 4K page size):
   * 	- 1M 3-user-pages add up to 8KB errors;
   * 	- supports mapcount up to 2^24, or 16M;
   * 	- supports PSS up to 2^52 bytes, or 4PB.
   */
  #define PSS_SHIFT 12
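
/*
 * Worked example (added in this write-up, not part of the original file),
 * assuming 4K pages and the PSS_SHIFT of 12 defined above: a page mapped by
 * three processes adds (4096 << PSS_SHIFT) / 3 = 5592405 to pss, while a
 * private page adds 4096 << PSS_SHIFT = 16777216.  Converting back with
 * pss >> PSS_SHIFT yields ~1365 bytes and 4096 bytes respectively, so the
 * per-page rounding error stays below one byte.
 */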

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};
  static void smaps_account(struct mem_size_stats *mss, struct page *page,
  		unsigned long size, bool young, bool dirty)
  {
  	int mapcount;
  
  	if (PageAnon(page))
  		mss->anonymous += size;
  
  	mss->resident += size;
  	/* Accumulate the size in pages that have been accessed. */
  	if (young || PageReferenced(page))
  		mss->referenced += size;
  	mapcount = page_mapcount(page);
  	if (mapcount >= 2) {
  		u64 pss_delta;
  
  		if (dirty || PageDirty(page))
  			mss->shared_dirty += size;
  		else
  			mss->shared_clean += size;
  		pss_delta = (u64)size << PSS_SHIFT;
  		do_div(pss_delta, mapcount);
  		mss->pss += pss_delta;
  	} else {
  		if (dirty || PageDirty(page))
  			mss->private_dirty += size;
  		else
  			mss->private_clean += size;
  		mss->pss += (u64)size << PSS_SHIFT;
  	}
  }

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
  {
  	struct mem_size_stats *mss = walk->private;
  	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent))
			mss->swap += PAGE_SIZE;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(*pte)) {
		if (pte_to_pgoff(*pte) != pgoff)
			mss->nonlinear += PAGE_SIZE;
	}

	if (!page)
		return;
	if (page->index != pgoff)
		mss->nonlinear += PAGE_SIZE;

	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
  		struct mm_walk *walk)
  {
  	struct mem_size_stats *mss = walk->private;
  	struct vm_area_struct *vma = mss->vma;
  	struct page *page;
  
  	/* FOLL_DUMP will return -EFAULT on huge zero page */
  	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
  	if (IS_ERR_OR_NULL(page))
  		return;
  	mss->anonymous_thp += HPAGE_PMD_SIZE;
  	smaps_account(mss, page, HPAGE_PMD_SIZE,
  			pmd_young(*pmd), pmd_dirty(*pmd));
  }
  #else
  static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
  		struct mm_walk *walk)
  {
  }
  #endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
  static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
  {
  	/*
  	 * Don't forget to update Documentation/ on changes.
  	 */
  	static const char mnemonics[BITS_PER_LONG][2] = {
  		/*
		 * In case we meet a flag we don't know about.
  		 */
  		[0 ... (BITS_PER_LONG-1)] = "??",
  
  		[ilog2(VM_READ)]	= "rd",
  		[ilog2(VM_WRITE)]	= "wr",
  		[ilog2(VM_EXEC)]	= "ex",
  		[ilog2(VM_SHARED)]	= "sh",
  		[ilog2(VM_MAYREAD)]	= "mr",
  		[ilog2(VM_MAYWRITE)]	= "mw",
  		[ilog2(VM_MAYEXEC)]	= "me",
  		[ilog2(VM_MAYSHARE)]	= "ms",
  		[ilog2(VM_GROWSDOWN)]	= "gd",
  		[ilog2(VM_PFNMAP)]	= "pf",
  		[ilog2(VM_DENYWRITE)]	= "dw",
  #ifdef CONFIG_X86_INTEL_MPX
  		[ilog2(VM_MPX)]		= "mp",
  #endif
  		[ilog2(VM_LOCKED)]	= "lo",
  		[ilog2(VM_IO)]		= "io",
  		[ilog2(VM_SEQ_READ)]	= "sr",
  		[ilog2(VM_RAND_READ)]	= "rr",
  		[ilog2(VM_DONTCOPY)]	= "dc",
  		[ilog2(VM_DONTEXPAND)]	= "de",
  		[ilog2(VM_ACCOUNT)]	= "ac",
  		[ilog2(VM_NORESERVE)]	= "nr",
  		[ilog2(VM_HUGETLB)]	= "ht",
  		[ilog2(VM_NONLINEAR)]	= "nl",
  		[ilog2(VM_ARCH_1)]	= "ar",
  		[ilog2(VM_DONTDUMP)]	= "dd",
  #ifdef CONFIG_MEM_SOFT_DIRTY
  		[ilog2(VM_SOFTDIRTY)]	= "sd",
  #endif
  		[ilog2(VM_MIXEDMAP)]	= "mm",
  		[ilog2(VM_HUGEPAGE)]	= "hg",
  		[ilog2(VM_NOHUGEPAGE)]	= "nh",
  		[ilog2(VM_MERGEABLE)]	= "mg",
  	};
  	size_t i;
  
  	seq_puts(m, "VmFlags: ");
  	for (i = 0; i < BITS_PER_LONG; i++) {
  		if (vma->vm_flags & (1UL << i)) {
  			seq_printf(m, "%c%c ",
  				   mnemonics[i][0], mnemonics[i][1]);
  		}
  	}
	seq_putc(m, '\n');
  }
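
/*
 * Illustrative only (added in this write-up, not part of the original file):
 * for a typical private, readable and executable file mapping, the line
 * printed by show_smap_vma_flags() might read
 *
 *	VmFlags: rd ex mr mw me dw
 *
 * using the two-letter mnemonics from the table above.
 */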

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}
  static int show_pid_smap(struct seq_file *m, void *v)
  {
  	return show_smap(m, v, 1);
  }
  
  static int show_tid_smap(struct seq_file *m, void *v)
  {
  	return show_smap(m, v, 0);
  }

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}
  
  const struct file_operations proc_pid_smaps_operations = {
  	.open		= pid_smaps_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
  /*
   * We do not want to have constant page-shift bits sitting in
   * pagemap entries and are about to reuse them some time soon.
   *
   * Here's the "migration strategy":
   * 1. when the system boots these bits remain what they are,
   *    but a warning about future change is printed in log;
 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 *    this flag is set to denote that the user is aware of the
 *    new API and those page-shift bits change their meaning.
   *    The respective warning is printed in dmesg;
   * 3. In a couple of releases we will remove all the mentions
   *    of page-shift in pagemap entries.
   */
  
  static bool soft_dirty_cleared __read_mostly;

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	struct vm_area_struct *vma;
	enum clear_refs_types type;
};
  static inline void clear_soft_dirty(struct vm_area_struct *vma,
  		unsigned long addr, pte_t *pte)
  {
  #ifdef CONFIG_MEM_SOFT_DIRTY
  	/*
  	 * The soft-dirty tracker uses #PF-s to catch writes
  	 * to pages, so write-protect the pte as well. See the
  	 * Documentation/vm/soft-dirty.txt for full description
  	 * of how soft-dirty works.
  	 */
  	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	} else if (pte_file(ptent)) {
		ptent = pte_file_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = cp->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;
  		page = vm_normal_page(vma, addr, ptent);
  		if (!page)
  			continue;
  
  		/* Clear accessed and referenced bits. */
  		ptep_test_and_clear_young(vma, addr, pte);
  		ClearPageReferenced(page);
  	}
  	pte_unmap_unlock(pte - 1, ptl);
  	cond_resched();
  	return 0;
  }

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	if (type == CLEAR_REFS_SOFT_DIRTY) {
		soft_dirty_cleared = true;
		pr_warn_once("The pagemap bits 55-60 has changed their meaning!"
			     " See the linux/Documentation/vm/pagemap.txt for "
			     "details.\n");
	}
	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
			.private = &cp,
		};
		down_read(&mm->mmap_sem);
  		if (type == CLEAR_REFS_SOFT_DIRTY) {
  			for (vma = mm->mmap; vma; vma = vma->vm_next) {
  				if (!(vma->vm_flags & VM_SOFTDIRTY))
  					continue;
  				up_read(&mm->mmap_sem);
  				down_write(&mm->mmap_sem);
  				for (vma = mm->mmap; vma; vma = vma->vm_next) {
  					vma->vm_flags &= ~VM_SOFTDIRTY;
  					vma_set_page_prot(vma);
  				}
  				downgrade_write(&mm->mmap_sem);
  				break;
  			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
  		}
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			cp.vma = vma;
  			if (is_vm_hugetlb_page(vma))
  				continue;
  			/*
  			 * Writing 1 to /proc/pid/clear_refs affects all pages.
  			 *
  			 * Writing 2 to /proc/pid/clear_refs only affects
  			 * Anonymous pages.
  			 *
  			 * Writing 3 to /proc/pid/clear_refs only affects file
  			 * mapped pages.
  			 *
  			 * Writing 4 to /proc/pid/clear_refs affects all pages.
  			 */
  			if (type == CLEAR_REFS_ANON && vma->vm_file)
  				continue;
  			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
  				continue;
  			walk_page_range(vma->vm_start, vma->vm_end,
  					&clear_refs_walk);
		}
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
  };
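
/*
 * Illustrative userspace sketch (added in this write-up, not part of the
 * original file): the interface above is driven by writing one of the
 * values documented in the loop comment ("1".."4") into
 * /proc/<pid>/clear_refs.  A monitoring tool that wants to restart
 * soft-dirty tracking for the current process could do roughly:
 *
 *	int fd = open("/proc/self/clear_refs", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4", 1);	// CLEAR_REFS_SOFT_DIRTY
 *		close(fd);
 *	}
 *
 * and then read /proc/self/pagemap to see which pages become dirty again.
 */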

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool v2;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)
#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
/* in "new" pagemap pshift bits are occupied with more status bits */
#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))

#define __PM_SOFT_DIRTY      (1LL)
#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
  #define PM_END_OF_BUFFER    1
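
/*
 * Illustrative summary (added in this write-up, not part of the original
 * file) of the 64-bit pagemap entry layout implied by the macros above:
 *
 *	bits  0-54  page frame number (PM_PFRAME) when the page is present
 *	bits 55-60  page shift in the "old" format, or extra status bits such
 *	            as __PM_SOFT_DIRTY in the "v2" format (PM_STATUS2)
 *	bits 61-63  PM_FILE / PM_SWAP / PM_PRESENT status bits (PM_STATUS)
 */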

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
  	return 0;
  }
  
  static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;
	int flags2 = 0;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		if (vma->vm_flags & VM_SOFTDIRTY)
			flags2 |= __PM_SOFT_DIRTY;
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if ((vma->vm_flags & VM_SOFTDIRTY))
		flags2 |= __PM_SOFT_DIRTY;

	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		int pmd_flags2;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
			pmd_flags2 = __PM_SOFT_DIRTY;
		else
			pmd_flags2 = 0;
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;
			pagemap_entry_t pme;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
  
  	while (1) {
  		/* End of address space hole, which we mark as non-present. */
  		unsigned long hole_end;
  
  		if (vma)
  			hole_end = min(end, vma->vm_start);
  		else
  			hole_end = end;
  
  		for (; addr < hole_end; addr += PAGE_SIZE) {
  			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
  
  			err = add_to_pagemap(addr, &pme, pm);
  			if (err)
  				return err;
  		}
  		if (!vma || vma->vm_start >= end)
  			break;
  		/*
  		 * We can't possibly be in a hugetlb VMA. In general,
  		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
  		 * the pmd_entry can only be called on addresses in a
  		 * hugetlb if the walk starts in a non-hugetlb VMA and
  		 * spans a hugepage VMA. Since pagemap_read walks are
  		 * PMD-sized and PMD-aligned, this will never be true.
  		 */
  		BUG_ON(is_vm_hugetlb_page(vma));
  
  		/* Addresses in the VMA. */
  		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
  			pagemap_entry_t pme;
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
			pte_unmap(pte);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				return err;
		}

		if (addr == end)
			break;

		vma = find_vma(walk->mm, addr);
	}

	cond_resched();

	return err;
}
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1125
  #ifdef CONFIG_HUGETLB_PAGE
2b0a9f017   Pavel Emelyanov   pagemap: introduc...
1126
  static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1127
  					pte_t pte, int offset, int flags2)
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1128
  {
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1129
  	if (pte_present(pte))
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1130
1131
1132
  		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
  				PM_STATUS2(pm->v2, flags2)		|
  				PM_PRESENT);
16fbdce62   Konstantin Khlebnikov   proc/pid/pagemap:...
1133
  	else
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1134
1135
  		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
  				PM_STATUS2(pm->v2, flags2));
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1136
  }
116354d17   Naoya Horiguchi   pagemap: fix pfn ...
1137
1138
1139
1140
  /* This function walks within one hugetlb entry in a single call */
  static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  				 unsigned long addr, unsigned long end,
  				 struct mm_walk *walk)
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1141
  {
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1142
  	struct pagemapread *pm = walk->private;
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1143
  	struct vm_area_struct *vma;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1144
  	int err = 0;
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1145
  	int flags2;
16fbdce62   Konstantin Khlebnikov   proc/pid/pagemap:...
1146
  	pagemap_entry_t pme;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1147

d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1148
1149
1150
1151
1152
1153
1154
  	vma = find_vma(walk->mm, addr);
  	WARN_ON_ONCE(!vma);
  
  	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
  		flags2 = __PM_SOFT_DIRTY;
  	else
  		flags2 = 0;
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1155
  	for (; addr != end; addr += PAGE_SIZE) {
116354d17   Naoya Horiguchi   pagemap: fix pfn ...
1156
  		int offset = (addr & ~hmask) >> PAGE_SHIFT;
d9104d1ca   Cyrill Gorcunov   mm: track vma cha...
1157
  		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
092b50bac   Naoya Horiguchi   pagemap: introduc...
1158
  		err = add_to_pagemap(addr, &pme, pm);
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1159
1160
1161
1162
1163
1164
1165
1166
  		if (err)
  			return err;
  	}
  
  	cond_resched();
  
  	return err;
  }
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1167
  #endif /* CONFIG_HUGETLB_PAGE */
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1168

85863e475   Matt Mackall   maps4: add /proc/...
1169
1170
1171
  /*
   * /proc/pid/pagemap - an array mapping virtual pages to pfns
   *
f16278c67   Hans Rosenfeld   Change pagemap ou...
1172
1173
1174
   * For each page in the address space, this file contains one 64-bit entry
   * consisting of the following:
   *
052fb0d63   Konstantin Khlebnikov   proc: report file...
1175
   * Bits 0-54  page frame number (PFN) if present
f16278c67   Hans Rosenfeld   Change pagemap ou...
1176
   * Bits 0-4   swap type if swapped
052fb0d63   Konstantin Khlebnikov   proc: report file...
1177
   * Bits 5-54  swap offset if swapped
f16278c67   Hans Rosenfeld   Change pagemap ou...
1178
   * Bits 55-60 page shift (page size = 1<<page shift)
052fb0d63   Konstantin Khlebnikov   proc: report file...
1179
   * Bit  61    page is file-page or shared-anon
f16278c67   Hans Rosenfeld   Change pagemap ou...
1180
1181
1182
1183
1184
1185
   * Bit  62    page swapped
   * Bit  63    page present
   *
   * If the page is not present but in swap, then the PFN contains an
   * encoding of the swap file number and the page's offset into the
   * swap. Unmapped pages return a null PFN. This allows determining
85863e475   Matt Mackall   maps4: add /proc/...
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
   * precisely which pages are mapped (or in swap) and comparing mapped
   * pages between processes.
   *
   * Efficient users of this interface will use /proc/pid/maps to
   * determine which areas of memory are actually mapped and llseek to
   * skip over unmapped regions.
   */
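  /*
   * Illustrative user-space sketch (not part of this file): decoding one
   * pagemap entry for a page-aligned virtual address "vaddr", assuming
   * "fd" is an open /proc/pid/pagemap descriptor and "page_size" is the
   * value returned by sysconf(_SC_PAGESIZE). The bit positions follow
   * the table above.
   *
   *	uint64_t ent;
   *	off_t off = (off_t)(vaddr / page_size) * sizeof(ent);
   *
   *	if (pread(fd, &ent, sizeof(ent), off) == (ssize_t)sizeof(ent)) {
   *		if (ent & (1ULL << 63))		 // bit 63: present
   *			printf("pfn %llu\n",
   *			       (unsigned long long)(ent & ((1ULL << 55) - 1)));
   *		else if (ent & (1ULL << 62))	 // bit 62: swapped
   *			printf("swap type %llu offset %llu\n",
   *			       (unsigned long long)(ent & 0x1f),
   *			       (unsigned long long)((ent >> 5) & ((1ULL << 50) - 1)));
   *		else
   *			printf("not present\n");
   *	}
   */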
  static ssize_t pagemap_read(struct file *file, char __user *buf,
  			    size_t count, loff_t *ppos)
  {
496ad9aa8   Al Viro   new helper: file_...
1196
  	struct task_struct *task = get_proc_task(file_inode(file));
85863e475   Matt Mackall   maps4: add /proc/...
1197
1198
  	struct mm_struct *mm;
  	struct pagemapread pm;
85863e475   Matt Mackall   maps4: add /proc/...
1199
  	int ret = -ESRCH;
ee1e6ab60   Alexey Dobriyan   proc: fix /proc/*...
1200
  	struct mm_walk pagemap_walk = {};
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1201
1202
1203
1204
  	unsigned long src;
  	unsigned long svpfn;
  	unsigned long start_vaddr;
  	unsigned long end_vaddr;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1205
  	int copied = 0;
85863e475   Matt Mackall   maps4: add /proc/...
1206
1207
1208
  
  	if (!task)
  		goto out;
85863e475   Matt Mackall   maps4: add /proc/...
1209
1210
  	ret = -EINVAL;
  	/* file position must be aligned */
aae8679b0   Thomas Tuttle   pagemap: fix bug ...
1211
  	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
fb39380b8   Marcelo Tosatti   pagemap: proper r...
1212
  		goto out_task;
85863e475   Matt Mackall   maps4: add /proc/...
1213
1214
  
  	ret = 0;
081617863   Vitaly Mayatskikh   pagemap: require ...
1215
1216
  	if (!count)
  		goto out_task;
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1217
  	pm.v2 = soft_dirty_cleared;
8c8296223   yonghua zheng   fs/proc/task_mmu....
1218
1219
  	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
  	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1220
  	ret = -ENOMEM;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1221
  	if (!pm.buffer)
98bc93e50   KOSAKI Motohiro   proc: fix pagemap...
1222
  		goto out_task;
e7dcd9990   Cong Wang   proc: remove mm_f...
1223
  	mm = mm_access(task, PTRACE_MODE_READ);
98bc93e50   KOSAKI Motohiro   proc: fix pagemap...
1224
1225
1226
  	ret = PTR_ERR(mm);
  	if (!mm || IS_ERR(mm))
  		goto out_free;
85863e475   Matt Mackall   maps4: add /proc/...
1227

5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1228
1229
  	pagemap_walk.pmd_entry = pagemap_pte_range;
  	pagemap_walk.pte_hole = pagemap_pte_hole;
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1230
  #ifdef CONFIG_HUGETLB_PAGE
5dc37642c   Naoya Horiguchi   mm hugetlb: add h...
1231
  	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1a5cb8146   Naoya Horiguchi   pagemap: add #ifd...
1232
  #endif
5d7e0d2bd   Andrew Morton   Fix pagemap_read(...
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
  	pagemap_walk.mm = mm;
  	pagemap_walk.private = &pm;
  
  	src = *ppos;
  	svpfn = src / PM_ENTRY_BYTES;
  	start_vaddr = svpfn << PAGE_SHIFT;
  	end_vaddr = TASK_SIZE_OF(task);
  
  	/* watch out for wraparound */
  	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
  		start_vaddr = end_vaddr;
  
  	/*
  	 * The odds are that this will stop walking way
  	 * before end_vaddr, because the length of the
  	 * user buffer is tracked in "pm", and the walk
  	 * will stop when we hit the end of the buffer.
  	 */
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1251
1252
1253
1254
1255
1256
  	ret = 0;
  	while (count && (start_vaddr < end_vaddr)) {
  		int len;
  		unsigned long end;
  
  		pm.pos = 0;
ea251c1d5   Naoya Horiguchi   pagemap: set page...
1257
  		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1258
1259
1260
1261
1262
1263
1264
1265
1266
  		/* overflow ? */
  		if (end < start_vaddr || end > end_vaddr)
  			end = end_vaddr;
  		down_read(&mm->mmap_sem);
  		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
  		up_read(&mm->mmap_sem);
  		start_vaddr = end;
  
  		len = min(count, PM_ENTRY_BYTES * pm.pos);
309361e09   Dan Carpenter   proc: copy_to_use...
1267
  		if (copy_to_user(buf, pm.buffer, len)) {
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1268
  			ret = -EFAULT;
98bc93e50   KOSAKI Motohiro   proc: fix pagemap...
1269
  			goto out_mm;
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1270
1271
1272
1273
  		}
  		copied += len;
  		buf += len;
  		count -= len;
85863e475   Matt Mackall   maps4: add /proc/...
1274
  	}
d82ef020c   KAMEZAWA Hiroyuki   proc: pagemap: Ho...
1275
1276
1277
  	*ppos += copied;
  	if (!ret || ret == PM_END_OF_BUFFER)
  		ret = copied;
fb39380b8   Marcelo Tosatti   pagemap: proper r...
1278
1279
  out_mm:
  	mmput(mm);
98bc93e50   KOSAKI Motohiro   proc: fix pagemap...
1280
1281
  out_free:
  	kfree(pm.buffer);
85863e475   Matt Mackall   maps4: add /proc/...
1282
1283
1284
1285
1286
  out_task:
  	put_task_struct(task);
  out:
  	return ret;
  }
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1287
1288
1289
1290
1291
1292
1293
1294
  static int pagemap_open(struct inode *inode, struct file *file)
  {
  	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
  			"to stop being page-shift some time soon. See the "
  			"linux/Documentation/vm/pagemap.txt for details.
  ");
  	return 0;
  }
85863e475   Matt Mackall   maps4: add /proc/...
1295
1296
1297
  const struct file_operations proc_pagemap_operations = {
  	.llseek		= mem_lseek, /* borrow this */
  	.read		= pagemap_read,
541c237c0   Pavel Emelyanov   pagemap: prepare ...
1298
  	.open		= pagemap_open,
85863e475   Matt Mackall   maps4: add /proc/...
1299
  };
1e8832811   Matt Mackall   maps4: make page ...
1300
  #endif /* CONFIG_PROC_PAGE_MONITOR */
85863e475   Matt Mackall   maps4: add /proc/...
1301

6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1302
  #ifdef CONFIG_NUMA
6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1303

f69ff943d   Stephen Wilson   mm: proc: move sh...
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
  struct numa_maps {
  	struct vm_area_struct *vma;
  	unsigned long pages;
  	unsigned long anon;
  	unsigned long active;
  	unsigned long writeback;
  	unsigned long mapcount_max;
  	unsigned long dirty;
  	unsigned long swapcache;
  	unsigned long node[MAX_NUMNODES];
  };
5b52fc890   Stephen Wilson   proc: allocate st...
1315
1316
1317
1318
  struct numa_maps_private {
  	struct proc_maps_private proc_maps;
  	struct numa_maps md;
  };
eb4866d00   Dave Hansen   make /proc/$pid/n...
1319
1320
  static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
  			unsigned long nr_pages)
f69ff943d   Stephen Wilson   mm: proc: move sh...
1321
1322
  {
  	int count = page_mapcount(page);
eb4866d00   Dave Hansen   make /proc/$pid/n...
1323
  	md->pages += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1324
  	if (pte_dirty || PageDirty(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1325
  		md->dirty += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1326
1327
  
  	if (PageSwapCache(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1328
  		md->swapcache += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1329
1330
  
  	if (PageActive(page) || PageUnevictable(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1331
  		md->active += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1332
1333
  
  	if (PageWriteback(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1334
  		md->writeback += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1335
1336
  
  	if (PageAnon(page))
eb4866d00   Dave Hansen   make /proc/$pid/n...
1337
  		md->anon += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1338
1339
1340
  
  	if (count > md->mapcount_max)
  		md->mapcount_max = count;
eb4866d00   Dave Hansen   make /proc/$pid/n...
1341
  	md->node[page_to_nid(page)] += nr_pages;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1342
  }
3200a8aaa   Dave Hansen   break out numa_ma...
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
  static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
  		unsigned long addr)
  {
  	struct page *page;
  	int nid;
  
  	if (!pte_present(pte))
  		return NULL;
  
  	page = vm_normal_page(vma, addr, pte);
  	if (!page)
  		return NULL;
  
  	if (PageReserved(page))
  		return NULL;
  
  	nid = page_to_nid(page);
4ff1b2c29   Lai Jiangshan   procfs: use N_MEM...
1360
  	if (!node_isset(nid, node_states[N_MEMORY]))
3200a8aaa   Dave Hansen   break out numa_ma...
1361
1362
1363
1364
  		return NULL;
  
  	return page;
  }
f69ff943d   Stephen Wilson   mm: proc: move sh...
1365
1366
1367
1368
1369
1370
1371
1372
1373
  static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
  		unsigned long end, struct mm_walk *walk)
  {
  	struct numa_maps *md;
  	spinlock_t *ptl;
  	pte_t *orig_pte;
  	pte_t *pte;
  
  	md = walk->private;
025c5b245   Naoya Horiguchi   thp: optimize awa...
1374

bf929152e   Kirill A. Shutemov   mm, thp: change p...
1375
  	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
025c5b245   Naoya Horiguchi   thp: optimize awa...
1376
1377
1378
1379
1380
1381
1382
  		pte_t huge_pte = *(pte_t *)pmd;
  		struct page *page;
  
  		page = can_gather_numa_stats(huge_pte, md->vma, addr);
  		if (page)
  			gather_stats(page, md, pte_dirty(huge_pte),
  				     HPAGE_PMD_SIZE/PAGE_SIZE);
bf929152e   Kirill A. Shutemov   mm, thp: change p...
1383
  		spin_unlock(ptl);
025c5b245   Naoya Horiguchi   thp: optimize awa...
1384
  		return 0;
32ef43848   Dave Hansen   teach /proc/$pid/...
1385
  	}
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1386
1387
  	if (pmd_trans_unstable(pmd))
  		return 0;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1388
1389
  	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
  	do {
3200a8aaa   Dave Hansen   break out numa_ma...
1390
  		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1391
1392
  		if (!page)
  			continue;
eb4866d00   Dave Hansen   make /proc/$pid/n...
1393
  		gather_stats(page, md, pte_dirty(*pte), 1);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
  
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	pte_unmap_unlock(orig_pte, ptl);
  	return 0;
  }
  #ifdef CONFIG_HUGETLB_PAGE
  static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
  		unsigned long addr, unsigned long end, struct mm_walk *walk)
  {
  	struct numa_maps *md;
  	struct page *page;
d4c54919e   Naoya Horiguchi   mm: add !pte_pres...
1405
  	if (!pte_present(*pte))
f69ff943d   Stephen Wilson   mm: proc: move sh...
1406
1407
1408
1409
1410
1411
1412
  		return 0;
  
  	page = pte_page(*pte);
  	if (!page)
  		return 0;
  
  	md = walk->private;
eb4866d00   Dave Hansen   make /proc/$pid/n...
1413
  	gather_stats(page, md, pte_dirty(*pte), 1);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
  	return 0;
  }
  
  #else
  static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
  		unsigned long addr, unsigned long end, struct mm_walk *walk)
  {
  	return 0;
  }
  #endif
  
  /*
   * Display pages allocated per node and memory policy via /proc.
   */
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1428
  static int show_numa_map(struct seq_file *m, void *v, int is_pid)
f69ff943d   Stephen Wilson   mm: proc: move sh...
1429
  {
5b52fc890   Stephen Wilson   proc: allocate st...
1430
1431
  	struct numa_maps_private *numa_priv = m->private;
  	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1432
  	struct vm_area_struct *vma = v;
5b52fc890   Stephen Wilson   proc: allocate st...
1433
  	struct numa_maps *md = &numa_priv->md;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1434
1435
1436
1437
  	struct file *file = vma->vm_file;
  	struct mm_struct *mm = vma->vm_mm;
  	struct mm_walk walk = {};
  	struct mempolicy *pol;
948927ee9   David Rientjes   mm, mempolicy: ma...
1438
1439
  	char buffer[64];
  	int nid;
f69ff943d   Stephen Wilson   mm: proc: move sh...
1440
1441
1442
  
  	if (!mm)
  		return 0;
5b52fc890   Stephen Wilson   proc: allocate st...
1443
1444
  	/* Ensure we start with an empty set of numa_maps statistics. */
  	memset(md, 0, sizeof(*md));
f69ff943d   Stephen Wilson   mm: proc: move sh...
1445
1446
1447
1448
1449
1450
1451
  
  	md->vma = vma;
  
  	walk.hugetlb_entry = gather_hugetbl_stats;
  	walk.pmd_entry = gather_pte_stats;
  	walk.private = md;
  	walk.mm = mm;
498f23717   Oleg Nesterov   mempolicy: fix sh...
1452
1453
1454
1455
1456
1457
1458
  	pol = __get_vma_policy(vma, vma->vm_start);
  	if (pol) {
  		mpol_to_str(buffer, sizeof(buffer), pol);
  		mpol_cond_put(pol);
  	} else {
  		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
  	}
f69ff943d   Stephen Wilson   mm: proc: move sh...
1459
1460
1461
1462
  
  	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
  
  	if (file) {
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1463
  		seq_puts(m, " file=");
f69ff943d   Stephen Wilson   mm: proc: move sh...
1464
1465
1466
  		seq_path(m, &file->f_path, "\n\t= ");
  	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1467
  		seq_puts(m, " heap");
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1468
  	} else {
58cb65487   Oleg Nesterov   proc/maps: make v...
1469
  		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1470
1471
1472
1473
1474
1475
1476
  		if (tid != 0) {
  			/*
  			 * Thread stack in /proc/PID/task/TID/maps or
  			 * the main process stack.
  			 */
  			if (!is_pid || (vma->vm_start <= mm->start_stack &&
  			    vma->vm_end >= mm->start_stack))
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1477
  				seq_puts(m, " stack");
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1478
1479
1480
  			else
  				seq_printf(m, " stack:%d", tid);
  		}
f69ff943d   Stephen Wilson   mm: proc: move sh...
1481
  	}
fc360bd9c   Andrew Morton   /proc/self/numa_m...
1482
  	if (is_vm_hugetlb_page(vma))
17c2b4ee4   Fabian Frederick   fs/proc/task_mmu....
1483
  		seq_puts(m, " huge");
fc360bd9c   Andrew Morton   /proc/self/numa_m...
1484

f69ff943d   Stephen Wilson   mm: proc: move sh...
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
  	walk_page_range(vma->vm_start, vma->vm_end, &walk);
  
  	if (!md->pages)
  		goto out;
  
  	if (md->anon)
  		seq_printf(m, " anon=%lu", md->anon);
  
  	if (md->dirty)
  		seq_printf(m, " dirty=%lu", md->dirty);
  
  	if (md->pages != md->anon && md->pages != md->dirty)
  		seq_printf(m, " mapped=%lu", md->pages);
  
  	if (md->mapcount_max > 1)
  		seq_printf(m, " mapmax=%lu", md->mapcount_max);
  
  	if (md->swapcache)
  		seq_printf(m, " swapcache=%lu", md->swapcache);
  
  	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
  		seq_printf(m, " active=%lu", md->active);
  
  	if (md->writeback)
  		seq_printf(m, " writeback=%lu", md->writeback);
948927ee9   David Rientjes   mm, mempolicy: ma...
1510
1511
1512
  	for_each_node_state(nid, N_MEMORY)
  		if (md->node[nid])
  			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1513
1514
1515
  out:
  	seq_putc(m, '\n');
b8c20a9b8   Oleg Nesterov   fs/proc/task_mmu....
1516
  	m_cache_vma(m, vma);
f69ff943d   Stephen Wilson   mm: proc: move sh...
1517
1518
  	return 0;
  }
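  /*
   * For reference, a line produced by show_numa_map() looks roughly like
   * the following (the address, path and counts below are made up):
   *
   *	7f1ac0c92000 default file=/usr/lib64/libc-2.20.so mapped=12 mapmax=31 N0=12
   *
   * i.e. the VMA start address and mempolicy string, followed only by the
   * counters that are non-zero for that VMA.
   */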
5b52fc890   Stephen Wilson   proc: allocate st...
1519

b76437579   Siddhesh Poyarekar   procfs: mark thre...
1520
1521
1522
1523
1524
1525
1526
1527
1528
  static int show_pid_numa_map(struct seq_file *m, void *v)
  {
  	return show_numa_map(m, v, 1);
  }
  
  static int show_tid_numa_map(struct seq_file *m, void *v)
  {
  	return show_numa_map(m, v, 0);
  }
03a44825b   Jan Engelhardt   procfs: constify ...
1529
  static const struct seq_operations proc_pid_numa_maps_op = {
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1530
1531
1532
1533
  	.start  = m_start,
  	.next   = m_next,
  	.stop   = m_stop,
  	.show   = show_pid_numa_map,
6e21c8f14   Christoph Lameter   [PATCH] /proc/<pi...
1534
  };
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1535

b76437579   Siddhesh Poyarekar   procfs: mark thre...
1536
1537
1538
1539
1540
1541
1542
1543
1544
  static const struct seq_operations proc_tid_numa_maps_op = {
  	.start  = m_start,
  	.next   = m_next,
  	.stop   = m_stop,
  	.show   = show_tid_numa_map,
  };
  
  static int numa_maps_open(struct inode *inode, struct file *file,
  			  const struct seq_operations *ops)
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1545
  {
4db7d0ee1   Oleg Nesterov   fs/proc/task_mmu....
1546
1547
  	return proc_maps_open(inode, file, ops,
  				sizeof(struct numa_maps_private));
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1548
  }
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
  static int pid_numa_maps_open(struct inode *inode, struct file *file)
  {
  	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
  }
  
  static int tid_numa_maps_open(struct inode *inode, struct file *file)
  {
  	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
  }
  
  const struct file_operations proc_pid_numa_maps_operations = {
  	.open		= pid_numa_maps_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
29a40ace8   Oleg Nesterov   fs/proc/task_mmu....
1563
  	.release	= proc_map_release,
b76437579   Siddhesh Poyarekar   procfs: mark thre...
1564
1565
1566
1567
  };
  
  const struct file_operations proc_tid_numa_maps_operations = {
  	.open		= tid_numa_maps_open,
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1568
1569
  	.read		= seq_read,
  	.llseek		= seq_lseek,
29a40ace8   Oleg Nesterov   fs/proc/task_mmu....
1570
  	.release	= proc_map_release,
662795deb   Eric W. Biederman   [PATCH] proc: Mov...
1571
  };
f69ff943d   Stephen Wilson   mm: proc: move sh...
1572
  #endif /* CONFIG_NUMA */