/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);

#if defined(SPLIT_RSS_COUNTING)

void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		__sync_task_rss_stat(task, task->mm);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = 0;

	/*
	 * Don't use task->mm here, to avoid needing task_get_mm():
	 * the caller must guarantee that task->mm stays valid.
	 */
	val = atomic_long_read(&mm->rss_stat.count[member]);
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative.  A negative value is never meaningful to users.
	 */
	if (val < 0)
		return 0;
	return (unsigned long)val;
}

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	__sync_task_rss_stat(task, mm);
}
#else

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
}
#endif
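
/*
 * Illustrative sketch (not part of memory.c): how a fault-path caller is
 * expected to use the fast counters above.  Updates go to the per-task
 * cache when current owns the mm, and check_sync_rss_stat() folds them
 * back into mm->rss_stat roughly once per TASK_RSS_EVENTS_THRESH events,
 * so the global counters can lag by at most ~64 pages per task.
 */
#if 0
static void example_account_anon_fault(struct mm_struct *mm)
{
	inc_mm_counter_fast(mm, MM_ANONPAGES);	/* cheap, non-atomic update */
	check_sync_rss_stat(current);		/* occasional fold-back */
}
#endif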

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below?  No, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
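	/*
	 * Worked example (illustrative, not from the original source),
	 * assuming 2MB PMD_SIZE and ceiling 0: freeing addr..end =
	 * 0x00300000..0x00400000 with floor = 0x00280000 rounds addr
	 * down to 0x00200000, which is below floor, so addr is bumped
	 * to 0x00400000; addr > end - 1 then makes us return without
	 * freeing the pte page covering 0x00200000..0x00400000, which
	 * the neighbouring vma above floor may still be using.
	 */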

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(current, mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
				"BUG: Bad page map: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT
		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page) {
		printk(KERN_ALERT
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);
	}
	printk(KERN_ALERT
		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	if (vma->vm_ops)
		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
				(unsigned long)vma->vm_ops->fault);
	if (vma->vm_file && vma->vm_file->f_op)
		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
				(unsigned long)vma->vm_file->f_op->mmap);
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
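
/*
 * Illustrative note (not from the original source): a MAP_PRIVATE mapping
 * always has VM_MAYWRITE set but VM_SHARED clear, so is_cow_mapping()
 * returns true for it; a MAP_SHARED mapping has VM_SHARED set, so it
 * returns false.  Only the former can hold COWed pages.
 */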

#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return zero_pfn;
}
#endif

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
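/*
 * Worked example of the linearity rule (illustrative, not from the
 * original source): after remap_pfn_range(vma, vma->vm_start, 0x1000,
 * size, prot), vm_pgoff is 0x1000, so the pte one page into the vma
 * holds pfn 0x1001 and the rule above holds: vm_normal_page() says
 * "special" and returns NULL.  If that page is later COWed, the new
 * anonymous page comes from the allocator with an unrelated pfn, the
 * equality fails, and the page is treated as normal.
 */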
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (!is_zero_pfn(pfn))
			print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;
check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (swap_duplicate(entry) < 0)
				return entry.val;

			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (likely(!non_swap_entry(entry)))
				rss[MM_SWAPENTS]++;
			else if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		if (PageAnon(page))
			rss[MM_ANONPAGES]++;
		else
			rss[MM_FILEPAGES]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};

again:
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
							vma, addr, rss);
		if (entry.val)
			break;
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (unlikely(is_pfn_mapping(vma))) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}
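
/*
 * Illustrative sketch (not from the original source): the main caller of
 * copy_page_range() is dup_mmap() in kernel/fork.c, which walks the
 * parent's vma list during fork(), roughly:
 *
 *	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 *		...duplicate the vma into the child mm...
 *		retval = copy_page_range(mm, oldmm, mpnt);
 *		if (retval)
 *			goto out;
 *	}
 */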

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int rss[NR_MM_COUNTERS];

	init_rss_vec(rss);

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				rss[MM_ANONPAGES]--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent) &&
				    likely(!VM_SequentialReadHint(vma)))
					mark_page_accessed(page);
				rss[MM_FILEPAGES]--;
			}
			page_remove_rmap(page);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (pte_file(ptent)) {
			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else {
			swp_entry_t entry = pte_to_swp_entry(ptent);

			if (!non_swap_entry(entry))
				rss[MM_SWAPENTS]--;
			if (unlikely(!free_swap_and_cache(entry)))
				print_bad_pte(vma, addr, ptent, NULL);
		}
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	mem_cgroup_uncharge_start();
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);
	mem_cgroup_uncharge_end();

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
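
/*
 * Illustrative arithmetic (not from the original source): with 4KB pages,
 * ZAP_BLOCK_SIZE is 32KB per batch under CONFIG_PREEMPT and 4MB per batch
 * otherwise, so a preemptible kernel re-checks for resched/lock pressure
 * roughly 128 times more often while zapping the same range.
 */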
  
  /**
   * unmap_vmas - unmap a range of memory covered by a list of vma's
   * @tlbp: address of the caller's struct mmu_gather
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1048
1049
1050
1051
1052
1053
   * @vma: the starting vma
   * @start_addr: virtual address at which to start unmapping
   * @end_addr: virtual address at which to end unmapping
   * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
   * @details: details of nonlinear truncation or shared cache invalidation
   *
ee39b37b2   Hugh Dickins   [PATCH] freepgt: ...
1054
   * Returns the end address of the unmapping (restart addr if interrupted).
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1055
   *
508034a32   Hugh Dickins   [PATCH] mm: unmap...
1056
   * Unmap all pages in the vma list.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1057
   *
508034a32   Hugh Dickins   [PATCH] mm: unmap...
1058
1059
   * We aim to not hold locks for too long (for scheduling latency reasons).
   * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
   * return the ending mmu_gather to the caller.
   *
   * Only addresses between `start' and `end' will be unmapped.
   *
   * The VMA list must be sorted in ascending virtual address order.
   *
   * unmap_vmas() assumes that the caller will flush the whole unmapped address
   * range after unmap_vmas() returns.  So the only responsibility here is to
   * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
   * drops the lock and schedules.
   */
508034a32   Hugh Dickins   [PATCH] mm: unmap...
1071
  unsigned long unmap_vmas(struct mmu_gather **tlbp,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1072
1073
1074
1075
  		struct vm_area_struct *vma, unsigned long start_addr,
  		unsigned long end_addr, unsigned long *nr_accounted,
  		struct zap_details *details)
  {
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1076
  	long zap_work = ZAP_BLOCK_SIZE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1077
1078
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details ? details->i_mmap_lock : NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		if (unlikely(is_pfn_mapping(vma)))
			untrack_pfn_vma(vma, 0, 0);

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for a valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
						pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}
  
  /**
   * zap_page_range - remove user pages in a given range
   * @vma: vm_area_struct holding the applicable pages
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
   * @details: details of nonlinear truncation or shared cache invalidation
   */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
  }
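
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * drops every user page in [start, start + len) of a vma, the way the
 * madvise(MADV_DONTNEED) path does.  "vma", "start" and "len" are assumed
 * to come from the caller, with mmap_sem held and the range inside the vma.
 */
static inline void example_discard_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long len)
{
	/* NULL details: a plain unmap, no truncation bookkeeping */
	zap_page_range(vma, start, len, NULL);
}
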
  /**
   * zap_vma_ptes - remove ptes mapping the vma
   * @vma: vm_area_struct holding ptes to be zapped
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
   *
   * This function only unmaps ptes assigned to VM_PFNMAP vmas.
   *
   * The entire address range must be fully contained within the vma.
   *
   * Returns 0 if successful.
   */
  int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  		unsigned long size)
  {
  	if (address < vma->vm_start || address + size > vma->vm_end ||
  	    		!(vma->vm_flags & VM_PFNMAP))
  		return -1;
  	zap_page_range(vma, address, size, NULL);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(zap_vma_ptes);
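
/*
 * Illustrative sketch (not part of the original file): a driver that
 * populated a VM_PFNMAP vma (a GRU-style device window, say) revokes the
 * mapping before reusing the underlying resource.  The caller is assumed
 * to hold mmap_sem and to own the whole vma.
 */
static inline int example_revoke_window(struct vm_area_struct *vma)
{
	return zap_vma_ptes(vma, vma->vm_start,
			    vma->vm_end - vma->vm_start);
}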

/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			/* user gate pages are read-only */
			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
			continue;
		}

		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;

				ret = handle_mm_fault(mm, vma, start,
					(foll_flags & FOLL_WRITE) ?
					FAULT_FLAG_WRITE : 0);
				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret &
					    (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
						return i ? i : -EFAULT;
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}

/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
  EXPORT_SYMBOL(get_user_pages);
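
/*
 * Illustrative sketch (not part of the original file): pinning a single
 * page of the current task for writing, then releasing it exactly as the
 * comment above prescribes (set_page_dirty_lock before put_page).
 * "uaddr" is a hypothetical user address supplied by the caller.
 */
static int example_pin_one_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     1, 1 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret < 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... touch the page via kmap(page)/kunmap(page) here ... */

	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}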

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
		return NULL;

	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
  #endif /* CONFIG_ELF_CORE */
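
/*
 * Illustrative sketch (not part of the original file): how a core-dump
 * writer consumes get_dump_page(), leaving a hole whenever NULL comes
 * back.  example_emit() and example_skip() are hypothetical stand-ins
 * for the dump writer's own output helpers.
 */
static int example_dump_range(struct file *file, unsigned long start,
			      unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);
			int ok = example_emit(file, kaddr, PAGE_SIZE);

			kunmap(page);
			page_cache_release(page);
			if (!ok)
				return -EIO;
		} else if (!example_skip(file, PAGE_SIZE)) {
			return -EIO;	/* hole could not be recorded */
		}
	}
	return 0;
}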

pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
  EXPORT_SYMBOL(vm_insert_page);
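
/*
 * Illustrative sketch (not part of the original file): a driver mmap
 * method backing a one-page window with a page it allocated, following
 * the rules above (plain, non-compound page; vma protections already
 * chosen by the mmap caller).  "example_page" is hypothetical state
 * the driver owns.
 */
static int example_mmap_one_page(struct vm_area_struct *vma,
				 struct page *example_page)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, example_page);
}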

static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range).  However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;
	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);

	return ret;
}
  EXPORT_SYMBOL(vm_insert_pfn);
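
/*
 * Illustrative sketch (not part of the original file): a ->fault handler
 * for a VM_PFNMAP vma over device memory.  Callers map vm_insert_pfn()'s
 * return value onto VM_FAULT_* codes; "example_base_pfn" is a hypothetical
 * per-device base pfn.
 */
static unsigned long example_base_pfn;	/* hypothetical device base */

static int example_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;

	switch (vm_insert_pfn(vma, addr, example_base_pfn + vmf->pgoff)) {
	case 0:
	case -EBUSY:		/* lost a race; the pte is already there */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}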

int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would there be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
  	return 0;
  }
  
  static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		if (remap_pte_range(mm, pmd, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
  
  static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		if (remap_pmd_range(mm, pud, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of kernel physical memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
  	struct mm_struct *mm = vma->vm_mm;
  	int err;
  
  	/*
  	 * Physically remapped pages are special. Tell the
  	 * rest of the world about it:
  	 *   VM_IO tells people not to look at these pages
  	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not
		 * needed from higher level routine calling unmap_vmas
		 */
		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
		vma->vm_flags &= ~VM_PFN_AT_MMAP;
		return -EINVAL;
	}

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));

	return err;
}
  EXPORT_SYMBOL(remap_pfn_range);
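
/*
 * Illustrative sketch (not part of the original file): the classic driver
 * mmap method, remapping one physically contiguous region into the whole
 * vma in a single call, so the VM_PFN_AT_MMAP fast path above applies.
 * EXAMPLE_PHYS_BASE is a hypothetical device address.
 */
#define EXAMPLE_PHYS_BASE	0x10000000UL	/* hypothetical */

static int example_phys_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (EXAMPLE_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}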

static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));
	arch_enter_lazy_mmu_mode();
	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

  	if (mm != &init_mm)
  		pte_unmap_unlock(pte-1, ptl);
  	return err;
  }
  
  static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	int err;

	BUG_ON(pud_huge(*pud));
  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pmd++, addr = next, addr != end);
  	return err;
  }
  
  static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pud_t *pud;
  	unsigned long next;
  	int err;
  
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pud++, addr = next, addr != end);
  	return err;
  }
  
  /*
   * Scan a region of virtual memory, filling in page tables as necessary
   * and calling a provided function on each leaf page table.
   */
  int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
  			unsigned long size, pte_fn_t fn, void *data)
  {
  	pgd_t *pgd;
  	unsigned long next;
	unsigned long start = addr, end = addr + size;
	int err;

	BUG_ON(addr >= end);
	mmu_notifier_invalidate_range_start(mm, start, end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return err;
}
  EXPORT_SYMBOL_GPL(apply_to_page_range);
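
/*
 * Illustrative sketch (not part of the original file): a pte_fn_t callback
 * counting present entries over a range, in the style of the Xen and
 * alloc_vm_area() users of this interface.  Remember the walk *allocates*
 * page tables on the way down, so apply it only to ranges you own.
 */
static int example_count_present(pte_t *pte, pgtable_t token,
				 unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* non-zero would abort the walk */
}

/* usage: apply_to_page_range(&init_mm, addr, size,
 *			      example_count_present, &count); */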

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}
  
  /*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int reuse = 0, ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page) && !PageKsm(old_page)) {
		if (!trylock_page(old_page)) {
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);
			lock_page(old_page);
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				page_cache_release(old_page);
				goto unlock;
			}
			page_cache_release(old_page);
		}
		reuse = reuse_swap_page(old_page);
		if (reuse)
			/*
			 * The page is all ours.  Move it to our anon_vma so
			 * the rmap code will not search our parent or siblings.
			 * Protected against the rmap code by the page lock.
			 */
			page_move_anon_rmap(old_page, vma, address);
		unlock_page(old_page);
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			struct vm_fault vmf;
			int tmp;

			vmf.virtual_address = (void __user *)(address &
								PAGE_MASK);
			vmf.pgoff = old_page->index;
			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
			vmf.page = old_page;

			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
			if (unlikely(tmp &
					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
				ret = tmp;
				goto unwritable_page;
			}
			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
				lock_page(old_page);
				if (!old_page->mapping) {
					ret = 0; /* retry the fault */
					unlock_page(old_page);
					goto unwritable_page;
				}
			} else
				VM_BUG_ON(!PageLocked(old_page));

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it.  If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				page_cache_release(old_page);
				goto unlock;
			}

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);
		reuse = 1;
	}

	if (reuse) {
reuse:
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry, 1))
			update_mmu_cache(vma, address, page_table);
		ret |= VM_FAULT_WRITE;
		goto unlock;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address, vma);
	}
	__SetPageUptodate(new_page);
	/*
	 * Don't let another task, with possibly unlocked vma,
	 * keep the mlocked page.
	 */
	if ((vma->vm_flags & VM_LOCKED) && old_page) {
		lock_page(old_page);	/* for LRU manipulation */
		clear_page_mlock(old_page);
		unlock_page(old_page);
	}

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm, MM_FILEPAGES);
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
945754a17   Nick Piggin   mm: fix race in C...
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
  		if (old_page) {
  			/*
  			 * Only after switching the pte to the new page may
  			 * we remove the mapcount here. Otherwise another
  			 * process may come and find the rmap count decremented
  			 * before the pte is switched to the new page, and
  			 * "reuse" the old page writing into it while our pte
  			 * here still points into it and can be read by other
  			 * threads.
  			 *
  			 * The critical issue is to order this
  			 * page_remove_rmap with the ptp_clear_flush above.
  			 * Those stores are ordered by (if nothing else,)
  			 * the barrier present in the atomic_add_negative
  			 * in page_remove_rmap.
  			 *
  			 * Then the TLB flush in ptep_clear_flush ensures that
  			 * no process can access the old page before the
  			 * decremented mapcount is visible. And the old page
  			 * cannot be reused until after the decremented
  			 * mapcount is visible. So transitively, TLBs to
  			 * old page will be flushed before it can be reused.
  			 */
edc315fd2   Hugh Dickins   badpage: remove v...
2241
  			page_remove_rmap(old_page);
945754a17   Nick Piggin   mm: fix race in C...
2242
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2243
2244
  		/* Free the old page.. */
  		new_page = old_page;
f33ea7f40   Nick Piggin   [PATCH] fix get_u...
2245
  		ret |= VM_FAULT_WRITE;
8a9f3ccd2   Balbir Singh   Memory controller...
2246
2247
  	} else
  		mem_cgroup_uncharge_page(new_page);
920fc356f   Hugh Dickins   [PATCH] unpaged: ...
2248
2249
2250
2251
  	if (new_page)
  		page_cache_release(new_page);
  	if (old_page)
  		page_cache_release(old_page);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2252
  unlock:
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2253
  	pte_unmap_unlock(page_table, ptl);
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2254
  	if (dirty_page) {
79352894b   Nick Piggin   mm: fix clear_pag...
2255
2256
2257
2258
2259
2260
2261
2262
  		/*
  		 * Yes, Virginia, this is actually required to prevent a race
  		 * with clear_page_dirty_for_io() from clearing the page dirty
  		 * bit after it clear all dirty ptes, but before a racing
  		 * do_wp_page installs a dirty pte.
  		 *
  		 * do_no_page is protected similarly.
  		 */
b827e496c   Nick Piggin   mm: close page_mk...
2263
2264
2265
2266
  		if (!page_mkwrite) {
  			wait_on_page_locked(dirty_page);
  			set_page_dirty_balance(dirty_page, page_mkwrite);
  		}
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2267
  		put_page(dirty_page);
b827e496c   Nick Piggin   mm: close page_mk...
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
  		if (page_mkwrite) {
  			struct address_space *mapping = dirty_page->mapping;
  
  			set_page_dirty(dirty_page);
  			unlock_page(dirty_page);
  			page_cache_release(dirty_page);
  			if (mapping)	{
  				/*
  				 * Some device drivers do not set page.mapping
  				 * but still dirty their pages
  				 */
  				balance_dirty_pages_ratelimited(mapping);
  			}
  		}
  
  		/* file_update_time outside page_lock */
  		if (vma->vm_file)
  			file_update_time(vma->vm_file);
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2286
  	}
f33ea7f40   Nick Piggin   [PATCH] fix get_u...
2287
  	return ret;
8a9f3ccd2   Balbir Singh   Memory controller...
2288
  oom_free_new:
6dbf6d3bb   Hugh Dickins   memcg: page_cache...
2289
  	page_cache_release(new_page);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2290
  oom:
b827e496c   Nick Piggin   mm: close page_mk...
2291
2292
2293
2294
2295
  	if (old_page) {
  		if (page_mkwrite) {
  			unlock_page(old_page);
  			page_cache_release(old_page);
  		}
920fc356f   Hugh Dickins   [PATCH] unpaged: ...
2296
  		page_cache_release(old_page);
b827e496c   Nick Piggin   mm: close page_mk...
2297
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2298
  	return VM_FAULT_OOM;
9637a5efd   David Howells   [PATCH] add page_...
2299
2300
2301
  
  unwritable_page:
  	page_cache_release(old_page);
c2ec175c3   Nick Piggin   mm: page_mkwrite ...
2302
  	return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
  }
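
/*
 * Editor's note on the tail of do_wp_page() above: the "reuse" path
 * keeps the existing page and merely upgrades the pte to writable,
 * while the copy path ("gotten") allocates a fresh page (zero-filled
 * when the source was the zero page), charges it to the memcg, and
 * switches the pte over with set_pte_at_notify() so that secondary
 * MMUs such as KVM's shadow page tables see the new page at once.
 */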
  
/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around.  Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again.  It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}
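
/*
 * Worked example (editor's note, assuming 4K pages): ~PAGE_MASK is
 * 0xfff, so is_restart_addr() is true exactly for page-aligned values.
 * unmap_mapping_range() below bumps truncate_count past page-aligned
 * values, so a page-aligned vm_truncate_count can only be a saved
 * restart address, never a generation number:
 *
 *	is_restart_addr(0x2000)	-> 1	(page-aligned: a restart address)
 *	is_restart_addr(0x2001)	-> 0	(an ordinary truncate_count)
 */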
  
static int unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long restart_addr;
	int need_break;

	/*
	 * files that support invalidating or truncating portions of the
	 * file from under mmaped areas must have their ->fault function
	 * return a locked page (and set VM_FAULT_LOCKED in the return).
	 * This provides synchronisation against concurrent unmapping here.
	 */

again:
	restart_addr = vma->vm_truncate_count;
	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
		start_addr = restart_addr;
		if (start_addr >= end_addr) {
			/* Top of vma has been split off since last time */
			vma->vm_truncate_count = details->truncate_count;
			return 0;
		}
	}

	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);
	need_break = need_resched() || spin_needbreak(details->i_mmap_lock);

	if (restart_addr >= end_addr) {
		/* We have now completed this vma: mark it so */
		vma->vm_truncate_count = details->truncate_count;
		if (!need_break)
			return 0;
	} else {
		/* Note restart_addr in vma's truncate_count field */
		vma->vm_truncate_count = restart_addr;
		if (!need_break)
			goto again;
	}

	spin_unlock(details->i_mmap_lock);
	cond_resched();
	spin_lock(details->i_mmap_lock);
	return -EINTR;
}
  
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

restart:
	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		if (unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details) < 0)
			goto restart;
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
restart:
	list_for_each_entry(vma, head, shared.vm_set.list) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;
		details->nonlinear_vma = vma;
		if (unmap_mapping_range_vma(vma, vma->vm_start,
					vma->vm_end, details) < 0)
			goto restart;
	}
}
  
/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from truncate_pagecache(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows ? NULL : mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;
	details.i_mmap_lock = &mapping->i_mmap_lock;

	spin_lock(&mapping->i_mmap_lock);

	/* Protect against endless unmapping loops */
	mapping->truncate_count++;
	if (unlikely(is_restart_addr(mapping->truncate_count))) {
		if (mapping->truncate_count == 0)
			reset_vma_truncate_counts(mapping);
		mapping->truncate_count++;
	}
	details.truncate_count = mapping->truncate_count;

	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);
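
/*
 * Example (editor's sketch, not part of this file): a filesystem
 * shrinking an inode to 'newsize' pairs this with a pagecache
 * truncate, roughly what truncate_pagecache() does:
 *
 *	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
 *
 * holebegin is rounded down to a page boundary, so passing
 * newsize + PAGE_SIZE - 1 keeps a partial final page mapped, and
 * even_cows=1 drops private COWed copies too, as truncation must.
 * The second call catches ptes COWed while truncate_inode_pages()
 * was running.
 */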

int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 */
	if (!inode->i_op->truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	down_write(&inode->i_alloc_sem);
	unmap_mapping_range(mapping, offset, (end - offset), 1);
	truncate_inode_pages_range(mapping, offset, end);
	unmap_mapping_range(mapping, offset, (end - offset), 1);
	inode->i_op->truncate_range(inode, offset, end);
	up_write(&inode->i_alloc_sem);
	mutex_unlock(&inode->i_mutex);

	return 0;
}
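
/*
 * Usage sketch (editor's note): the caller in this era is
 * madvise(MADV_REMOVE), which converts the vma range to byte offsets
 * in the backing file before punching the hole, roughly:
 *
 *	offset = (loff_t)(start - vma->vm_start)
 *			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 *	endoff = (loff_t)(end - vma->vm_start - 1)
 *			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 *	error = vmtruncate_range(mapping->host, offset, endoff);
 */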

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	spinlock_t *ptl;
	struct page *page;
	swp_entry_t entry;
	pte_t pte;
	struct mem_cgroup *ptr = NULL;
	int ret = 0;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
			migration_entry_wait(mm, pmd, address);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, address, orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		grab_swap_token(mm); /* Contend for token _before_ read-in */
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
	} else if (PageHWPoison(page)) {
		/*
		 * hwpoisoned dirty swapcache pages are kept for killing
		 * owner processes (which may be unknown at hwpoison time)
		 */
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		goto out_release;
	}

	lock_page(page);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);

	page = ksm_might_need_to_copy(page, vma, address);
	if (!page) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/*
	 * The page isn't present yet, go ahead with the fault.
	 *
	 * Be careful about the sequence of operations here.
	 * To get its accounting right, reuse_swap_page() must be called
	 * while the page is counted on swap but not yet in mapcount i.e.
	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
	 * must be called after the swap_free(), or it will never succeed.
	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
	 * in page->private. In this case, a record in swap_cgroup is silently
	 * discarded at swap_free().
	 */

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	dec_mm_counter_fast(mm, MM_SWAPENTS);
	pte = mk_pte(page, vma->vm_page_prot);
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		flags &= ~FAULT_FLAG_WRITE;
	}
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);
	/* It's better to call commit-charge after rmap is established */
	mem_cgroup_commit_charge_swapin(page, ptr);

	swap_free(entry);
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
		if (ret & VM_FAULT_ERROR)
			ret &= VM_FAULT_ERROR;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	mem_cgroup_cancel_charge_swapin(ptr);
	pte_unmap_unlock(page_table, ptl);
out_page:
	unlock_page(page);
out_release:
	page_cache_release(page);
	return ret;
}
  
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	pte_unmap(page_table);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}
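
/*
 * Editor's note: the read-fault path above is why freshly mmap'd
 * anonymous memory costs nothing until written.  A load maps the
 * shared zero page read-only via pte_mkspecial(); the first store
 * then faults again with FAULT_FLAG_WRITE and do_wp_page() substitutes
 * a private zeroed page.  For example:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char c = p[0];	- read fault: maps the zero page
 *	p[0] = 1;	- write fault: a real page is allocated
 */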
  
/*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	pte_t entry;
	int anon = 0;
	int charged = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
		return ret;

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		return VM_FAULT_HWPOISON;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			anon = 1;
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
						vma, address);
			if (!page) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
				ret = VM_FAULT_OOM;
				page_cache_release(page);
				goto out;
			}
			charged = 1;
			/*
			 * Don't let another task, with possibly unlocked vma,
			 * keep the mlocked page.
			 */
			if (vma->vm_flags & VM_LOCKED)
				clear_page_mlock(vmf.page);
			copy_user_highpage(page, vmf.page, address, vma);
			__SetPageUptodate(page);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				int tmp;

				unlock_page(page);
				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
				if (unlikely(tmp &
					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
					ret = tmp;
					goto unwritable_page;
				}
				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
					lock_page(page);
					if (!page->mapping) {
						ret = 0; /* retry the fault */
						unlock_page(page);
						goto unwritable_page;
					}
				} else
					VM_BUG_ON(!PageLocked(page));
				page_mkwrite = 1;
			}
		}

	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (anon) {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter_fast(mm, MM_FILEPAGES);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}
		set_pte_at(mm, address, page_table, entry);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, page_table);
	} else {
		if (charged)
			mem_cgroup_uncharge_page(page);
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}

	pte_unmap_unlock(page_table, ptl);

out:
	if (dirty_page) {
		struct address_space *mapping = page->mapping;

		if (set_page_dirty(dirty_page))
			page_mkwrite = 1;
		unlock_page(dirty_page);
		put_page(dirty_page);
		if (page_mkwrite && mapping) {
			/*
			 * Some device drivers do not set page->mapping but
			 * still dirty their pages
			 */
			balance_dirty_pages_ratelimited(mapping);
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);
	} else {
		unlock_page(vmf.page);
		if (anon)
			page_cache_release(vmf.page);
	}

	return ret;

unwritable_page:
	page_cache_release(page);
	return ret;
}
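
/*
 * Editor's sketch of the ->page_mkwrite() contract exercised above
 * (hypothetical handler, not from this tree): the callback is invoked
 * with the page unlocked, revalidates it, reserves whatever the
 * filesystem needs for a future writeback, and returns with the page
 * locked:
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma,
 *				    struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *
 *		lock_page(page);
 *		if (!page->mapping) {
 *			unlock_page(page);
 *			return 0;	- truncated: __do_fault() retries
 *		}
 *		- ... reserve blocks so a later writeback cannot fail ...
 *		return VM_FAULT_LOCKED;
 *	}
 *
 * Returning without VM_FAULT_LOCKED is tolerated: __do_fault() then
 * relocks the page itself and uses a cleared page->mapping to detect
 * a racing truncate, as in the retry path above.
 */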

static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff;

	flags |= FAULT_FLAG_NONLINEAR;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, address, orig_pte, NULL);
		return VM_FAULT_SIGBUS;
	}

	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (i.e. the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, unsigned int flags)
{
	pte_t entry;
	spinlock_t *ptl;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (vma->vm_ops) {
				if (likely(vma->vm_ops->fault))
					return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
		if (pte_file(entry))
			return do_nonlinear_fault(mm, vma, address,
					pte, pmd, flags, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, flags, entry);
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
		update_mmu_cache(vma, address, pte);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (flags & FAULT_FLAG_WRITE)
			flush_tlb_page(vma, address);
	}
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
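
/*
 * Dispatch summary for handle_pte_fault() above (editor's note):
 *
 *	pte state				handler
 *	-----------------------------------	------------------------
 *	none, vma has ->fault			do_linear_fault()
 *	none, anonymous				do_anonymous_page()
 *	not present, pte_file()			do_nonlinear_fault()
 *	not present, swap entry			do_swap_page()
 *	present, write to read-only pte		do_wp_page()
 *	present, otherwise			access/dirty bit fixup
 */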
  
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);

	/* do counter updates before entering really critical section. */
	check_sync_rss_stat(current);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, flags);

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		return VM_FAULT_OOM;

	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
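
/*
 * Example caller (editor's sketch, simplified from the arch fault
 * handlers): the page-fault exception path validates the vma under
 * mmap_sem and then lands here, roughly:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start <= address)
 *		fault = handle_mm_fault(mm, vma, address,
 *					write ? FAULT_FLAG_WRITE : 0);
 *	up_read(&mm->mmap_sem);
 *
 * VM_FAULT_OOM and VM_FAULT_SIGBUS bits in the result then drive the
 * OOM-kill and signal paths; VM_FAULT_MAJOR feeds fault accounting.
 */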
  
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	write = (vma->vm_flags & VM_WRITE) != 0;
	BUG_ON(addr >= end);
	BUG_ON(end > vma->vm_end);
	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -EFAULT;
}
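
/*
 * Usage sketch (editor's note): callers use this to pre-fault a whole
 * range, e.g. the mlock path populating a special vma, roughly:
 *
 *	if (vma->vm_flags & VM_LOCKED)
 *		error = make_pages_present(start, end);
 *
 * Each absent page takes a minor fault via get_user_pages() (pages
 * array NULL, so nothing stays pinned), leaving the range resident on
 * a successful return.
 */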

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3164

f8ad0f499   Johannes Weiner   mm: introduce fol...
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
  static int follow_pte(struct mm_struct *mm, unsigned long address,
  		pte_t **ptepp, spinlock_t **ptlp)
  {
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	pte_t *ptep;
  
  	pgd = pgd_offset(mm, address);
  	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  		goto out;
  
  	pud = pud_offset(pgd, address);
  	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  		goto out;
  
  	pmd = pmd_offset(pud, address);
  	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  		goto out;
  
  	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
  	if (pmd_huge(*pmd))
  		goto out;
  
  	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
  	if (!ptep)
  		goto out;
  	if (!pte_present(*ptep))
  		goto unlock;
  	*ptepp = ptep;
  	return 0;
  unlock:
  	pte_unmap_unlock(ptep, *ptlp);
  out:
  	return -EINVAL;
  }
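
/*
 * Illustrative sketch, not part of the original file: on success,
 * follow_pte() returns with the pte mapped and its lock held, so the
 * caller must pair it with pte_unmap_unlock(), as follow_pfn() below
 * does.  example_snapshot_pte() is hypothetical.
 */
static inline int example_snapshot_pte(struct mm_struct *mm,
				       unsigned long address, pte_t *value)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*value = *ptep;		/* copy while the pte lock is held */
	pte_unmap_unlock(ptep, ptl);
	return 0;
}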
  /**
   * follow_pfn - look up PFN at a user virtual address
   * @vma: memory mapping
   * @address: user virtual address
   * @pfn: location to store found PFN
   *
   * Only IO mappings and raw PFN mappings are allowed.
   *
 * Returns zero and the pfn at @pfn on success, a negative errno otherwise.
   */
  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  	unsigned long *pfn)
  {
  	int ret = -EINVAL;
  	spinlock_t *ptl;
  	pte_t *ptep;
  
  	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  		return ret;
  
  	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
  	if (ret)
  		return ret;
  	*pfn = pte_pfn(*ptep);
  	pte_unmap_unlock(ptep, ptl);
  	return 0;
  }
  EXPORT_SYMBOL(follow_pfn);
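
/*
 * Illustrative sketch, not part of the original file: a driver that set
 * up a VM_PFNMAP mapping (e.g. with remap_pfn_range()) could recover
 * the frame behind a user address like this; the vma is assumed to be
 * found under mmap_sem.  example_user_addr_to_pfn() is hypothetical.
 */
static inline int example_user_addr_to_pfn(struct vm_area_struct *vma,
					   unsigned long addr,
					   unsigned long *pfn)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	return follow_pfn(vma, addr, pfn);
}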
  #ifdef CONFIG_HAVE_IOREMAP_PROT
  int follow_phys(struct vm_area_struct *vma,
  		unsigned long address, unsigned int flags,
  		unsigned long *prot, resource_size_t *phys)
  {
  	int ret = -EINVAL;
  	pte_t *ptep, pte;
  	spinlock_t *ptl;

  	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  		goto out;

  	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
  		goto out;
  	pte = *ptep;

  	if ((flags & FOLL_WRITE) && !pte_write(pte))
  		goto unlock;
  
  	*prot = pgprot_val(pte_pgprot(pte));
  	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

  	ret = 0;
  unlock:
  	pte_unmap_unlock(ptep, ptl);
  out:
  	return ret;
  }
  
  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
  			void *buf, int len, int write)
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
  	void __iomem *maddr;
  	int offset = addr & (PAGE_SIZE-1);
  	if (follow_phys(vma, addr, write, &prot, &phys_addr))
  		return -EINVAL;
  
  	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
  	if (write)
  		memcpy_toio(maddr + offset, buf, len);
  	else
  		memcpy_fromio(buf, maddr + offset, len);
  	iounmap(maddr);
  
  	return len;
  }
  #endif
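
/*
 * Illustrative sketch, not part of the original file: on
 * CONFIG_HAVE_IOREMAP_PROT kernels, generic_access_phys() can be wired
 * up as a vma's ->access method so that access_process_vm() below can
 * reach VM_IO/VM_PFNMAP mappings.  example_mem_vm_ops is hypothetical:
 *
 *	static const struct vm_operations_struct example_mem_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *
 * and the driver's ->mmap() would set vma->vm_ops = &example_mem_vm_ops.
 */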
/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
  int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
  {
  	struct mm_struct *mm;
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
  
  	mm = get_task_mm(tsk);
  	if (!mm)
  		return 0;
  
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
  		int bytes, ret, offset;
  		void *maddr;
  		struct page *page = NULL;
  
  		ret = get_user_pages(tsk, mm, addr, 1,
  				write, 1, &page, &vma);
  		if (ret <= 0) {
  			/*
  			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
  			 * we can access using slightly different code.
  			 */
  #ifdef CONFIG_HAVE_IOREMAP_PROT
  			vma = find_vma(mm, addr);
  			if (!vma)
  				break;
  			if (vma->vm_ops && vma->vm_ops->access)
  				ret = vma->vm_ops->access(vma, addr, buf,
  							  len, write);
  			if (ret <= 0)
  #endif
  				break;
  			bytes = ret;
  		} else {
  			bytes = len;
  			offset = addr & (PAGE_SIZE-1);
  			if (bytes > PAGE_SIZE-offset)
  				bytes = PAGE_SIZE-offset;
  
  			maddr = kmap(page);
  			if (write) {
  				copy_to_user_page(vma, page, addr,
  						  maddr + offset, buf, bytes);
  				set_page_dirty_lock(page);
  			} else {
  				copy_from_user_page(vma, page, addr,
  						    buf, maddr + offset, bytes);
  			}
  			kunmap(page);
  			page_cache_release(page);
  		}
  		len -= bytes;
  		buf += bytes;
  		addr += bytes;
  	}
  	up_read(&mm->mmap_sem);
  	mmput(mm);
  
  	return buf - old_buf;
  }
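
/*
 * Illustrative sketch, not part of the original file: a ptrace-style
 * peek of one word from another task, built on access_process_vm().
 * A short copy means the range was not fully mapped.
 * example_peek_word() is hypothetical.
 */
static inline int example_peek_word(struct task_struct *child,
				    unsigned long addr, unsigned long *word)
{
	int copied;

	copied = access_process_vm(child, addr, word, sizeof(*word), 0);
	return copied == sizeof(*word) ? 0 : -EIO;
}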
  
  /*
   * Print the name of a VMA.
   */
  void print_vma_addr(char *prefix, unsigned long ip)
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
	/*
	 * Do not print if we are in an atomic context
	 * (e.g. on an exception stack):
	 */
  	if (preempt_count())
  		return;
  	down_read(&mm->mmap_sem);
  	vma = find_vma(mm, ip);
  	if (vma && vma->vm_file) {
  		struct file *f = vma->vm_file;
  		char *buf = (char *)__get_free_page(GFP_KERNEL);
  		if (buf) {
  			char *p, *s;
  			p = d_path(&f->f_path, buf, PAGE_SIZE);
  			if (IS_ERR(p))
  				p = "?";
  			s = strrchr(p, '/');
  			if (s)
  				p = s+1;
  			printk("%s%s[%lx+%lx]", prefix, p,
  					vma->vm_start,
  					vma->vm_end - vma->vm_start);
  			free_page((unsigned long)buf);
  		}
  	}
	up_read(&mm->mmap_sem);
  }
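
/*
 * Illustrative sketch, not part of the original file: architecture
 * fault handlers typically call print_vma_addr() while composing a
 * one-line diagnostic for a faulting process, along the lines of:
 *
 *	printk(KERN_CONT "%s[%d]: segfault at %lx ip %lx",
 *	       current->comm, task_pid_nr(current), address, ip);
 *	print_vma_addr(" in ", ip);
 *	printk(KERN_CONT "\n");
 */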
  
  #ifdef CONFIG_PROVE_LOCKING
  void might_fault(void)
  {
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem.  This is safe because kernel memory doesn't
	 * get paged out, so we'll never actually fault, and the
	 * annotations below would only generate false positives.
	 */
  	if (segment_eq(get_fs(), KERNEL_DS))
  		return;
  	might_sleep();
	/*
	 * It would be nicer to annotate only the paths which are not under
	 * pagefault_disable(); however, that requires a larger audit and
	 * providing helpers like get_user_atomic().
	 */
  	if (!in_atomic() && current->mm)
  		might_lock_read(&current->mm->mmap_sem);
  }
  EXPORT_SYMBOL(might_fault);
  #endif
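
/*
 * Illustrative sketch, not part of the original file: uaccess-style
 * primitives call might_fault() before touching user memory, so lockdep
 * can flag mmap_sem problems even on runs where no fault happens.
 * example_get_user_u32() is hypothetical.
 */
static inline int example_get_user_u32(u32 *dst, const u32 __user *src)
{
	might_fault();
	return get_user(*dst, src);
}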