  /*
   *  linux/mm/memory.c
   *
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   */
  
  /*
   * demand-loading started 01.12.91 - seems it is high on the list of
   * things wanted, and it should be easy to implement. - Linus
   */
  
  /*
   * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
   * pages started 02.12.91, seems to work. - Linus.
   *
   * Tested sharing by executing about 30 /bin/sh: under the old kernel it
   * would have taken more than the 6M I have free, but it worked well as
   * far as I could see.
   *
   * Also corrected some "invalidate()"s - I wasn't doing enough of them.
   */
  
  /*
   * Real VM (paging to/from disk) started 18.12.91. Much more work and
   * thought has to go into this. Oh, well..
   * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
   *		Found it. Everything seems to work now.
   * 20.12.91  -  Ok, making the swap-device changeable like the root.
   */
  
  /*
   * 05.04.94  -  Multi-page memory management added for v1.1.
   * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
   *
   * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
   *		(Gerhard.Wichert@pdb.siemens.de)
   *
   * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
   */
  
  #include <linux/kernel_stat.h>
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/mman.h>
  #include <linux/swap.h>
  #include <linux/highmem.h>
  #include <linux/pagemap.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
  #include <linux/export.h>
  #include <linux/delayacct.h>
  #include <linux/init.h>
  #include <linux/writeback.h>
  #include <linux/memcontrol.h>
  #include <linux/mmu_notifier.h>
  #include <linux/kallsyms.h>
  #include <linux/swapops.h>
  #include <linux/elf.h>
  #include <linux/gfp.h>
  
  #include <asm/io.h>
  #include <asm/pgalloc.h>
  #include <asm/uaccess.h>
  #include <asm/tlb.h>
  #include <asm/tlbflush.h>
  #include <asm/pgtable.h>
  
  #include "internal.h"
  
  #ifndef CONFIG_NEED_MULTIPLE_NODES
  /* use the per-pgdat data instead for discontigmem - mbligh */
  unsigned long max_mapnr;
  struct page *mem_map;
  
  EXPORT_SYMBOL(max_mapnr);
  EXPORT_SYMBOL(mem_map);
  #endif
  
  unsigned long num_physpages;
  /*
   * A number of key systems in x86 including ioremap() rely on the assumption
   * that high_memory defines the upper bound on direct map memory, the end
   * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
   * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
   * and ZONE_HIGHMEM.
   */
  void * high_memory;
  
  EXPORT_SYMBOL(num_physpages);
  EXPORT_SYMBOL(high_memory);

  /*
   * Randomize the address space (stacks, mmaps, brk, etc.).
   *
   * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
   *   as ancient (libc5 based) binaries can segfault. )
   */
  int randomize_va_space __read_mostly =
  #ifdef CONFIG_COMPAT_BRK
  					1;
  #else
  					2;
  #endif
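  /*
   * The same knob is exposed at run time as the sysctl
   * kernel.randomize_va_space (/proc/sys/kernel/randomize_va_space), so
   * booting with "norandmaps" below and writing 0 to that sysctl are
   * expected to be equivalent ways of disabling the randomization.
   */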
  
  static int __init disable_randmaps(char *s)
  {
  	randomize_va_space = 0;
  	return 1;
  }
  __setup("norandmaps", disable_randmaps);
  
  unsigned long zero_pfn __read_mostly;
  unsigned long highest_memmap_pfn __read_mostly;
  
  /*
   * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
   */
  static int __init init_zero_pfn(void)
  {
  	zero_pfn = page_to_pfn(ZERO_PAGE(0));
  	return 0;
  }
  core_initcall(init_zero_pfn);


  #if defined(SPLIT_RSS_COUNTING)
  static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
  {
  	int i;
  
  	for (i = 0; i < NR_MM_COUNTERS; i++) {
  		if (task->rss_stat.count[i]) {
  			add_mm_counter(mm, i, task->rss_stat.count[i]);
  			task->rss_stat.count[i] = 0;
  		}
  	}
  	task->rss_stat.events = 0;
  }
  
  static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
  {
  	struct task_struct *task = current;
  
  	if (likely(task->mm == mm))
  		task->rss_stat.count[member] += val;
  	else
  		add_mm_counter(mm, member, val);
  }
  #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
  #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
  
  /* sync counter once per 64 page faults */
  #define TASK_RSS_EVENTS_THRESH	(64)
  static void check_sync_rss_stat(struct task_struct *task)
  {
  	if (unlikely(task != current))
  		return;
  	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
  		__sync_task_rss_stat(task, task->mm);
  }
  
  unsigned long get_mm_counter(struct mm_struct *mm, int member)
  {
  	long val = 0;
  
  	/*
  	 * Don't go through task->mm here, so that we don't need
  	 * task_get_mm(); the caller must guarantee that the mm stays
  	 * valid for the duration of the call.
  	 */
  	val = atomic_long_read(&mm->rss_stat.count[member]);
  	/*
  	 * The counter is updated asynchronously and may temporarily go
  	 * negative; a negative value is never meaningful to callers, so
  	 * report it as 0.
  	 */
  	if (val < 0)
  		return 0;
  	return (unsigned long)val;
  }
  
  void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
  {
  	__sync_task_rss_stat(task, mm);
  }
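  
  /*
   * Flow sketch for the split-RSS scheme above: a fault handler calls
   * inc_mm_counter_fast(mm, member), which updates current->rss_stat
   * without atomics whenever current->mm == mm; check_sync_rss_stat()
   * later folds the per-task deltas back into mm->rss_stat through
   * __sync_task_rss_stat() once more than TASK_RSS_EVENTS_THRESH (64)
   * events have accumulated, which is why get_mm_counter() may briefly
   * observe a negative value and clamps it to 0.
   */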
  #else /* SPLIT_RSS_COUNTING */
  
  #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
  #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
  
  static void check_sync_rss_stat(struct task_struct *task)
  {
  }
  #endif /* SPLIT_RSS_COUNTING */
  
  #ifdef HAVE_GENERIC_MMU_GATHER
  
  static int tlb_next_batch(struct mmu_gather *tlb)
  {
  	struct mmu_gather_batch *batch;
  
  	batch = tlb->active;
  	if (batch->next) {
  		tlb->active = batch->next;
  		return 1;
  	}
  
  	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
  	if (!batch)
  		return 0;
  
  	batch->next = NULL;
  	batch->nr   = 0;
  	batch->max  = MAX_GATHER_BATCH;
  
  	tlb->active->next = batch;
  	tlb->active = batch;
  
  	return 1;
  }
  
  /* tlb_gather_mmu
   *	Called to initialize an (on-stack) mmu_gather structure for page-table
   *	tear-down from @mm. The @fullmm argument is used when @mm is without
   *	users and we're going to destroy the full address space (exit/execve).
   */
  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
  {
  	tlb->mm = mm;
  
  	tlb->fullmm     = fullmm;
  	tlb->need_flush = 0;
  	tlb->fast_mode  = (num_possible_cpus() == 1);
  	tlb->local.next = NULL;
  	tlb->local.nr   = 0;
  	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
  	tlb->active     = &tlb->local;
  
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	tlb->batch = NULL;
  #endif
  }
  
  void tlb_flush_mmu(struct mmu_gather *tlb)
  {
  	struct mmu_gather_batch *batch;
  
  	if (!tlb->need_flush)
  		return;
  	tlb->need_flush = 0;
  	tlb_flush(tlb);
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	tlb_table_flush(tlb);
  #endif
  	if (tlb_fast_mode(tlb))
  		return;
  
  	for (batch = &tlb->local; batch; batch = batch->next) {
  		free_pages_and_swap_cache(batch->pages, batch->nr);
  		batch->nr = 0;
  	}
  	tlb->active = &tlb->local;
  }
  
  /* tlb_finish_mmu
   *	Called at the end of the shootdown operation to free up any resources
   *	that were required.
   */
  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  {
  	struct mmu_gather_batch *batch, *next;
  
  	tlb_flush_mmu(tlb);
  
  	/* keep the page table cache within bounds */
  	check_pgt_cache();
  
  	for (batch = tlb->local.next; batch; batch = next) {
  		next = batch->next;
  		free_pages((unsigned long)batch, 0);
  	}
  	tlb->local.next = NULL;
  }
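  
  /*
   * Typical mmu_gather sequence (a sketch based on callers such as
   * unmap_region() and exit_mmap() in mm/mmap.c, not code in this file):
   *
   *	struct mmu_gather tlb;
   *
   *	tlb_gather_mmu(&tlb, mm, fullmm);
   *	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
   *	free_pgtables(&tlb, vma, floor, ceiling);
   *	tlb_finish_mmu(&tlb, start, end);
   */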
  
  /* __tlb_remove_page
   *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
   *	handling the additional races in SMP caused by other CPUs caching valid
   *	mappings in their TLBs. Returns the number of free page slots left.
   *	When out of page slots we must call tlb_flush_mmu().
   */
  int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  {
  	struct mmu_gather_batch *batch;
  
  	tlb->need_flush = 1;
  
  	if (tlb_fast_mode(tlb)) {
  		free_page_and_swap_cache(page);
  		return 1; /* avoid calling tlb_flush_mmu() */
  	}
  
  	batch = tlb->active;
  	batch->pages[batch->nr++] = page;
  	if (batch->nr == batch->max) {
  		if (!tlb_next_batch(tlb))
  			return 0;
  		batch = tlb->active;
  	}
  	VM_BUG_ON(batch->nr > batch->max);
  
  	return batch->max - batch->nr;
  }
  
  #endif /* HAVE_GENERIC_MMU_GATHER */
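  
  /*
   * Callers treat a zero return from __tlb_remove_page() as "batch full";
   * zap_pte_range() below, for example, does
   *
   *	force_flush = !__tlb_remove_page(tlb, page);
   *	if (force_flush)
   *		break;
   *
   * and then drops the PTE lock and calls tlb_flush_mmu() before
   * restarting the loop.
   */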
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  
  /*
   * See the comment near struct mmu_table_batch.
   */
  
  static void tlb_remove_table_smp_sync(void *arg)
  {
  	/* Simply deliver the interrupt */
  }
  
  static void tlb_remove_table_one(void *table)
  {
  	/*
  	 * This isn't an RCU grace period and hence the page-tables cannot be
  	 * assumed to be actually RCU-freed.
  	 *
  	 * It is however sufficient for software page-table walkers that rely on
  	 * IRQ disabling. See the comment near struct mmu_table_batch.
  	 */
  	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
  	__tlb_remove_table(table);
  }
  
  static void tlb_remove_table_rcu(struct rcu_head *head)
  {
  	struct mmu_table_batch *batch;
  	int i;
  
  	batch = container_of(head, struct mmu_table_batch, rcu);
  
  	for (i = 0; i < batch->nr; i++)
  		__tlb_remove_table(batch->tables[i]);
  
  	free_page((unsigned long)batch);
  }
  
  void tlb_table_flush(struct mmu_gather *tlb)
  {
  	struct mmu_table_batch **batch = &tlb->batch;
  
  	if (*batch) {
  		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
  		*batch = NULL;
  	}
  }
  
  void tlb_remove_table(struct mmu_gather *tlb, void *table)
  {
  	struct mmu_table_batch **batch = &tlb->batch;
  
  	tlb->need_flush = 1;
  
  	/*
  	 * When there are fewer than two users of this mm there cannot be a
  	 * concurrent page-table walk.
  	 */
  	if (atomic_read(&tlb->mm->mm_users) < 2) {
  		__tlb_remove_table(table);
  		return;
  	}
  
  	if (*batch == NULL) {
  		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
  		if (*batch == NULL) {
  			tlb_remove_table_one(table);
  			return;
  		}
  		(*batch)->nr = 0;
  	}
  	(*batch)->tables[(*batch)->nr++] = table;
  	if ((*batch)->nr == MAX_TABLE_BATCH)
  		tlb_table_flush(tlb);
  }
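  
  /*
   * Note on the fallback above: when no batch page can be allocated,
   * tlb_remove_table_one() uses a synchronous smp_call_function() to make
   * sure every CPU has taken an interrupt, so any software page-table
   * walker that relies on IRQ disabling (see the comment near struct
   * mmu_table_batch) is finished before __tlb_remove_table() frees the
   * table; this substitutes for the missing RCU grace period.
   */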
  #endif /* CONFIG_HAVE_RCU_TABLE_FREE */

  /*
   * If a p?d_bad entry is found while walking page tables, report
   * the error, before resetting entry to p?d_none.  Usually (but
   * very seldom) called out from the p?d_none_or_clear_bad macros.
   */
  
  void pgd_clear_bad(pgd_t *pgd)
  {
  	pgd_ERROR(*pgd);
  	pgd_clear(pgd);
  }
  
  void pud_clear_bad(pud_t *pud)
  {
  	pud_ERROR(*pud);
  	pud_clear(pud);
  }
  
  void pmd_clear_bad(pmd_t *pmd)
  {
  	pmd_ERROR(*pmd);
  	pmd_clear(pmd);
  }
  
  /*
   * Note: this doesn't free the actual pages themselves. That
   * has been handled earlier when unmapping all the memory regions.
   */
  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
  			   unsigned long addr)
  {
  	pgtable_t token = pmd_pgtable(*pmd);
  	pmd_clear(pmd);
  	pte_free_tlb(tlb, token, addr);
  	tlb->mm->nr_ptes--;
  }
  
  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  				unsigned long addr, unsigned long end,
  				unsigned long floor, unsigned long ceiling)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	unsigned long start;
  
  	start = addr;
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		free_pte_range(tlb, pmd, addr);
  	} while (pmd++, addr = next, addr != end);
  
  	start &= PUD_MASK;
  	if (start < floor)
  		return;
  	if (ceiling) {
  		ceiling &= PUD_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		return;
  
  	pmd = pmd_offset(pud, start);
  	pud_clear(pud);
  	pmd_free_tlb(tlb, pmd, start);
  }
  
  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  				unsigned long addr, unsigned long end,
  				unsigned long floor, unsigned long ceiling)
  {
  	pud_t *pud;
  	unsigned long next;
  	unsigned long start;
  
  	start = addr;
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  	} while (pud++, addr = next, addr != end);
  
  	start &= PGDIR_MASK;
  	if (start < floor)
  		return;
  	if (ceiling) {
  		ceiling &= PGDIR_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		return;
  
  	pud = pud_offset(pgd, start);
  	pgd_clear(pgd);
  	pud_free_tlb(tlb, pud, start);
  }
  
  /*
   * This function frees user-level page tables of a process.
   *
   * Must be called with pagetable lock held.
   */
  void free_pgd_range(struct mmu_gather *tlb,
  			unsigned long addr, unsigned long end,
  			unsigned long floor, unsigned long ceiling)
  {
  	pgd_t *pgd;
  	unsigned long next;
  
  	/*
  	 * The next few lines have given us lots of grief...
  	 *
  	 * Why are we testing PMD* at this top level?  Because often
  	 * there will be no work to do at all, and we'd prefer not to
  	 * go all the way down to the bottom just to discover that.
  	 *
  	 * Why all these "- 1"s?  Because 0 represents both the bottom
  	 * of the address space and the top of it (using -1 for the
  	 * top wouldn't help much: the masks would do the wrong thing).
  	 * The rule is that addr 0 and floor 0 refer to the bottom of
  	 * the address space, but end 0 and ceiling 0 refer to the top.
  	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
  	 * that end 0 case should be mythical).
  	 *
  	 * Wherever addr is brought up or ceiling brought down, we must
  	 * be careful to reject "the opposite 0" before it confuses the
  	 * subsequent tests.  But what about where end is brought down
  	 * by PMD_SIZE below? no, end can't go down to 0 there.
  	 *
  	 * Whereas we round start (addr) and ceiling down, by different
  	 * masks at different levels, in order to test whether a table
  	 * now has no other vmas using it, so can be freed, we don't
  	 * bother to round floor or end up - the tests don't need that.
  	 */

  	addr &= PMD_MASK;
  	if (addr < floor) {
  		addr += PMD_SIZE;
  		if (!addr)
  			return;
  	}
  	if (ceiling) {
  		ceiling &= PMD_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		end -= PMD_SIZE;
  	if (addr > end - 1)
  		return;
  	pgd = pgd_offset(tlb->mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
  	} while (pgd++, addr = next, addr != end);
  }
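  
  /*
   * Worked example of the floor/ceiling arithmetic above (a sketch, not
   * part of the original file): addr is rounded down to a PMD boundary
   * and only bumped back up by PMD_SIZE when the rounding dips below
   * floor, so a lowest-level table shared with a neighbouring vma is
   * preserved; the "end - 1 > ceiling - 1" comparison makes ceiling == 0
   * behave as the very top of the address space, since 0 - 1 wraps to
   * the maximum unsigned value.
   */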
  
  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
  		unsigned long floor, unsigned long ceiling)
  {
  	while (vma) {
  		struct vm_area_struct *next = vma->vm_next;
  		unsigned long addr = vma->vm_start;
  
  		/*
  		 * Hide vma from rmap and truncate_pagecache before freeing
  		 * pgtables
  		 */
  		unlink_anon_vmas(vma);
  		unlink_file_vma(vma);
  
  		if (is_vm_hugetlb_page(vma)) {
  			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
  				floor, next? next->vm_start: ceiling);
  		} else {
  			/*
  			 * Optimization: gather nearby vmas into one call down
  			 */
  			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
  			       && !is_vm_hugetlb_page(next)) {
  				vma = next;
  				next = vma->vm_next;
  				unlink_anon_vmas(vma);
  				unlink_file_vma(vma);
  			}
  			free_pgd_range(tlb, addr, vma->vm_end,
  				floor, next? next->vm_start: ceiling);
  		}
  		vma = next;
  	}
  }
  
  int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
  		pmd_t *pmd, unsigned long address)
  {
  	pgtable_t new = pte_alloc_one(mm, address);
  	int wait_split_huge_page;
  	if (!new)
  		return -ENOMEM;
  
  	/*
  	 * Ensure all pte setup (eg. pte page lock and page clearing) are
  	 * visible before the pte is made visible to other CPUs by being
  	 * put into page tables.
  	 *
  	 * The other side of the story is the pointer chasing in the page
  	 * table walking code (when walking the page table without locking;
  	 * ie. most of the time). Fortunately, these data accesses consist
  	 * of a chain of data-dependent loads, meaning most CPUs (alpha
  	 * being the notable exception) will already guarantee loads are
  	 * seen in-order. See the alpha page table accessors for the
  	 * smp_read_barrier_depends() barriers in page table walking code.
  	 */
  	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
  
  	spin_lock(&mm->page_table_lock);
  	wait_split_huge_page = 0;
  	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
  		mm->nr_ptes++;
  		pmd_populate(mm, pmd, new);
  		new = NULL;
  	} else if (unlikely(pmd_trans_splitting(*pmd)))
  		wait_split_huge_page = 1;
  	spin_unlock(&mm->page_table_lock);
  	if (new)
  		pte_free(mm, new);
  	if (wait_split_huge_page)
  		wait_split_huge_page(vma->anon_vma, pmd);
  	return 0;
  }
  
  int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
  {
  	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
  	if (!new)
  		return -ENOMEM;
  
  	smp_wmb(); /* See comment in __pte_alloc */
  
  	spin_lock(&init_mm.page_table_lock);
  	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
  		pmd_populate_kernel(&init_mm, pmd, new);
  		new = NULL;
  	} else
  		VM_BUG_ON(pmd_trans_splitting(*pmd));
  	spin_unlock(&init_mm.page_table_lock);
  	if (new)
  		pte_free_kernel(&init_mm, new);
  	return 0;
  }
  
  static inline void init_rss_vec(int *rss)
  {
  	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
  }
  
  static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
  {
  	int i;
  
  	if (current->mm == mm)
  		sync_mm_rss(current, mm);
  	for (i = 0; i < NR_MM_COUNTERS; i++)
  		if (rss[i])
  			add_mm_counter(mm, i, rss[i]);
  }
  
  /*
   * This function is called to print an error when a bad pte
   * is found. For example, we might have a PFN-mapped pte in
   * a region that doesn't allow it.
   *
   * The calling function must still handle the error.
   */
  static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  			  pte_t pte, struct page *page)
  {
  	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
  	pud_t *pud = pud_offset(pgd, addr);
  	pmd_t *pmd = pmd_offset(pud, addr);
  	struct address_space *mapping;
  	pgoff_t index;
  	static unsigned long resume;
  	static unsigned long nr_shown;
  	static unsigned long nr_unshown;
  
  	/*
  	 * Allow a burst of 60 reports, then keep quiet for that minute;
  	 * or allow a steady drip of one report per second.
  	 */
  	if (nr_shown == 60) {
  		if (time_before(jiffies, resume)) {
  			nr_unshown++;
  			return;
  		}
  		if (nr_unshown) {
  			printk(KERN_ALERT
  				"BUG: Bad page map: %lu messages suppressed\n",
  				nr_unshown);
  			nr_unshown = 0;
  		}
  		nr_shown = 0;
  	}
  	if (nr_shown++ == 0)
  		resume = jiffies + 60 * HZ;
  
  	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
  	index = linear_page_index(vma, addr);
  
  	printk(KERN_ALERT
  		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
  		current->comm,
  		(long long)pte_val(pte), (long long)pmd_val(*pmd));
  	if (page)
  		dump_page(page);
  	printk(KERN_ALERT
  		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
  		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
  	/*
  	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
  	 */
  	if (vma->vm_ops)
  		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
  				(unsigned long)vma->vm_ops->fault);
  	if (vma->vm_file && vma->vm_file->f_op)
  		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
  				(unsigned long)vma->vm_file->f_op->mmap);
  	dump_stack();
  	add_taint(TAINT_BAD_PAGE);
  }
  
  static inline int is_cow_mapping(vm_flags_t flags)
  {
  	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  }
  #ifndef is_zero_pfn
  static inline int is_zero_pfn(unsigned long pfn)
  {
  	return pfn == zero_pfn;
  }
  #endif
  
  #ifndef my_zero_pfn
  static inline unsigned long my_zero_pfn(unsigned long addr)
  {
  	return zero_pfn;
  }
  #endif
  
  /*
   * vm_normal_page -- This function gets the "struct page" associated with a pte.
   *
   * "Special" mappings do not wish to be associated with a "struct page" (either
   * it doesn't exist, or it exists but they don't want to touch it). In this
   * case, NULL is returned here. "Normal" mappings do have a struct page.
   *
   * There are 2 broad cases. Firstly, an architecture may define a pte_special()
   * pte bit, in which case this function is trivial. Secondly, an architecture
   * may not have a spare pte bit, which requires a more complicated scheme,
   * described below.
   *
   * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
   * special mapping (even if there are underlying and valid "struct pages").
   * COWed pages of a VM_PFNMAP are always normal.
   *
   * The way we recognize COWed pages within VM_PFNMAP mappings is through the
   * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
   * set, and the vm_pgoff will point to the first PFN mapped: thus every special
   * mapping will always honor the rule
   *
   *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
   *
   * And for normal mappings this is false.
   *
   * This restricts such mappings to be a linear translation from virtual address
   * to pfn. To get around this restriction, we allow arbitrary mappings so long
   * as the vma is not a COW mapping; in that case, we know that all ptes are
   * special (because none can have been COWed).
   *
   * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
   *
   * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
   * page" backing, however the difference is that _all_ pages with a struct
   * page (that is, those where pfn_valid is true) are refcounted and considered
   * normal pages by the VM. The disadvantage is that pages are refcounted
   * (which can be slower and simply not an option for some PFNMAP users). The
   * advantage is that we don't have to follow the strict linearity rule of
   * PFNMAP mappings in order to support COWable mappings.
   */
  #ifdef __HAVE_ARCH_PTE_SPECIAL
  # define HAVE_PTE_SPECIAL 1
  #else
  # define HAVE_PTE_SPECIAL 0
  #endif
  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
  				pte_t pte)
  {
  	unsigned long pfn = pte_pfn(pte);
  
  	if (HAVE_PTE_SPECIAL) {
  		if (likely(!pte_special(pte)))
  			goto check_pfn;
  		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
  			return NULL;
  		if (!is_zero_pfn(pfn))
  			print_bad_pte(vma, addr, pte, NULL);
  		return NULL;
  	}
  
  	/* !HAVE_PTE_SPECIAL case follows: */
  
  	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  		if (vma->vm_flags & VM_MIXEDMAP) {
  			if (!pfn_valid(pfn))
  				return NULL;
  			goto out;
  		} else {
  			unsigned long off;
  			off = (addr - vma->vm_start) >> PAGE_SHIFT;
  			if (pfn == vma->vm_pgoff + off)
  				return NULL;
  			if (!is_cow_mapping(vma->vm_flags))
  				return NULL;
  		}
  	}
  
  	if (is_zero_pfn(pfn))
  		return NULL;
  check_pfn:
  	if (unlikely(pfn > highest_memmap_pfn)) {
  		print_bad_pte(vma, addr, pte, NULL);
  		return NULL;
  	}
  
  	/*
  	 * NOTE! We still have PageReserved() pages in the page tables.
  	 * eg. VDSO mappings can cause them to exist.
  	 */
  out:
  	return pfn_to_page(pfn);
  }
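  
  /*
   * Illustration of the COW-detection rule documented above (sketch): in
   * a vma set up by remap_pfn_range(), a pte still mapping its original
   * frame satisfies
   *
   *	pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
   *
   * and is therefore treated as special, while a page installed later by
   * COW breaks the equality and is treated as a normal page.
   */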
  
  /*
   * copy one vm_area from one task to the other. Assumes the page tables
   * already present in the new task to be cleared in the whole range
   * covered by this vma.
   */
  static inline unsigned long
  copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
  		unsigned long addr, int *rss)
  {
  	unsigned long vm_flags = vma->vm_flags;
  	pte_t pte = *src_pte;
  	struct page *page;
  
  	/* pte contains position in swap or file, so copy. */
  	if (unlikely(!pte_present(pte))) {
  		if (!pte_file(pte)) {
  			swp_entry_t entry = pte_to_swp_entry(pte);
  
  			if (swap_duplicate(entry) < 0)
  				return entry.val;
  
  			/* make sure dst_mm is on swapoff's mmlist. */
  			if (unlikely(list_empty(&dst_mm->mmlist))) {
  				spin_lock(&mmlist_lock);
  				if (list_empty(&dst_mm->mmlist))
  					list_add(&dst_mm->mmlist,
  						 &src_mm->mmlist);
  				spin_unlock(&mmlist_lock);
  			}
  			if (likely(!non_swap_entry(entry)))
  				rss[MM_SWAPENTS]++;
  			else if (is_write_migration_entry(entry) &&
  					is_cow_mapping(vm_flags)) {
  				/*
  				 * COW mappings require pages in both parent
  				 * and child to be set to read.
  				 */
  				make_migration_entry_read(&entry);
  				pte = swp_entry_to_pte(entry);
  				set_pte_at(src_mm, addr, src_pte, pte);
  			}
  		}
  		goto out_set_pte;
  	}
  
  	/*
  	 * If it's a COW mapping, write protect it both
  	 * in the parent and the child
  	 */
  	if (is_cow_mapping(vm_flags)) {
  		ptep_set_wrprotect(src_mm, addr, src_pte);
  		pte = pte_wrprotect(pte);
  	}
  
  	/*
  	 * If it's a shared mapping, mark it clean in
  	 * the child
  	 */
  	if (vm_flags & VM_SHARED)
  		pte = pte_mkclean(pte);
  	pte = pte_mkold(pte);
  
  	page = vm_normal_page(vma, addr, pte);
  	if (page) {
  		get_page(page);
  		page_dup_rmap(page);
  		if (PageAnon(page))
  			rss[MM_ANONPAGES]++;
  		else
  			rss[MM_FILEPAGES]++;
  	}
  
  out_set_pte:
  	set_pte_at(dst_mm, addr, dst_pte, pte);
  	return 0;
  }
  
  int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
  		   unsigned long addr, unsigned long end)
  {
  	pte_t *orig_src_pte, *orig_dst_pte;
  	pte_t *src_pte, *dst_pte;
  	spinlock_t *src_ptl, *dst_ptl;
  	int progress = 0;
  	int rss[NR_MM_COUNTERS];
  	swp_entry_t entry = (swp_entry_t){0};
  
  again:
  	init_rss_vec(rss);
  
  	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
  	if (!dst_pte)
  		return -ENOMEM;
  	src_pte = pte_offset_map(src_pmd, addr);
  	src_ptl = pte_lockptr(src_mm, src_pmd);
  	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  	orig_src_pte = src_pte;
  	orig_dst_pte = dst_pte;
  	arch_enter_lazy_mmu_mode();
  
  	do {
  		/*
  		 * We are holding two locks at this point - either of them
  		 * could generate latencies in another task on another CPU.
  		 */
  		if (progress >= 32) {
  			progress = 0;
  			if (need_resched() ||
  			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
  				break;
  		}
  		if (pte_none(*src_pte)) {
  			progress++;
  			continue;
  		}
  		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
  							vma, addr, rss);
  		if (entry.val)
  			break;
  		progress += 8;
  	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
  
  	arch_leave_lazy_mmu_mode();
  	spin_unlock(src_ptl);
  	pte_unmap(orig_src_pte);
  	add_mm_rss_vec(dst_mm, rss);
  	pte_unmap_unlock(orig_dst_pte, dst_ptl);
  	cond_resched();
  
  	if (entry.val) {
  		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
  			return -ENOMEM;
  		progress = 0;
  	}
  	if (addr != end)
  		goto again;
  	return 0;
  }
  
  static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end)
  {
  	pmd_t *src_pmd, *dst_pmd;
  	unsigned long next;
  
  	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
  	if (!dst_pmd)
  		return -ENOMEM;
  	src_pmd = pmd_offset(src_pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_trans_huge(*src_pmd)) {
  			int err;
  			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
  			err = copy_huge_pmd(dst_mm, src_mm,
  					    dst_pmd, src_pmd, addr, vma);
  			if (err == -ENOMEM)
  				return -ENOMEM;
  			if (!err)
  				continue;
  			/* fall through */
  		}
  		if (pmd_none_or_clear_bad(src_pmd))
  			continue;
  		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
  						vma, addr, next))
  			return -ENOMEM;
  	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
  	return 0;
  }
  
  static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end)
  {
  	pud_t *src_pud, *dst_pud;
  	unsigned long next;
  
  	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
  	if (!dst_pud)
  		return -ENOMEM;
  	src_pud = pud_offset(src_pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(src_pud))
  			continue;
  		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
  						vma, addr, next))
  			return -ENOMEM;
  	} while (dst_pud++, src_pud++, addr = next, addr != end);
  	return 0;
  }
  
  int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		struct vm_area_struct *vma)
  {
  	pgd_t *src_pgd, *dst_pgd;
  	unsigned long next;
  	unsigned long addr = vma->vm_start;
  	unsigned long end = vma->vm_end;
  	int ret;

  	/*
  	 * Don't copy ptes where a page fault will fill them correctly.
  	 * Fork becomes much lighter when there are big shared or private
  	 * readonly mappings. The tradeoff is that copy_page_range is more
  	 * efficient than faulting.
  	 */
  	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
  		if (!vma->anon_vma)
  			return 0;
  	}
  
  	if (is_vm_hugetlb_page(vma))
  		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
  
  	if (unlikely(is_pfn_mapping(vma))) {
  		/*
  		 * We do not free on error cases below as remove_vma
  		 * gets called on error from higher level routine
  		 */
  		ret = track_pfn_vma_copy(vma);
  		if (ret)
  			return ret;
  	}
  
  	/*
  	 * We need to invalidate the secondary MMU mappings only when
  	 * there could be a permission downgrade on the ptes of the
  	 * parent mm. And a permission downgrade will only happen if
  	 * is_cow_mapping() returns true.
  	 */
  	if (is_cow_mapping(vma->vm_flags))
  		mmu_notifier_invalidate_range_start(src_mm, addr, end);
  
  	ret = 0;
  	dst_pgd = pgd_offset(dst_mm, addr);
  	src_pgd = pgd_offset(src_mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(src_pgd))
  			continue;
  		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
  					    vma, addr, next))) {
  			ret = -ENOMEM;
  			break;
  		}
  	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
  
  	if (is_cow_mapping(vma->vm_flags))
  		mmu_notifier_invalidate_range_end(src_mm,
  						  vma->vm_start, end);
  	return ret;
  }
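  
  /*
   * Fast-path sketch for the check at the top of copy_page_range(): a
   * fork() of a process whose vmas carry no anonymous pages (no anon_vma)
   * and none of VM_HUGETLB, VM_NONLINEAR, VM_PFNMAP or VM_INSERTPAGE
   * returns without copying a single pte; the child populates its page
   * tables through minor faults instead.
   */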
  static unsigned long zap_pte_range(struct mmu_gather *tlb,
  				struct vm_area_struct *vma, pmd_t *pmd,
  				unsigned long addr, unsigned long end,
  				struct zap_details *details)
  {
  	struct mm_struct *mm = tlb->mm;
  	int force_flush = 0;
  	int rss[NR_MM_COUNTERS];
  	spinlock_t *ptl;
  	pte_t *start_pte;
  	pte_t *pte;
  
  again:
  	init_rss_vec(rss);
  	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  	pte = start_pte;
  	arch_enter_lazy_mmu_mode();
  	do {
  		pte_t ptent = *pte;
  		if (pte_none(ptent)) {
  			continue;
  		}
  
  		if (pte_present(ptent)) {
  			struct page *page;
  
  			page = vm_normal_page(vma, addr, ptent);
  			if (unlikely(details) && page) {
  				/*
  				 * unmap_shared_mapping_pages() wants to
  				 * invalidate cache without truncating:
  				 * unmap shared but keep private pages.
  				 */
  				if (details->check_mapping &&
  				    details->check_mapping != page->mapping)
  					continue;
  				/*
  				 * Each page->index must be checked when
  				 * invalidating or truncating nonlinear.
  				 */
  				if (details->nonlinear_vma &&
  				    (page->index < details->first_index ||
  				     page->index > details->last_index))
  					continue;
  			}
  			ptent = ptep_get_and_clear_full(mm, addr, pte,
  							tlb->fullmm);
  			tlb_remove_tlb_entry(tlb, pte, addr);
  			if (unlikely(!page))
  				continue;
  			if (unlikely(details) && details->nonlinear_vma
  			    && linear_page_index(details->nonlinear_vma,
  						addr) != page->index)
  				set_pte_at(mm, addr, pte,
  					   pgoff_to_pte(page->index));
  			if (PageAnon(page))
  				rss[MM_ANONPAGES]--;
  			else {
  				if (pte_dirty(ptent))
  					set_page_dirty(page);
  				if (pte_young(ptent) &&
  				    likely(!VM_SequentialReadHint(vma)))
  					mark_page_accessed(page);
  				rss[MM_FILEPAGES]--;
  			}
  			page_remove_rmap(page);
  			if (unlikely(page_mapcount(page) < 0))
  				print_bad_pte(vma, addr, ptent, page);
  			force_flush = !__tlb_remove_page(tlb, page);
  			if (force_flush)
  				break;
  			continue;
  		}
  		/*
  		 * If details->check_mapping, we leave swap entries;
  		 * if details->nonlinear_vma, we leave file entries.
  		 */
  		if (unlikely(details))
  			continue;
  		if (pte_file(ptent)) {
  			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
  				print_bad_pte(vma, addr, ptent, NULL);
  		} else {
  			swp_entry_t entry = pte_to_swp_entry(ptent);
  
  			if (!non_swap_entry(entry))
  				rss[MM_SWAPENTS]--;
  			if (unlikely(!free_swap_and_cache(entry)))
  				print_bad_pte(vma, addr, ptent, NULL);
  		}
  		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  
  	add_mm_rss_vec(mm, rss);
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(start_pte, ptl);
  
  	/*
  	 * mmu_gather ran out of room to batch pages, we break out of
  	 * the PTE lock to avoid doing the potentially expensive TLB
  	 * invalidate and page-free while holding it.
  	 */
  	if (force_flush) {
  		force_flush = 0;
  		tlb_flush_mmu(tlb);
  		if (addr != end)
  			goto again;
  	}
  	return addr;
  }
  
  static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
  				struct vm_area_struct *vma, pud_t *pud,
  				unsigned long addr, unsigned long end,
  				struct zap_details *details)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_trans_huge(*pmd)) {
  			if (next-addr != HPAGE_PMD_SIZE) {
  				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
  				split_huge_page_pmd(vma->vm_mm, pmd);
  			} else if (zap_huge_pmd(tlb, vma, pmd))
  				continue;
  			/* fall through */
  		}
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
  		cond_resched();
  	} while (pmd++, addr = next, addr != end);
  
  	return addr;
  }

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	mem_cgroup_uncharge_start();
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
	mem_cgroup_uncharge_end();

	return addr;
}

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	unsigned long start = start_addr;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;
		if (unlikely(is_pfn_mapping(vma)))
			untrack_pfn_vma(vma, 0, 0);
		while (start != end) {
			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file)
					unmap_hugepage_range(vma, start, end, NULL);

				start = end;
			} else
				start = unmap_page_range(tlb, vma, start, end, details);
		}
	}

	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
  }
  
  /**
   * zap_page_range - remove user pages in a given range
   * @vma: vm_area_struct holding the applicable pages
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
   * @details: details of nonlinear truncation or shared cache invalidation
   */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;
	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(&tlb, address, end);
	return end;
  }
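
/*
 * Example (illustrative only, not part of mm/memory.c): a minimal sketch of
 * driving zap_page_range() over a sub-range of one vma, the way
 * MADV_DONTNEED-style code does; details == NULL means "zap everything",
 * as unmap_page_range() above checks. The helper name is an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static void drop_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long len)
{
	/* Caller holds mmap_sem; [start, start + len) must lie in this vma. */
	zap_page_range(vma, start, len, NULL);
}
#endif
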
  /**
   * zap_vma_ptes - remove ptes mapping the vma
   * @vma: vm_area_struct holding ptes to be zapped
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
   *
   * This function only unmaps ptes assigned to VM_PFNMAP vmas.
   *
   * The entire address range must be fully contained within the vma.
   *
   * Returns 0 if successful.
   */
  int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  		unsigned long size)
  {
  	if (address < vma->vm_start || address + size > vma->vm_end ||
  	    		!(vma->vm_flags & VM_PFNMAP))
  		return -1;
  	zap_page_range(vma, address, size, NULL);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(zap_vma_ptes);
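
/*
 * Example (illustrative only, not part of mm/memory.c): a sketch of how a
 * driver that set up a VM_PFNMAP mapping might revoke it, e.g. before
 * tearing down the underlying device memory. The function name is an
 * assumption for illustration.
 */
#if 0	/* illustrative sketch, not compiled */
static void my_dev_revoke_mapping(struct vm_area_struct *vma)
{
	/* Zap every pte of the mapping; the range must stay inside the vma. */
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		pr_warn("my_dev: vma not VM_PFNMAP or range out of bounds\n");
}
#endif
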
/**
 * follow_page - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(mm, pmd);
			goto split_fallthrough;
		}
		spin_lock(&mm->page_table_lock);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(&mm->page_table_lock);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(mm, address,
							     pmd, flags);
				spin_unlock(&mm->page_table_lock);
				goto out;
			}
		} else
			spin_unlock(&mm->page_table_lock);
		/* fall through */
	}
split_fallthrough:
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here and migration is
			 * blocked by the pte's page reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}
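
/*
 * Example (illustrative only, not part of mm/memory.c): follow_page() must
 * be called with mmap_sem held. A minimal sketch that looks up and pins the
 * page backing one user address; the helper name is an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static struct page *lookup_one_user_page(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page = NULL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start) {
		/* FOLL_GET takes a reference; release it with put_page(). */
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}
#endif
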
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr+PAGE_SIZE);
}

  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:	task_struct of target task
   * @mm:		mm_struct of target mm
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @gup_flags:	flags modifying pin behaviour
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long. Or NULL, if caller
   *		only intends to ensure the pages are faulted in.
   * @vmas:	array of pointers to vmas corresponding to each page.
   *		Or NULL if the caller does not require them.
   * @nonblocking: whether waiting for disk IO or mmap_sem contention
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno. Each page returned must be released
   * with a put_page() call when it is finished with. vmas will only
   * remain valid while mmap_sem is held.
   *
   * Must be called with mmap_sem held for read or write.
   *
   * __get_user_pages walks a process's page tables and takes a reference to
   * each struct page that each user address corresponds to at a given
   * instant. That is, it takes the page that would be accessed if a user
   * thread accesses the given user virtual address at that instant.
   *
   * This does not guarantee that the page exists in the user mappings when
   * __get_user_pages returns, and there may even be a completely different
   * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However, it does guarantee that the page
   * won't be freed completely. And mostly callers simply care that the page
   * contains data that was valid *at some point in time*. Typically, an IO
   * or similar operation cannot guarantee anything stronger anyway because
   * locks can't be held over the syscall boundary.
   *
   * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
   * the page is written to, set_page_dirty (or set_page_dirty_lock, as
   * appropriate) must be called after the page is finished with, and
   * before put_page is called.
   *
   * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
   * or mmap_sem contention, and if waiting is needed to pin all pages,
   * *@nonblocking will be set to 0.
   *
   * In most cases, get_user_pages or get_user_pages_fast should be used
   * instead of __get_user_pages. __get_user_pages should be used only if
   * you need some special @gup_flags.
   */
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking)
{
	int i;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(mm, start)) {
			unsigned long pg = start & PAGE_MASK;
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			/* user gate pages are read-only */
			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			VM_BUG_ON(pmd_trans_huge(*pmd));
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			vma = get_gate_vma(mm);
			if (pages) {
				struct page *page;
				page = vm_normal_page(vma, start, *pte);
				if (!page) {
					if (!(gup_flags & FOLL_DUMP) &&
					     is_zero_pfn(pte_pfn(*pte)))
						page = pte_page(*pte);
					else {
						pte_unmap(pte);
						return i ? : -EFAULT;
					}
				}
				pages[i] = page;
				get_page(page);
			}
			pte_unmap(pte);
			goto next_page;
		}
		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;
		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				unsigned int fault_flags = 0;
				/* For mlock, just skip the stack guard page. */
				if (foll_flags & FOLL_MLOCK) {
					if (stack_guard_page(vma, start))
						goto next_page;
				}
				if (foll_flags & FOLL_WRITE)
					fault_flags |= FAULT_FLAG_WRITE;
				if (nonblocking)
					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
				if (foll_flags & FOLL_NOWAIT)
					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);

				ret = handle_mm_fault(mm, vma, start,
							fault_flags);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret & (VM_FAULT_HWPOISON |
						   VM_FAULT_HWPOISON_LARGE)) {
						if (i)
							return i;
						else if (gup_flags & FOLL_HWPOISON)
							return -EHWPOISON;
						else
							return -EFAULT;
					}
					if (ret & VM_FAULT_SIGBUS)
						return i ? i : -EFAULT;
					BUG();
				}

				if (tsk) {
					if (ret & VM_FAULT_MAJOR)
						tsk->maj_flt++;
					else
						tsk->min_flt++;
				}

				if (ret & VM_FAULT_RETRY) {
					if (nonblocking)
						*nonblocking = 0;
					return i;
				}
				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
next_page:
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
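
/*
 * Example (illustrative only, not part of mm/memory.c): the retry pattern
 * that @nonblocking enables, roughly how mlock-style callers fault in a
 * range without holding mmap_sem across disk IO. Names and the error
 * handling are simplifying assumptions.
 */
#if 0	/* illustrative sketch, not compiled */
static long fault_in_range(struct mm_struct *mm, unsigned long start,
			   long nr_pages)
{
	int locked = 1;
	long ret = 0;

	down_read(&mm->mmap_sem);
	while (nr_pages > 0) {
		ret = __get_user_pages(current, mm, start, nr_pages,
				       FOLL_TOUCH, NULL, NULL, &locked);
		if (ret <= 0)
			break;
		start += ret * PAGE_SIZE;
		nr_pages -= ret;
		if (!locked && nr_pages > 0) {
			/* mmap_sem was dropped for IO; retake and continue */
			locked = 1;
			down_read(&mm->mmap_sem);
		}
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
#endif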

  /*
   * fixup_user_fault() - manually resolve a user page fault
   * @tsk:	the task_struct to use for page fault accounting, or
   *		NULL if faults are not to be recorded.
   * @mm:		mm_struct of target mm
   * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
   *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section); that access returns -EFAULT, and we want to
 * resolve the user fault before trying again.
   *
   * Typically this is meant to be used by the futex code.
   *
   * The main difference with get_user_pages() is that this function will
   * unconditionally call handle_mm_fault() which will in turn perform all the
   * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
   *
   * This is important for some architectures where those bits also gate the
   * access permission to the page because they are maintained in software.  On
   * such architectures, gup() will not be enough to make a subsequent access
   * succeed.
   *
 * This should be called with the mmap_sem held for read.
   */
  int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
  		     unsigned long address, unsigned int fault_flags)
  {
  	struct vm_area_struct *vma;
  	int ret;
  
  	vma = find_extend_vma(mm, address);
  	if (!vma || address < vma->vm_start)
  		return -EFAULT;
  
  	ret = handle_mm_fault(mm, vma, address, fault_flags);
  	if (ret & VM_FAULT_ERROR) {
  		if (ret & VM_FAULT_OOM)
  			return -ENOMEM;
  		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
  			return -EHWPOISON;
  		if (ret & VM_FAULT_SIGBUS)
  			return -EFAULT;
  		BUG();
  	}
  	if (tsk) {
  		if (ret & VM_FAULT_MAJOR)
  			tsk->maj_flt++;
  		else
  			tsk->min_flt++;
  	}
  	return 0;
  }
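
/*
 * Example (illustrative only, not part of mm/memory.c): the futex-style
 * pattern described above. An access under pagefault_disable() fails with
 * -EFAULT, so the fault is resolved manually and the access retried. The
 * helper name is an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static int read_user_word(u32 __user *uaddr, u32 *val)
{
	int ret;

	do {
		pagefault_disable();
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();
		if (!ret)
			return 0;
		/* Fault the page in (read fault: fault_flags == 0), retry. */
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0);
		up_read(&current->mm->mmap_sem);
	} while (!ret);
	return -EFAULT;
}
#endif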
  
  /*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno. Each page returned must be released
   * with a put_page() call when it is finished with. vmas will only
   * remain valid while mmap_sem is held.
   *
   * Must be called with mmap_sem held for read or write.
   *
   * get_user_pages walks a process's page tables and takes a reference to
   * each struct page that each user address corresponds to at a given
   * instant. That is, it takes the page that would be accessed if a user
   * thread accesses the given user virtual address at that instant.
   *
   * This does not guarantee that the page exists in the user mappings when
   * get_user_pages returns, and there may even be a completely different
   * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However, it does guarantee that the page
   * won't be freed completely. And mostly callers simply care that the page
   * contains data that was valid *at some point in time*. Typically, an IO
   * or similar operation cannot guarantee anything stronger anyway because
   * locks can't be held over the syscall boundary.
   *
   * If write=0, the page must not be written to. If the page is written to,
   * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
   * after the page is finished with, and before put_page is called.
   *
   * get_user_pages is typically used for fewer-copy IO operations, to get a
   * handle on the memory by some means other than accesses via the user virtual
   * addresses. The pages may be submitted for DMA to devices or accessed via
   * their kernel linear mapping (via the kmap APIs). Care should be taken to
   * use the correct cache flushing APIs.
   *
   * See also get_user_pages_fast, for performance critical applications.
   */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
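
/*
 * Example (illustrative only, not part of mm/memory.c): the usual pinning
 * pattern built on get_user_pages() - pin for IO, dirty what was written,
 * then release every page. The helper name is an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static int pin_user_buffer(unsigned long start, int nr_pages,
			   struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, start, nr_pages,
			     1 /* write */, 0 /* !force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... access pages[0..got-1] via DMA or kmap() here ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);	/* we wrote into the pages */
		put_page(pages[i]);
	}
	return got;
}
#endif
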
  /**
   * get_dump_page() - pin user page in memory while writing it to core dump
   * @addr: user address
   *
   * Returns struct page pointer of user page pinned for dump,
   * to be freed afterwards by page_cache_release() or put_page().
   *
   * Returns NULL on any kind of failure - a hole must then be inserted into
   * the corefile, to preserve alignment with its headers; and also returns
   * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
   * allowing a hole to be left in the corefile to save diskspace.
   *
   * Called without mmap_sem, but after all other threads have been killed.
   */
  #ifdef CONFIG_ELF_CORE
  struct page *get_dump_page(unsigned long addr)
  {
  	struct vm_area_struct *vma;
  	struct page *page;
  
  	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd) {
			VM_BUG_ON(pmd_trans_huge(*pmd));
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
		}
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;
	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
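
/*
 * Example (illustrative only, not part of mm/memory.c): a driver mmap()
 * method mapping one kernel-allocated page into the user vma. The my_dev
 * structure and its page field are assumptions; the page must be a plain
 * (non-compound) allocation as required above.
 */
#if 0	/* illustrative sketch, not compiled */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	/* dev->page came from alloc_page(GFP_KERNEL) at probe time */
	return vm_insert_page(vma, vma->vm_start, dev->page);
}
#endif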

static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range).  However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;
	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);
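
/*
 * Example (illustrative only, not part of mm/memory.c): vm_insert_pfn()
 * called from a vm_ops->fault handler, as the comment above prescribes.
 * Returning VM_FAULT_NOPAGE tells the core that the pte is already
 * installed. The my_dev fields are assumptions; the vma was set up with
 * VM_PFNMAP at mmap time.
 */
#if 0	/* illustrative sketch, not compiled */
static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long pfn = dev->base_pfn + vmf->pgoff;

	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
#endif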

int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would then be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
  	return 0;
  }
  
  static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	VM_BUG_ON(pmd_trans_huge(*pmd));
  	do {
  		next = pmd_addr_end(addr, end);
  		if (remap_pte_range(mm, pmd, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
  
  static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		if (remap_pmd_range(mm, pud, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }
  /**
   * remap_pfn_range - remap kernel memory to userspace
   * @vma: user vma to map to
   * @addr: target user address to start at
   * @pfn: physical address of kernel memory
   * @size: size of map area
   * @prot: page protection flags for this mapping
   *
   *  Note: this is only safe if the mm semaphore is held when called.
   */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 * 	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not
		 * needed from higher level routine calling unmap_vmas
		 */
		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
		vma->vm_flags &= ~VM_PFN_AT_MMAP;
		return -EINVAL;
	}

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
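
/*
 * Example (illustrative only, not part of mm/memory.c): the classic driver
 * mmap() method built on remap_pfn_range(), mapping a physically contiguous
 * region into the whole vma in one call. dev->phys_base and dev->region_size
 * are assumptions.
 */
#if 0	/* illustrative sketch, not compiled */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > dev->region_size)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif
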
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));
	arch_enter_lazy_mmu_mode();
	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
  	if (mm != &init_mm)
  		pte_unmap_unlock(pte-1, ptl);
  	return err;
  }
  
  static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	int err;

	BUG_ON(pud_huge(*pud));

  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pmd++, addr = next, addr != end);
  	return err;
  }
  
  static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pud_t *pud;
  	unsigned long next;
  	int err;
  
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pud++, addr = next, addr != end);
  	return err;
  }
  
  /*
   * Scan a region of virtual memory, filling in page tables as necessary
   * and calling a provided function on each leaf page table.
   */
  int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
  			unsigned long size, pte_fn_t fn, void *data)
  {
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long end = addr + size;
  	int err;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset(mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);

	return err;
  }
  EXPORT_SYMBOL_GPL(apply_to_page_range);
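
/*
 * Illustrative sketch, not part of this file: a pte_fn_t callback in the
 * form apply_to_page_range() expects.  The hypothetical helper below just
 * counts how many of the visited ptes are present, accumulating into the
 * caller's counter passed via "data"; returning nonzero would abort the
 * walk:
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(mm, start, size, count_present_pte, &count);
 */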

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
  	}
  #endif
  	pte_unmap(page_table);
  	return same;
  }
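
/*
 * The unlocked read matters on configurations where a pte is wider than a
 * machine word (e.g. 32-bit x86 with PAE, where ptes are 64 bits): the two
 * halves of orig_pte may then come from two different updates, which is
 * why the pte_same() re-check above is done under the pte lock.
 */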

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			clear_page(kaddr);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
	__releases(ptl)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page) && !PageKsm(old_page)) {
		if (!trylock_page(old_page)) {
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);
			lock_page(old_page);
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				goto unlock;
			}
			page_cache_release(old_page);
		}
		if (reuse_swap_page(old_page)) {
			/*
			 * The page is all ours.  Move it to our anon_vma so
			 * the rmap code will not search our parent or siblings.
			 * Protected against the rmap code by the page lock.
			 */
			page_move_anon_rmap(old_page, vma, address);
			unlock_page(old_page);
			goto reuse;
		}
		unlock_page(old_page);
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			struct vm_fault vmf;
			int tmp;

			vmf.virtual_address = (void __user *)(address &
								PAGE_MASK);
			vmf.pgoff = old_page->index;
			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
			vmf.page = old_page;

			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
			if (unlikely(tmp &
					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
				ret = tmp;
				goto unwritable_page;
			}
			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
				lock_page(old_page);
				if (!old_page->mapping) {
					ret = 0; /* retry the fault */
					unlock_page(old_page);
					goto unwritable_page;
				}
			} else
				VM_BUG_ON(!PageLocked(old_page));

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it.  If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				goto unlock;
			}

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);

reuse:
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry, 1))
			update_mmu_cache(vma, address, page_table);
		pte_unmap_unlock(page_table, ptl);
		ret |= VM_FAULT_WRITE;

		if (!dirty_page)
			return ret;

		/*
		 * Yes, Virginia, this is actually required to prevent a race
		 * with clear_page_dirty_for_io() from clearing the page dirty
		 * bit after it clears all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
		 * __do_fault is protected similarly.
		 */
		if (!page_mkwrite) {
			wait_on_page_locked(dirty_page);
			set_page_dirty_balance(dirty_page, page_mkwrite);
		}
		put_page(dirty_page);
		if (page_mkwrite) {
			struct address_space *mapping = dirty_page->mapping;

			set_page_dirty(dirty_page);
			unlock_page(dirty_page);
			page_cache_release(dirty_page);
			if (mapping) {
				/*
				 * Some device drivers do not set page.mapping
				 * but still dirty their pages
				 */
				balance_dirty_pages_ratelimited(mapping);
			}
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		return ret;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address, vma);
	}
	__SetPageUptodate(new_page);

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm, MM_FILEPAGES);
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	if (old_page) {
		/*
		 * Don't let another task, with possibly unlocked vma,
		 * keep the mlocked page.
		 */
		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
			lock_page(old_page);	/* LRU manipulation */
			munlock_vma_page(old_page);
			unlock_page(old_page);
		}
		page_cache_release(old_page);
	}
	return ret;
oom_free_new:
	page_cache_release(new_page);
oom:
	if (old_page) {
		if (page_mkwrite) {
			unlock_page(old_page);
			page_cache_release(old_page);
		}
		page_cache_release(old_page);
	}
	return VM_FAULT_OOM;

unwritable_page:
	page_cache_release(old_page);
	return ret;
  }
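
/*
 * Illustrative note, not from this file: the classic path into do_wp_page()
 * is a process writing to MAP_PRIVATE memory still shared copy-on-write
 * after fork(); the first write to a read-only pte of a shared writable
 * file mapping instead takes the "reuse" path above, via ->page_mkwrite()
 * when the filesystem provides it.
 */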

static void unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	zap_page_range(vma, start_addr, end_addr - start_addr, details);
}

static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details);
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
	list_for_each_entry(vma, head, shared.vm_set.list) {
		details->nonlinear_vma = vma;
		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
	}
  }
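
/*
 * Illustrative sketch, not from this file: a nonlinear vma is created from
 * userspace by rearranging pages of an existing shared mapping, e.g.
 *
 *	addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	remap_file_pages(addr, page_size, 0, wild_pgoff, 0);
 *
 * ("addr", "len", "fd" and "wild_pgoff" are hypothetical.)  After this,
 * the file offset backing an address can no longer be computed from
 * vm_pgoff, which forces the exhaustive scan above.
 */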
  
  /**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from truncate_pagecache(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows? NULL: mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;

  	mutex_lock(&mapping->i_mmap_mutex);
  	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
  		unmap_mapping_range_tree(&mapping->i_mmap, &details);
  	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
  		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
  	mutex_unlock(&mapping->i_mmap_mutex);
  }
  EXPORT_SYMBOL(unmap_mapping_range);
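
/*
 * Illustrative sketch, not from this file: the classic caller is the
 * truncate path, which (as in truncate_pagecache()) unmaps before and
 * after dropping the pagecache, passing even_cows == 1:
 *
 *	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
 *
 * The second call is needed because pages can be COWed back into the
 * mappings between the first call and truncate_inode_pages().
 */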

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	spinlock_t *ptl;
	struct page *page, *swapcache = NULL;
	swp_entry_t entry;
	pte_t pte;
	int locked;
	struct mem_cgroup *ptr;
	int exclusive = 0;
	int ret = 0;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
			migration_entry_wait(mm, pmd, address);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, address, orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		grab_swap_token(mm); /* Contend for token _before_ read-in */
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(mm, PGMAJFAULT);
	} else if (PageHWPoison(page)) {
		/*
		 * hwpoisoned dirty swapcache pages are kept for killing
		 * owner processes (which may be unknown at hwpoison time)
		 */
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		goto out_release;
	}

	locked = lock_page_or_retry(page, mm, flags);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
	if (!locked) {
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}

	/*
	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
	 * release the swapcache from under us.  The page pin, and pte_same
	 * test below, are not enough to exclude that.  Even if it is still
	 * swapcache, we need to check that the page's swap has not changed.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
		goto out_page;

	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;
		page = ksm_does_need_to_copy(page, vma, address);

		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;
			page = swapcache;
			swapcache = NULL;
			goto out_page;
		}
	}

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/*
	 * The page isn't present yet, go ahead with the fault.
	 *
	 * Be careful about the sequence of operations here.
	 * To get its accounting right, reuse_swap_page() must be called
	 * while the page is counted on swap but not yet in mapcount i.e.
	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
	 * must be called after the swap_free(), or it will never succeed.
	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
	 * in page->private. In this case, a record in swap_cgroup is silently
	 * discarded at swap_free().
	 */

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	dec_mm_counter_fast(mm, MM_SWAPENTS);
	pte = mk_pte(page, vma->vm_page_prot);
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		flags &= ~FAULT_FLAG_WRITE;
		ret |= VM_FAULT_WRITE;
		exclusive = 1;
	}
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	do_page_add_anon_rmap(page, vma, address, exclusive);
	/* It's better to call commit-charge after rmap is established */
	mem_cgroup_commit_charge_swapin(page, ptr);

	swap_free(entry);
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (swapcache) {
		/*
		 * Hold the lock to avoid the swap entry to be reused
		 * until we take the PT lock for the pte_same() check
		 * (to avoid false positives from pte_same). For
		 * further safety release the lock after the swap_free
		 * so that the swap count won't change under a
		 * parallel locked swapcache.
		 */
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}

	if (flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
		if (ret & VM_FAULT_ERROR)
			ret &= VM_FAULT_ERROR;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	mem_cgroup_cancel_charge_swapin(ptr);
	pte_unmap_unlock(page_table, ptl);
out_page:
	unlock_page(page);
out_release:
	page_cache_release(page);
	if (swapcache) {
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}
	return ret;
  }
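
/*
 * Illustrative note, not from this file: any access through a pte that was
 * turned into a swap entry at swap-out time lands in do_swap_page(); the
 * fault is minor if lookup_swap_cache() still finds the page, and major
 * (VM_FAULT_MAJOR, counted via PGMAJFAULT) if it must be read back in.
 */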
  
  /*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		expand_downwards(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		expand_upwards(vma, address + PAGE_SIZE);
	}
  	return 0;
  }
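
/*
 * Illustrative note, not from this file: a fault on the first page of a
 * VM_GROWSDOWN vma grows the stack one further page downwards, and fails
 * outright if an unrelated mapping abuts it, so a runaway stack can never
 * silently spill into a neighbouring mapping.
 */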
  
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	pte_unmap(page_table);

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
  	return VM_FAULT_OOM;
  }
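
/*
 * Illustrative sketch, not from this file: both arms above can be seen
 * from userspace with a fresh anonymous mapping:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char c = p[0];	first read maps the shared zero page, no allocation
 *	p[0] = 1;	first write allocates the private zeroed page
 *
 * (a write to an address already mapped to the zero page goes through
 * do_wp_page() instead, with the same end result).
 */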
  
  /*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	struct page *cow_page;
	pte_t entry;
	int anon = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

  	/*
	 * If we do COW later, allocate page before taking lock_page()
  	 * on the file cache page. This will reduce lock holding time.
  	 */
  	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  
  		if (unlikely(anon_vma_prepare(vma)))
  			return VM_FAULT_OOM;
  
  		cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
  		if (!cow_page)
  			return VM_FAULT_OOM;
  
  		if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
  			page_cache_release(cow_page);
  			return VM_FAULT_OOM;
  		}
  	} else
  		cow_page = NULL;
	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY)))
		goto uncharge_out;

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		ret = VM_FAULT_HWPOISON;
		goto uncharge_out;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			page = cow_page;
			anon = 1;
			copy_user_highpage(page, vmf.page, address, vma);
			__SetPageUptodate(page);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				int tmp;

				unlock_page(page);
				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
				if (unlikely(tmp &
					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
					ret = tmp;
					goto unwritable_page;
				}
				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
					lock_page(page);
					if (!page->mapping) {
						ret = 0; /* retry the fault */
						unlock_page(page);
						goto unwritable_page;
					}
				} else
					VM_BUG_ON(!PageLocked(page));
				page_mkwrite = 1;
			}
		}

	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (anon) {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter_fast(mm, MM_FILEPAGES);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}
		set_pte_at(mm, address, page_table, entry);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, page_table);
	} else {
		if (cow_page)
			mem_cgroup_uncharge_page(cow_page);
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}
	pte_unmap_unlock(page_table, ptl);

	if (dirty_page) {
		struct address_space *mapping = page->mapping;

		if (set_page_dirty(dirty_page))
			page_mkwrite = 1;
		unlock_page(dirty_page);
		put_page(dirty_page);
		if (page_mkwrite && mapping) {
			/*
			 * Some device drivers do not set page.mapping but still
			 * dirty their pages
			 */
			balance_dirty_pages_ratelimited(mapping);
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);
	} else {
		unlock_page(vmf.page);
		if (anon)
			page_cache_release(vmf.page);
	}

	return ret;
  
  unwritable_page:
  	page_cache_release(page);
  	return ret;
  uncharge_out:
	/* the fs's ->fault handler returned an error */
  	if (cow_page) {
  		mem_cgroup_uncharge_page(cow_page);
  		page_cache_release(cow_page);
  	}
  	return ret;
  }
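
/*
 * Illustrative sketch, not part of this file: the ->fault handler invoked
 * above has this shape in this kernel's API; "mydev_fault" and "mydev_page"
 * are hypothetical, and returning without VM_FAULT_LOCKED is fine because
 * __do_fault() then takes the page lock itself:
 *
 *	static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		get_page(mydev_page);
 *		vmf->page = mydev_page;
 *		return 0;
 *	}
 */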

static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned long address, pte_t *page_table, pmd_t *pmd,
30c9f3a9f   Linus Torvalds   Remove internal u...
3236
  		unsigned int flags, pte_t orig_pte)
54cb8821d   Nick Piggin   mm: merge populat...
3237
3238
  {
  	pgoff_t pgoff = (((address & PAGE_MASK)
0da7e01f5   Dean Nelson   calculation of pg...
3239
  			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
54cb8821d   Nick Piggin   mm: merge populat...
3240

16abfa086   Hugh Dickins   Fix sys_remap_fil...
3241
3242
  	pte_unmap(page_table);
  	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
54cb8821d   Nick Piggin   mm: merge populat...
3243
  }
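
/*
 * Worked example of the pgoff calculation above (illustrative numbers,
 * not from the original source): with 4K pages (PAGE_SHIFT == 12), a vma
 * with vm_start == 0x400000 that maps its file starting at page offset
 * vm_pgoff == 0x10, faulting on address 0x403008, gives
 *
 *	pgoff = ((0x403000 - 0x400000) >> 12) + 0x10 = 0x3 + 0x10 = 0x13
 *
 * i.e. the fault is backed by page 0x13 of the file - the linear
 * relationship that the nonlinear path below is free to break.
 */
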
/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff;

	flags |= FAULT_FLAG_NONLINEAR;
	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, address, orig_pte, NULL);
		return VM_FAULT_SIGBUS;
	}

	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
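
/*
 * Illustrative note (not part of the original source): the encoded file
 * ptes handled above are established by sys_remap_file_pages(). For
 * example, a userspace call such as
 *
 *	remap_file_pages(start, 4096, 0, pgoff, 0);
 *
 * rewrites the pte for 'start' so that it encodes 'pgoff' instead of
 * following the vma's linear offset; on fault, pte_to_pgoff() recovers
 * that value and __do_fault() pages in the right part of the file.
 */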
  
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
int handle_pte_fault(struct mm_struct *mm,
		     struct vm_area_struct *vma, unsigned long address,
		     pte_t *pte, pmd_t *pmd, unsigned int flags)
{
	pte_t entry;
	spinlock_t *ptl;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (vma->vm_ops) {
				if (likely(vma->vm_ops->fault))
					return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
		if (pte_file(entry))
			return do_nonlinear_fault(mm, vma, address,
					pte, pmd, flags, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, flags, entry);
	}
	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
		update_mmu_cache(vma, address, pte);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(vma, address);
	}
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
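
/*
 * A condensed view of the dispatch in handle_pte_fault() above,
 * restating the cases the code already handles:
 *
 *	pte_none(), vma->vm_ops->fault set	-> do_linear_fault()
 *	pte_none(), otherwise			-> do_anonymous_page()
 *	!pte_present(), pte_file()		-> do_nonlinear_fault()
 *	!pte_present(), swap entry		-> do_swap_page()
 *	present, write fault, !pte_write()	-> do_wp_page()
 */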
  
/*
 * By the time we get here, we already hold the mm semaphore.
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);
	count_vm_event(PGFAULT);
	mem_cgroup_count_vm_event(mm, PGFAULT);

	/* do counter updates before entering the really critical section. */
	check_sync_rss_stat(current);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, flags);

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
		if (!vma->vm_ops)
			return do_huge_pmd_anonymous_page(mm, vma, address,
							  pmd, flags);
	} else {
		pmd_t orig_pmd = *pmd;
		barrier();
		if (pmd_trans_huge(orig_pmd)) {
			if (flags & FAULT_FLAG_WRITE &&
			    !pmd_write(orig_pmd) &&
			    !pmd_trans_splitting(orig_pmd))
				return do_huge_pmd_wp_page(mm, vma, address,
							   pmd, orig_pmd);
			return 0;
		}
	}

	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us, just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * in read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);

	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
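
/*
 * Illustrative note (assuming x86-64 with 4K pages and four-level
 * paging; other architectures differ): the pgd/pud/pmd/pte walk above
 * indexes each level with a slice of the faulting address:
 *
 *	bits 47-39	pgd index
 *	bits 38-30	pud index
 *	bits 29-21	pmd index
 *	bits 20-12	pte index
 *	bits 11-0	offset within the 4K page
 *
 * A transparent huge page stops the walk at the pmd level and maps
 * bits 20-0 (2M) directly, which is why the THP cases above return
 * before a pte is ever looked at.
 */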
  
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */
	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
  
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */
	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
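
/*
 * Note on the pattern used by __pud_alloc()/__pmd_alloc() above
 * (restating the existing code, nothing new): the new table is
 * allocated outside page_table_lock, the lock is taken, and the parent
 * entry is re-checked; if another thread won the race and populated it
 * first, the freshly allocated table is simply freed. The smp_wmb()
 * ensures the zeroed table contents are visible before the entry that
 * publishes them, as the referenced comment in __pte_alloc() explains.
 */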
  
int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
	BUG_ON(addr >= end);
	BUG_ON(end > vma->vm_end);
	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -EFAULT;
}
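
/*
 * Worked example of the length calculation above (illustrative numbers,
 * not from the original source): with 4K pages, addr == 0x1800 and
 * end == 0x3001 give
 *
 *	len = DIV_ROUND_UP(0x3001, 0x1000) - 0x1800/0x1000 = 4 - 1 = 3
 *
 * i.e. get_user_pages() is asked for the three pages covering
 * 0x1000-0x3fff, every page the [addr, end) range touches.
 */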

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_mm(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */
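
/*
 * Background note (hedged, not from the original source; true of the
 * i386/x86-64 setups of this era): the "gate" vma describes the
 * kernel-provided vsyscall/vDSO page that lives at a fixed address,
 * which is why it is synthesized here rather than found in any mm's
 * vma list, and why in_gate_area_no_mm() can answer from the
 * FIXADDR_USER_START/END constants alone.
 */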

static int __follow_pte(struct mm_struct *mm, unsigned long address,
		pte_t **ptepp, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, address);
	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
	if (pmd_huge(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
	if (!ptep)
		goto out;
	if (!pte_present(*ptep))
		goto unlock;
	*ptepp = ptep;
	return 0;
unlock:
	pte_unmap_unlock(ptep, *ptlp);
out:
	return -EINVAL;
}

static inline int follow_pte(struct mm_struct *mm, unsigned long address,
			     pte_t **ptepp, spinlock_t **ptlp)
{
	int res;

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(*ptlp,
			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
	return res;
}

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	int ret = -EINVAL;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return ret;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
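
/*
 * Illustrative caller sketch (hypothetical helper, not part of this
 * file): how a driver might resolve the frame behind a VM_PFNMAP
 * mapping it owns, with mmap_sem held.
 */
#if 0
static int example_addr_to_pfn(struct vm_area_struct *vma,
			       unsigned long uaddr, unsigned long *pfn)
{
	/* fails with -EINVAL unless vma is VM_IO or VM_PFNMAP */
	return follow_pfn(vma, uaddr, pfn);
}
#endif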

#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		unsigned long *prot, resource_size_t *phys)
{
	int ret = -EINVAL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;

	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
		goto out;
	pte = *ptep;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	*prot = pgprot_val(pte_pgprot(pte));
	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return ret;
}
  
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write)
{
	resource_size_t phys_addr;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}
#endif
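
/*
 * Note (hedged, not from the original source): generic_access_phys()
 * is intended to be wired up as vma->vm_ops->access for VM_IO/VM_PFNMAP
 * mappings (see the ->access fallback in __access_remote_vm() below),
 * so ptrace-style accesses can reach memory that get_user_pages()
 * cannot pin. It maps a single page, so callers are expected to keep
 * 'len' within the page that 'addr' falls in.
 */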

/*
 * Access another process' address space as given in mm.  If non-NULL, use the
 * given task for page fault accounting.
 */
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	void *old_buf = buf;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0) {
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
#ifdef CONFIG_HAVE_IOREMAP_PROT
			vma = find_vma(mm, addr);
			if (!vma || vma->vm_start > addr)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
#endif
				break;
			bytes = ret;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			page_cache_release(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}
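
/*
 * Worked example of the chunking above (illustrative numbers, not from
 * the original source): copying len == 16 bytes from addr == 0x2ff8
 * with 4K pages takes two iterations:
 *
 *	1st: offset = 0xff8, bytes = min(16, 0x1000 - 0xff8) = 8
 *	2nd: addr = 0x3000, offset = 0, bytes = 8
 *
 * so no single kmap()/copy step ever crosses a page boundary.
 */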

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
	mmput(mm);

	return ret;
}
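
/*
 * Illustrative caller sketch (hypothetical helper, not part of this
 * file): a ptrace-style word read in the manner of
 * generic_ptrace_peekdata().
 */
#if 0
static int example_peek_word(struct task_struct *child,
			     unsigned long addr, unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif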

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * Do not print if we are in atomic
	 * contexts (in exception stacks, etc.):
	 */
	if (preempt_count())
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf) {
			char *p, *s;

			p = d_path(&f->f_path, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			s = strrchr(p, '/');
			if (s)
				p = s+1;
			printk("%s%s[%lx+%lx]", prefix, p,
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&mm->mmap_sem);
}

#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem; this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	might_sleep();
	/*
	 * It would be nicer to annotate only the paths which are not under
	 * pagefault_disable, but that requires a larger audit and
	 * providing helpers like get_user_atomic.
	 */
	if (!in_atomic() && current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif
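
/*
 * Example of the bug class might_fault() exposes (illustrative, not
 * from the original source): calling copy_to_user() while holding
 * mmap_sem for writing can deadlock, because the fault path may need
 * mmap_sem again. Under CONFIG_PROVE_LOCKING the might_lock_read()
 * above lets lockdep report such an inversion even on runs where the
 * user page happens to be resident and no fault ever occurs.
 */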
  
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
void clear_huge_page(struct page *page,
		     unsigned long addr, unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
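
/*
 * Note (assuming the default MAX_ORDER of 11, so MAX_ORDER_NR_PAGES ==
 * 1024; illustrative, not from the original source): a 2M huge page
 * (512 base pages) takes the simple 'page + i' loops above, while a 1G
 * gigantic page (262144 base pages) exceeds MAX_ORDER_NR_PAGES and must
 * use mem_map_next(), because the struct page array backing a gigantic
 * page is not guaranteed to be virtually contiguous.
 */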