mm/memory.c

  /*
   *  linux/mm/memory.c
   *
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   */
  
  /*
   * demand-loading started 01.12.91 - seems it is high on the list of
   * things wanted, and it should be easy to implement. - Linus
   */
  
  /*
   * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
   * pages started 02.12.91, seems to work. - Linus.
   *
   * Tested sharing by executing about 30 /bin/sh: under the old kernel it
   * would have taken more than the 6M I have free, but it worked well as
   * far as I could see.
   *
   * Also corrected some "invalidate()"s - I wasn't doing enough of them.
   */
  
  /*
   * Real VM (paging to/from disk) started 18.12.91. Much more work and
   * thought has to go into this. Oh, well..
   * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
   *		Found it. Everything seems to work now.
   * 20.12.91  -  Ok, making the swap-device changeable like the root.
   */
  
  /*
   * 05.04.94  -  Multi-page memory management added for v1.1.
   * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
   *
   * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
   *		(Gerhard.Wichert@pdb.siemens.de)
   *
   * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
   */
  
  #include <linux/kernel_stat.h>
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/mman.h>
  #include <linux/swap.h>
  #include <linux/highmem.h>
  #include <linux/pagemap.h>
  #include <linux/ksm.h>
  #include <linux/rmap.h>
  #include <linux/export.h>
  #include <linux/delayacct.h>
  #include <linux/init.h>
  #include <linux/pfn_t.h>
  #include <linux/writeback.h>
  #include <linux/memcontrol.h>
  #include <linux/mmu_notifier.h>
  #include <linux/kallsyms.h>
  #include <linux/swapops.h>
  #include <linux/elf.h>
  #include <linux/gfp.h>
  #include <linux/migrate.h>
  #include <linux/string.h>
  #include <linux/dma-debug.h>
  #include <linux/debugfs.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/dax.h>

  #include <asm/io.h>
  #include <asm/mmu_context.h>
  #include <asm/pgalloc.h>
  #include <asm/uaccess.h>
  #include <asm/tlb.h>
  #include <asm/tlbflush.h>
  #include <asm/pgtable.h>
  #include "internal.h"
  #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
  #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  #endif
  #ifndef CONFIG_NEED_MULTIPLE_NODES
  /* use the per-pgdat data instead for discontigmem - mbligh */
  unsigned long max_mapnr;
  struct page *mem_map;
  
  EXPORT_SYMBOL(max_mapnr);
  EXPORT_SYMBOL(mem_map);
  #endif
  /*
   * A number of key systems in x86 including ioremap() rely on the assumption
   * that high_memory defines the upper bound on direct map memory, the end
   * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
   * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
   * and ZONE_HIGHMEM.
   */
  void * high_memory;

  EXPORT_SYMBOL(high_memory);

  /*
   * Randomize the address space (stacks, mmaps, brk, etc.).
   *
   * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
   *   as ancient (libc5 based) binaries can segfault. )
   */
  int randomize_va_space __read_mostly =
  #ifdef CONFIG_COMPAT_BRK
  					1;
  #else
  					2;
  #endif
  
  static int __init disable_randmaps(char *s)
  {
  	randomize_va_space = 0;
  	return 1;
  }
  __setup("norandmaps", disable_randmaps);
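
  /*
   * Usage example (illustrative): randomize_va_space is exposed as the
   * kernel.randomize_va_space sysctl, e.g.
   *
   *	echo 0 > /proc/sys/kernel/randomize_va_space
   *
   * where 0 disables randomization, 1 randomizes stack/mmap/vdso and 2 also
   * randomizes brk; booting with "norandmaps" on the kernel command line
   * forces 0 via the __setup() handler above.
   */
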
  unsigned long zero_pfn __read_mostly;
  unsigned long highest_memmap_pfn __read_mostly;

  EXPORT_SYMBOL(zero_pfn);
  /*
   * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
   */
  static int __init init_zero_pfn(void)
  {
  	zero_pfn = page_to_pfn(ZERO_PAGE(0));
  	return 0;
  }
  core_initcall(init_zero_pfn);


  #if defined(SPLIT_RSS_COUNTING)
  void sync_mm_rss(struct mm_struct *mm)
  {
  	int i;
  
  	for (i = 0; i < NR_MM_COUNTERS; i++) {
  		if (current->rss_stat.count[i]) {
  			add_mm_counter(mm, i, current->rss_stat.count[i]);
  			current->rss_stat.count[i] = 0;
  		}
  	}
  	current->rss_stat.events = 0;
  }
  
  static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
  {
  	struct task_struct *task = current;
  
  	if (likely(task->mm == mm))
  		task->rss_stat.count[member] += val;
  	else
  		add_mm_counter(mm, member, val);
  }
  #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
  #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
  
  /* sync counter once per 64 page faults */
  #define TASK_RSS_EVENTS_THRESH	(64)
  static void check_sync_rss_stat(struct task_struct *task)
  {
  	if (unlikely(task != current))
  		return;
  	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
  		sync_mm_rss(task->mm);
  }
  #else /* SPLIT_RSS_COUNTING */
  
  #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
  #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
  
  static void check_sync_rss_stat(struct task_struct *task)
  {
  }
  #endif /* SPLIT_RSS_COUNTING */
  
  #ifdef HAVE_GENERIC_MMU_GATHER
  static bool tlb_next_batch(struct mmu_gather *tlb)
  {
  	struct mmu_gather_batch *batch;
  
  	batch = tlb->active;
  	if (batch->next) {
  		tlb->active = batch->next;
  		return true;
  	}
  	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
  		return false;

  	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
  	if (!batch)
  		return false;

  	tlb->batch_count++;
  	batch->next = NULL;
  	batch->nr   = 0;
  	batch->max  = MAX_GATHER_BATCH;
  
  	tlb->active->next = batch;
  	tlb->active = batch;
  	return true;
  }
  
  /* tlb_gather_mmu
   *	Called to initialize an (on-stack) mmu_gather structure for page-table
   *	tear-down from @mm. The @fullmm argument is used when @mm is without
   *	users and we're going to destroy the full address space (exit/execve).
   */
  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
  {
  	tlb->mm = mm;
  	/* Is it from 0 to ~0? */
  	tlb->fullmm     = !(start | (end+1));
  	tlb->need_flush_all = 0;
  	tlb->local.next = NULL;
  	tlb->local.nr   = 0;
  	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
  	tlb->active     = &tlb->local;
  	tlb->batch_count = 0;
  
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	tlb->batch = NULL;
  #endif
  	tlb->page_size = 0;
  
  	__tlb_reset_range(tlb);
  }
  static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
  {
  	if (!tlb->end)
  		return;
  	tlb_flush(tlb);
  	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	tlb_table_flush(tlb);
  #endif
  	__tlb_reset_range(tlb);
  }
  
  static void tlb_flush_mmu_free(struct mmu_gather *tlb)
  {
  	struct mmu_gather_batch *batch;

  	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
  		free_pages_and_swap_cache(batch->pages, batch->nr);
  		batch->nr = 0;
  	}
  	tlb->active = &tlb->local;
  }
  void tlb_flush_mmu(struct mmu_gather *tlb)
  {
  	tlb_flush_mmu_tlbonly(tlb);
  	tlb_flush_mmu_free(tlb);
  }
  /* tlb_finish_mmu
   *	Called at the end of the shootdown operation to free up any resources
   *	that were required.
   */
  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  {
  	struct mmu_gather_batch *batch, *next;
  
  	tlb_flush_mmu(tlb);
  
  	/* keep the page table cache within bounds */
  	check_pgt_cache();
  
  	for (batch = tlb->local.next; batch; batch = next) {
  		next = batch->next;
  		free_pages((unsigned long)batch, 0);
  	}
  	tlb->local.next = NULL;
  }
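
  /*
   * A minimal caller sketch (illustrative; the real users are the unmap
   * paths, e.g. zap_page_range()):
   *
   *	struct mmu_gather tlb;
   *
   *	tlb_gather_mmu(&tlb, mm, start, end);
   *	... clear ptes, handing pages to __tlb_remove_page() ...
   *	tlb_finish_mmu(&tlb, start, end);
   */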
  
  /* __tlb_remove_page
   *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
   *	handling the additional races in SMP caused by other CPUs caching valid
   *	mappings in their TLBs. Returns the number of free page slots left.
   *	When out of page slots we must call tlb_flush_mmu().
   *	returns true if the caller should flush.
   */
  bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
  {
  	struct mmu_gather_batch *batch;
  	VM_BUG_ON(!tlb->end);

  	if (!tlb->page_size)
  		tlb->page_size = page_size;
  	else {
  		if (page_size != tlb->page_size)
  			return true;
  	}
  	batch = tlb->active;
  	if (batch->nr == batch->max) {
  		if (!tlb_next_batch(tlb))
  			return true;
  		batch = tlb->active;
  	}
  	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

  	batch->pages[batch->nr++] = page;
  	return false;
  }
  
  #endif /* HAVE_GENERIC_MMU_GATHER */
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  
  /*
   * See the comment near struct mmu_table_batch.
   */
  
  static void tlb_remove_table_smp_sync(void *arg)
  {
  	/* Simply deliver the interrupt */
  }
  
  static void tlb_remove_table_one(void *table)
  {
  	/*
  	 * This isn't an RCU grace period and hence the page-tables cannot be
  	 * assumed to be actually RCU-freed.
  	 *
  	 * It is however sufficient for software page-table walkers that rely on
  	 * IRQ disabling. See the comment near struct mmu_table_batch.
  	 */
  	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
  	__tlb_remove_table(table);
  }
  
  static void tlb_remove_table_rcu(struct rcu_head *head)
  {
  	struct mmu_table_batch *batch;
  	int i;
  
  	batch = container_of(head, struct mmu_table_batch, rcu);
  
  	for (i = 0; i < batch->nr; i++)
  		__tlb_remove_table(batch->tables[i]);
  
  	free_page((unsigned long)batch);
  }
  
  void tlb_table_flush(struct mmu_gather *tlb)
  {
  	struct mmu_table_batch **batch = &tlb->batch;
  
  	if (*batch) {
  		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
  		*batch = NULL;
  	}
  }
  
  void tlb_remove_table(struct mmu_gather *tlb, void *table)
  {
  	struct mmu_table_batch **batch = &tlb->batch;
  	/*
  	 * When there's less than two users of this mm there cannot be a
  	 * concurrent page-table walk.
  	 */
  	if (atomic_read(&tlb->mm->mm_users) < 2) {
  		__tlb_remove_table(table);
  		return;
  	}
  
  	if (*batch == NULL) {
  		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
  		if (*batch == NULL) {
  			tlb_remove_table_one(table);
  			return;
  		}
  		(*batch)->nr = 0;
  	}
  	(*batch)->tables[(*batch)->nr++] = table;
  	if ((*batch)->nr == MAX_TABLE_BATCH)
  		tlb_table_flush(tlb);
  }
  #endif /* CONFIG_HAVE_RCU_TABLE_FREE */

  /*
   * Note: this doesn't free the actual pages themselves. That
   * has been handled earlier when unmapping all the memory regions.
   */
  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
  			   unsigned long addr)
  {
  	pgtable_t token = pmd_pgtable(*pmd);
  	pmd_clear(pmd);
  	pte_free_tlb(tlb, token, addr);
  	atomic_long_dec(&tlb->mm->nr_ptes);
  }
  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  				unsigned long addr, unsigned long end,
  				unsigned long floor, unsigned long ceiling)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	unsigned long start;

  	start = addr;
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		free_pte_range(tlb, pmd, addr);
  	} while (pmd++, addr = next, addr != end);
  	start &= PUD_MASK;
  	if (start < floor)
  		return;
  	if (ceiling) {
  		ceiling &= PUD_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		return;
  
  	pmd = pmd_offset(pud, start);
  	pud_clear(pud);
  	pmd_free_tlb(tlb, pmd, start);
  	mm_dec_nr_pmds(tlb->mm);
  }
  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  				unsigned long addr, unsigned long end,
  				unsigned long floor, unsigned long ceiling)
  {
  	pud_t *pud;
  	unsigned long next;
  	unsigned long start;

  	start = addr;
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  	} while (pud++, addr = next, addr != end);
  	start &= PGDIR_MASK;
  	if (start < floor)
  		return;
  	if (ceiling) {
  		ceiling &= PGDIR_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		return;
  
  	pud = pud_offset(pgd, start);
  	pgd_clear(pgd);
  	pud_free_tlb(tlb, pud, start);
  }
  
  /*
   * This function frees user-level page tables of a process.
   */
  void free_pgd_range(struct mmu_gather *tlb,
  			unsigned long addr, unsigned long end,
  			unsigned long floor, unsigned long ceiling)
  {
  	pgd_t *pgd;
  	unsigned long next;
  
  	/*
  	 * The next few lines have given us lots of grief...
  	 *
  	 * Why are we testing PMD* at this top level?  Because often
  	 * there will be no work to do at all, and we'd prefer not to
  	 * go all the way down to the bottom just to discover that.
  	 *
  	 * Why all these "- 1"s?  Because 0 represents both the bottom
  	 * of the address space and the top of it (using -1 for the
  	 * top wouldn't help much: the masks would do the wrong thing).
  	 * The rule is that addr 0 and floor 0 refer to the bottom of
  	 * the address space, but end 0 and ceiling 0 refer to the top
  	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
  	 * that end 0 case should be mythical).
  	 *
  	 * Wherever addr is brought up or ceiling brought down, we must
  	 * be careful to reject "the opposite 0" before it confuses the
  	 * subsequent tests.  But what about where end is brought down
  	 * by PMD_SIZE below? no, end can't go down to 0 there.
  	 *
  	 * Whereas we round start (addr) and ceiling down, by different
  	 * masks at different levels, in order to test whether a table
  	 * now has no other vmas using it, so can be freed, we don't
  	 * bother to round floor or end up - the tests don't need that.
  	 */
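  	/*
  	 * Worked example of the wrap-around trick (illustrative): a caller
  	 * that passes ceiling == 0 means "up to the top of the address
  	 * space"; ceiling is then deliberately left at 0, so ceiling - 1
  	 * wraps to ~0UL and the "end - 1 > ceiling - 1" test below can
  	 * never pull end down.
  	 */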

  	addr &= PMD_MASK;
  	if (addr < floor) {
  		addr += PMD_SIZE;
  		if (!addr)
  			return;
  	}
  	if (ceiling) {
  		ceiling &= PMD_MASK;
  		if (!ceiling)
  			return;
  	}
  	if (end - 1 > ceiling - 1)
  		end -= PMD_SIZE;
  	if (addr > end - 1)
  		return;
  	pgd = pgd_offset(tlb->mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
  	} while (pgd++, addr = next, addr != end);
  }
  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
  		unsigned long floor, unsigned long ceiling)
  {
  	while (vma) {
  		struct vm_area_struct *next = vma->vm_next;
  		unsigned long addr = vma->vm_start;
  		/*
  		 * Hide vma from rmap and truncate_pagecache before freeing
  		 * pgtables
  		 */
  		unlink_anon_vmas(vma);
  		unlink_file_vma(vma);
  		if (is_vm_hugetlb_page(vma)) {
  			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
  				floor, next? next->vm_start: ceiling);
  		} else {
  			/*
  			 * Optimization: gather nearby vmas into one call down
  			 */
  			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
  			       && !is_vm_hugetlb_page(next)) {
  				vma = next;
  				next = vma->vm_next;
  				unlink_anon_vmas(vma);
  				unlink_file_vma(vma);
  			}
  			free_pgd_range(tlb, addr, vma->vm_end,
  				floor, next? next->vm_start: ceiling);
  		}
  		vma = next;
  	}
  }
  int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
  {
  	spinlock_t *ptl;
  	pgtable_t new = pte_alloc_one(mm, address);
  	if (!new)
  		return -ENOMEM;
  	/*
  	 * Ensure all pte setup (eg. pte page lock and page clearing) are
  	 * visible before the pte is made visible to other CPUs by being
  	 * put into page tables.
  	 *
  	 * The other side of the story is the pointer chasing in the page
  	 * table walking code (when walking the page table without locking;
  	 * ie. most of the time). Fortunately, these data accesses consist
  	 * of a chain of data-dependent loads, meaning most CPUs (alpha
  	 * being the notable exception) will already guarantee loads are
  	 * seen in-order. See the alpha page table accessors for the
  	 * smp_read_barrier_depends() barriers in page table walking code.
  	 */
  	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
  	ptl = pmd_lock(mm, pmd);
  	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
  		atomic_long_inc(&mm->nr_ptes);
  		pmd_populate(mm, pmd, new);
  		new = NULL;
  	}
  	spin_unlock(ptl);
  	if (new)
  		pte_free(mm, new);
  	return 0;
  }
  int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
  {
  	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
  	if (!new)
  		return -ENOMEM;
  	smp_wmb(); /* See comment in __pte_alloc */
  	spin_lock(&init_mm.page_table_lock);
  	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
  		pmd_populate_kernel(&init_mm, pmd, new);
  		new = NULL;
  	}
  	spin_unlock(&init_mm.page_table_lock);
  	if (new)
  		pte_free_kernel(&init_mm, new);
  	return 0;
  }
  static inline void init_rss_vec(int *rss)
  {
  	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
  }
  
  static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
  {
  	int i;
  	if (current->mm == mm)
  		sync_mm_rss(mm);
  	for (i = 0; i < NR_MM_COUNTERS; i++)
  		if (rss[i])
  			add_mm_counter(mm, i, rss[i]);
  }
  /*
   * This function is called to print an error when a bad pte
   * is found. For example, we might have a PFN-mapped pte in
   * a region that doesn't allow it.
   *
   * The calling function must still handle the error.
   */
  static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  			  pte_t pte, struct page *page)
  {
  	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
  	pud_t *pud = pud_offset(pgd, addr);
  	pmd_t *pmd = pmd_offset(pud, addr);
  	struct address_space *mapping;
  	pgoff_t index;
  	static unsigned long resume;
  	static unsigned long nr_shown;
  	static unsigned long nr_unshown;
  
  	/*
  	 * Allow a burst of 60 reports, then keep quiet for that minute;
  	 * or allow a steady drip of one report per second.
  	 */
  	if (nr_shown == 60) {
  		if (time_before(jiffies, resume)) {
  			nr_unshown++;
  			return;
  		}
  		if (nr_unshown) {
  			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
  				 nr_unshown);
  			nr_unshown = 0;
  		}
  		nr_shown = 0;
  	}
  	if (nr_shown++ == 0)
  		resume = jiffies + 60 * HZ;
  
  	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
  	index = linear_page_index(vma, addr);
  	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
  		 current->comm,
  		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
  	if (page)
  		dump_page(page, "bad pte");
  	pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
  		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
  	/*
  	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
  	 */
  	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
  		 vma->vm_file,
  		 vma->vm_ops ? vma->vm_ops->fault : NULL,
  		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
  		 mapping ? mapping->a_ops->readpage : NULL);
  	dump_stack();
  	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  }
  
  /*
   * vm_normal_page -- This function gets the "struct page" associated with a pte.
   *
   * "Special" mappings do not wish to be associated with a "struct page" (either
   * it doesn't exist, or it exists but they don't want to touch it). In this
   * case, NULL is returned here. "Normal" mappings do have a struct page.
   *
   * There are 2 broad cases. Firstly, an architecture may define a pte_special()
   * pte bit, in which case this function is trivial. Secondly, an architecture
   * may not have a spare pte bit, which requires a more complicated scheme,
   * described below.
   *
   * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
   * special mapping (even if there are underlying and valid "struct pages").
   * COWed pages of a VM_PFNMAP are always normal.
   *
   * The way we recognize COWed pages within VM_PFNMAP mappings is through the
   * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
   * set, and the vm_pgoff will point to the first PFN mapped: thus every special
   * mapping will always honor the rule
   *
   *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
   *
   * And for normal mappings this is false.
   *
   * This restricts such mappings to be a linear translation from virtual address
   * to pfn. To get around this restriction, we allow arbitrary mappings so long
   * as the vma is not a COW mapping; in that case, we know that all ptes are
   * special (because none can have been COWed).
   *
   *
   * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
   *
   * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
   * page" backing, however the difference is that _all_ pages with a struct
   * page (that is, those where pfn_valid is true) are refcounted and considered
   * normal pages by the VM. The disadvantage is that pages are refcounted
   * (which can be slower and simply not an option for some PFNMAP users). The
   * advantage is that we don't have to follow the strict linearity rule of
   * PFNMAP mappings in order to support COWable mappings.
   *
   */
  #ifdef __HAVE_ARCH_PTE_SPECIAL
  # define HAVE_PTE_SPECIAL 1
  #else
  # define HAVE_PTE_SPECIAL 0
  #endif
  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
  				pte_t pte)
  {
  	unsigned long pfn = pte_pfn(pte);
  
  	if (HAVE_PTE_SPECIAL) {
  		if (likely(!pte_special(pte)))
  			goto check_pfn;
  		if (vma->vm_ops && vma->vm_ops->find_special_page)
  			return vma->vm_ops->find_special_page(vma, addr);
  		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
  			return NULL;
  		if (!is_zero_pfn(pfn))
  			print_bad_pte(vma, addr, pte, NULL);
  		return NULL;
  	}
  
  	/* !HAVE_PTE_SPECIAL case follows: */
  	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  		if (vma->vm_flags & VM_MIXEDMAP) {
  			if (!pfn_valid(pfn))
  				return NULL;
  			goto out;
  		} else {
  			unsigned long off;
  			off = (addr - vma->vm_start) >> PAGE_SHIFT;
  			if (pfn == vma->vm_pgoff + off)
  				return NULL;
  			if (!is_cow_mapping(vma->vm_flags))
  				return NULL;
  		}
  	}
  	if (is_zero_pfn(pfn))
  		return NULL;
  check_pfn:
  	if (unlikely(pfn > highest_memmap_pfn)) {
  		print_bad_pte(vma, addr, pte, NULL);
  		return NULL;
  	}
  
  	/*
  	 * NOTE! We still have PageReserved() pages in the page tables.
  	 * eg. VDSO mappings can cause them to exist.
  	 */
  out:
  	return pfn_to_page(pfn);
  }
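
  /*
   * Worked example of the linearity rule above (illustrative numbers): in a
   * remap_pfn_range()ed VM_PFNMAP vma with vm_pgoff == 0x100, the pte for
   * vm_start + 2 * PAGE_SIZE is expected to carry pfn 0x102; a pte whose pfn
   * differs can only be a COWed page, which vm_normal_page() treats as a
   * normal, refcounted page.
   */
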
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
  				pmd_t pmd)
  {
  	unsigned long pfn = pmd_pfn(pmd);
  
  	/*
  	 * There is no pmd_special() but there may be special pmds, e.g.
  	 * in a direct-access (dax) mapping, so let's just replicate the
  	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
  	 */
  	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
  		if (vma->vm_flags & VM_MIXEDMAP) {
  			if (!pfn_valid(pfn))
  				return NULL;
  			goto out;
  		} else {
  			unsigned long off;
  			off = (addr - vma->vm_start) >> PAGE_SHIFT;
  			if (pfn == vma->vm_pgoff + off)
  				return NULL;
  			if (!is_cow_mapping(vma->vm_flags))
  				return NULL;
  		}
  	}
  
  	if (is_zero_pfn(pfn))
  		return NULL;
  	if (unlikely(pfn > highest_memmap_pfn))
  		return NULL;
  
  	/*
  	 * NOTE! We still have PageReserved() pages in the page tables.
  	 * eg. VDSO mappings can cause them to exist.
  	 */
  out:
  	return pfn_to_page(pfn);
  }
  #endif
  /*
   * copy one vm_area from one task to the other. Assumes the page tables
   * already present in the new task to be cleared in the whole range
   * covered by this vma.
   */
  static inline unsigned long
  copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
  		unsigned long addr, int *rss)
  {
  	unsigned long vm_flags = vma->vm_flags;
  	pte_t pte = *src_pte;
  	struct page *page;
  
  	/* pte contains position in swap or file, so copy. */
  	if (unlikely(!pte_present(pte))) {
  		swp_entry_t entry = pte_to_swp_entry(pte);
  
  		if (likely(!non_swap_entry(entry))) {
  			if (swap_duplicate(entry) < 0)
  				return entry.val;
  
  			/* make sure dst_mm is on swapoff's mmlist. */
  			if (unlikely(list_empty(&dst_mm->mmlist))) {
  				spin_lock(&mmlist_lock);
  				if (list_empty(&dst_mm->mmlist))
  					list_add(&dst_mm->mmlist,
  							&src_mm->mmlist);
  				spin_unlock(&mmlist_lock);
  			}
  			rss[MM_SWAPENTS]++;
  		} else if (is_migration_entry(entry)) {
  			page = migration_entry_to_page(entry);
  			rss[mm_counter(page)]++;
  
  			if (is_write_migration_entry(entry) &&
  					is_cow_mapping(vm_flags)) {
  				/*
  				 * COW mappings require pages in both
  				 * parent and child to be set to read.
  				 */
  				make_migration_entry_read(&entry);
  				pte = swp_entry_to_pte(entry);
  				if (pte_swp_soft_dirty(*src_pte))
  					pte = pte_swp_mksoft_dirty(pte);
  				set_pte_at(src_mm, addr, src_pte, pte);
  			}
  		}
  		goto out_set_pte;
  	}
  	/*
  	 * If it's a COW mapping, write protect it both
  	 * in the parent and the child
  	 */
  	if (is_cow_mapping(vm_flags)) {
  		ptep_set_wrprotect(src_mm, addr, src_pte);
  		pte = pte_wrprotect(pte);
  	}
  
  	/*
  	 * If it's a shared mapping, mark it clean in
  	 * the child
  	 */
  	if (vm_flags & VM_SHARED)
  		pte = pte_mkclean(pte);
  	pte = pte_mkold(pte);
  
  	page = vm_normal_page(vma, addr, pte);
  	if (page) {
  		get_page(page);
  		page_dup_rmap(page, false);
  		rss[mm_counter(page)]++;
  	}
  
  out_set_pte:
  	set_pte_at(dst_mm, addr, dst_pte, pte);
  	return 0;
  }
  static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
  		   unsigned long addr, unsigned long end)
  {
  	pte_t *orig_src_pte, *orig_dst_pte;
  	pte_t *src_pte, *dst_pte;
  	spinlock_t *src_ptl, *dst_ptl;
  	int progress = 0;
  	int rss[NR_MM_COUNTERS];
  	swp_entry_t entry = (swp_entry_t){0};
  
  again:
  	init_rss_vec(rss);
  	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
  	if (!dst_pte)
  		return -ENOMEM;
  	src_pte = pte_offset_map(src_pmd, addr);
  	src_ptl = pte_lockptr(src_mm, src_pmd);
  	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  	orig_src_pte = src_pte;
  	orig_dst_pte = dst_pte;
  	arch_enter_lazy_mmu_mode();

  	do {
  		/*
  		 * We are holding two locks at this point - either of them
  		 * could generate latencies in another task on another CPU.
  		 */
  		if (progress >= 32) {
  			progress = 0;
  			if (need_resched() ||
  			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
  				break;
  		}
  		if (pte_none(*src_pte)) {
  			progress++;
  			continue;
  		}
  		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
  							vma, addr, rss);
  		if (entry.val)
  			break;
  		progress += 8;
  	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

  	arch_leave_lazy_mmu_mode();
  	spin_unlock(src_ptl);
  	pte_unmap(orig_src_pte);
  	add_mm_rss_vec(dst_mm, rss);
  	pte_unmap_unlock(orig_dst_pte, dst_ptl);
  	cond_resched();
  
  	if (entry.val) {
  		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
  			return -ENOMEM;
  		progress = 0;
  	}
  	if (addr != end)
  		goto again;
  	return 0;
  }
  
  static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end)
  {
  	pmd_t *src_pmd, *dst_pmd;
  	unsigned long next;
  
  	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
  	if (!dst_pmd)
  		return -ENOMEM;
  	src_pmd = pmd_offset(src_pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
  			int err;
  			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
  			err = copy_huge_pmd(dst_mm, src_mm,
  					    dst_pmd, src_pmd, addr, vma);
  			if (err == -ENOMEM)
  				return -ENOMEM;
  			if (!err)
  				continue;
  			/* fall through */
  		}
  		if (pmd_none_or_clear_bad(src_pmd))
  			continue;
  		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
  						vma, addr, next))
  			return -ENOMEM;
  	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
  	return 0;
  }
  
  static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end)
  {
  	pud_t *src_pud, *dst_pud;
  	unsigned long next;
  
  	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
  	if (!dst_pud)
  		return -ENOMEM;
  	src_pud = pud_offset(src_pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(src_pud))
  			continue;
  		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
  						vma, addr, next))
  			return -ENOMEM;
  	} while (dst_pud++, src_pud++, addr = next, addr != end);
  	return 0;
  }
  
  int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  		struct vm_area_struct *vma)
  {
  	pgd_t *src_pgd, *dst_pgd;
  	unsigned long next;
  	unsigned long addr = vma->vm_start;
  	unsigned long end = vma->vm_end;
  	unsigned long mmun_start;	/* For mmu_notifiers */
  	unsigned long mmun_end;		/* For mmu_notifiers */
  	bool is_cow;
  	int ret;

  	/*
  	 * Don't copy ptes where a page fault will fill them correctly.
  	 * Fork becomes much lighter when there are big shared or private
  	 * readonly mappings. The tradeoff is that copy_page_range is more
  	 * efficient than faulting.
  	 */
  	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
  			!vma->anon_vma)
  		return 0;

  	if (is_vm_hugetlb_page(vma))
  		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
  	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
  		/*
  		 * We do not free on error cases below as remove_vma
  		 * gets called on error from higher level routine
  		 */
  		ret = track_pfn_copy(vma);
  		if (ret)
  			return ret;
  	}
  	/*
  	 * We need to invalidate the secondary MMU mappings only when
  	 * there could be a permission downgrade on the ptes of the
  	 * parent mm. And a permission downgrade will only happen if
  	 * is_cow_mapping() returns true.
  	 */
  	is_cow = is_cow_mapping(vma->vm_flags);
  	mmun_start = addr;
  	mmun_end   = end;
  	if (is_cow)
  		mmu_notifier_invalidate_range_start(src_mm, mmun_start,
  						    mmun_end);
  
  	ret = 0;
  	dst_pgd = pgd_offset(dst_mm, addr);
  	src_pgd = pgd_offset(src_mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(src_pgd))
  			continue;
  		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
  					    vma, addr, next))) {
  			ret = -ENOMEM;
  			break;
  		}
  	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

  	if (is_cow)
  		mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
  	return ret;
  }
  static unsigned long zap_pte_range(struct mmu_gather *tlb,
  				struct vm_area_struct *vma, pmd_t *pmd,
  				unsigned long addr, unsigned long end,
  				struct zap_details *details)
  {
  	struct mm_struct *mm = tlb->mm;
  	int force_flush = 0;
  	int rss[NR_MM_COUNTERS];
  	spinlock_t *ptl;
  	pte_t *start_pte;
  	pte_t *pte;
  	swp_entry_t entry;
  	struct page *pending_page = NULL;

  again:
  	init_rss_vec(rss);
  	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  	pte = start_pte;
  	arch_enter_lazy_mmu_mode();
  	do {
  		pte_t ptent = *pte;
  		if (pte_none(ptent)) {
  			continue;
  		}

  		if (pte_present(ptent)) {
  			struct page *page;

  			page = vm_normal_page(vma, addr, ptent);
  			if (unlikely(details) && page) {
  				/*
  				 * unmap_shared_mapping_pages() wants to
  				 * invalidate cache without truncating:
  				 * unmap shared but keep private pages.
  				 */
  				if (details->check_mapping &&
  				    details->check_mapping != page_rmapping(page))
  					continue;
  			}
  			ptent = ptep_get_and_clear_full(mm, addr, pte,
  							tlb->fullmm);
  			tlb_remove_tlb_entry(tlb, pte, addr);
  			if (unlikely(!page))
  				continue;
  
  			if (!PageAnon(page)) {
  				if (pte_dirty(ptent)) {
  					/*
  					 * oom_reaper cannot tear down dirty
  					 * pages
  					 */
  					if (unlikely(details && details->ignore_dirty))
  						continue;
  					force_flush = 1;
6237bcd94   Hugh Dickins   [PATCH] mm: zap_p...
1116
  					set_page_dirty(page);
1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1117
  				}
4917e5d04   Johannes Weiner   mm: more likely r...
1118
  				if (pte_young(ptent) &&
64363aad5   Joe Perches   mm: remove unused...
1119
  				    likely(!(vma->vm_flags & VM_SEQ_READ)))
bf3f3bc5e   Nick Piggin   mm: don't mark_pa...
1120
  					mark_page_accessed(page);
6237bcd94   Hugh Dickins   [PATCH] mm: zap_p...
1121
  			}
eca56ff90   Jerome Marchand   mm, shmem: add in...
1122
  			rss[mm_counter(page)]--;
d281ee614   Kirill A. Shutemov   rmap: add argumen...
1123
  			page_remove_rmap(page, false);
3dc147414   Hugh Dickins   badpage: replace ...
1124
1125
  			if (unlikely(page_mapcount(page) < 0))
  				print_bad_pte(vma, addr, ptent, page);
e9d55e157   Aneesh Kumar K.V   mm: change the in...
1126
  			if (unlikely(__tlb_remove_page(tlb, page))) {
1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1127
  				force_flush = 1;
e9d55e157   Aneesh Kumar K.V   mm: change the in...
1128
  				pending_page = page;
ce9ec37bd   Will Deacon   zap_pte_range: up...
1129
  				addr += PAGE_SIZE;
d16dfc550   Peter Zijlstra   mm: mmu_gather re...
1130
  				break;
1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1131
  			}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1132
1133
  			continue;
  		}
aac453635   Michal Hocko   mm, oom: introduc...
1134
1135
  		/* only check swap_entries if explicitly asked for in details */
  		if (unlikely(details && !details->check_swap_entries))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1136
  			continue;
b084d4353   KAMEZAWA Hiroyuki   mm: count swap usage
1137

8a5f14a23   Kirill A. Shutemov   mm: drop support ...
1138
1139
1140
1141
1142
  		entry = pte_to_swp_entry(ptent);
  		if (!non_swap_entry(entry))
  			rss[MM_SWAPENTS]--;
  		else if (is_migration_entry(entry)) {
  			struct page *page;
9f9f1acd7   Konstantin Khlebnikov   mm: fix rss count...
1143

8a5f14a23   Kirill A. Shutemov   mm: drop support ...
1144
  			page = migration_entry_to_page(entry);
eca56ff90   Jerome Marchand   mm, shmem: add in...
1145
  			rss[mm_counter(page)]--;
b084d4353   KAMEZAWA Hiroyuki   mm: count swap usage
1146
  		}
8a5f14a23   Kirill A. Shutemov   mm: drop support ...
1147
1148
  		if (unlikely(!free_swap_and_cache(entry)))
  			print_bad_pte(vma, addr, ptent, NULL);
9888a1cae   Zachary Amsden   [PATCH] paravirt:...
1149
  		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1150
  	} while (pte++, addr += PAGE_SIZE, addr != end);
ae8597623   Hugh Dickins   [PATCH] mm: batch...
1151

d559db086   KAMEZAWA Hiroyuki   mm: clean up mm_c...
1152
  	add_mm_rss_vec(mm, rss);
6606c3e0d   Zachary Amsden   [PATCH] paravirt:...
1153
  	arch_leave_lazy_mmu_mode();
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1154

1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1155
  	/* Do the actual TLB flush before dropping ptl */
fb7332a9f   Will Deacon   mmu_gather: move ...
1156
  	if (force_flush)
1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1157
  		tlb_flush_mmu_tlbonly(tlb);
1cf35d477   Linus Torvalds   mm: split 'tlb_fl...
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
  	pte_unmap_unlock(start_pte, ptl);
  
  	/*
  	 * If we forced a TLB flush (either due to running out of
  	 * batch buffers or because we needed to flush dirty TLB
  	 * entries before releasing the ptl), free the batched
  	 * memory too. Restart if we didn't do everything.
  	 */
  	if (force_flush) {
  		force_flush = 0;
  		tlb_flush_mmu_free(tlb);
e9d55e157   Aneesh Kumar K.V   mm: change the in...
1169
1170
1171
1172
1173
  		if (pending_page) {
  			/* remove the page with new size */
  			__tlb_remove_pte_page(tlb, pending_page);
  			pending_page = NULL;
  		}
2b047252d   Linus Torvalds   Fix TLB gather vi...
1174
  		if (addr != end)
d16dfc550   Peter Zijlstra   mm: mmu_gather re...
1175
1176
  			goto again;
  	}
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1177
  	return addr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1178
  }
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1179
  static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
b5810039a   Nick Piggin   [PATCH] core remo...
1180
  				struct vm_area_struct *vma, pud_t *pud,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1181
  				unsigned long addr, unsigned long end,
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1182
  				struct zap_details *details)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1183
1184
1185
1186
1187
1188
1189
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
5c7fb56e5   Dan Williams   mm, dax: dax-pmd ...
1190
  		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1191
  			if (next - addr != HPAGE_PMD_SIZE) {
684283988   Hugh Dickins   huge pagecache: m...
1192
1193
  				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
  				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
78ddc5347   Kirill A. Shutemov   thp: rename split...
1194
  				split_huge_pmd(vma, pmd, addr);
f21760b15   Shaohua Li   thp: add tlb_remo...
1195
  			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1196
  				goto next;
71e3aac07   Andrea Arcangeli   thp: transparent ...
1197
1198
  			/* fall through */
  		}
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1199
1200
1201
1202
1203
1204
1205
1206
1207
  		/*
  		 * Here there can be other concurrent MADV_DONTNEED or
  		 * trans huge page faults running, and if the pmd is
  		 * none or trans huge it can change under us. This is
  		 * because MADV_DONTNEED holds the mmap_sem in read
  		 * mode.
  		 */
  		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
  			goto next;
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1208
  		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1a5a9906d   Andrea Arcangeli   mm: thp: fix pmd_...
1209
  next:
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1210
1211
  		cond_resched();
  	} while (pmd++, addr = next, addr != end);
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1212
1213
  
  	return addr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1214
  }
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1215
  static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
b5810039a   Nick Piggin   [PATCH] core remo...
1216
  				struct vm_area_struct *vma, pgd_t *pgd,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1217
  				unsigned long addr, unsigned long end,
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1218
  				struct zap_details *details)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1219
1220
1221
1222
1223
1224
1225
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1226
  		if (pud_none_or_clear_bad(pud))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1227
  			continue;
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1228
1229
  		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
  	} while (pud++, addr = next, addr != end);
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1230
1231
  
  	return addr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1232
  }
aac453635   Michal Hocko   mm, oom: introduc...
1233
  void unmap_page_range(struct mmu_gather *tlb,
038c7aa16   Al Viro   VM: unmap_page_ra...
1234
1235
1236
  			     struct vm_area_struct *vma,
  			     unsigned long addr, unsigned long end,
  			     struct zap_details *details)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1237
1238
1239
  {
  	pgd_t *pgd;
  	unsigned long next;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1240
1241
1242
1243
1244
  	BUG_ON(addr >= end);
  	tlb_start_vma(tlb, vma);
  	pgd = pgd_offset(vma->vm_mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1245
  		if (pgd_none_or_clear_bad(pgd))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1246
  			continue;
97a894136   Peter Zijlstra   mm: Remove i_mmap...
1247
1248
  		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
  	} while (pgd++, addr = next, addr != end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1249
1250
  	tlb_end_vma(tlb, vma);
  }
51c6f666f   Robin Holt   [PATCH] mm: ZAP_B...
1251

f5cc4eef9   Al Viro   VM: make zap_page...
1252
1253
1254
  
  static void unmap_single_vma(struct mmu_gather *tlb,
  		struct vm_area_struct *vma, unsigned long start_addr,
4f74d2c8e   Linus Torvalds   vm: remove 'nr_ac...
1255
  		unsigned long end_addr,
f5cc4eef9   Al Viro   VM: make zap_page...
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
  		struct zap_details *details)
  {
  	unsigned long start = max(vma->vm_start, start_addr);
  	unsigned long end;
  
  	if (start >= vma->vm_end)
  		return;
  	end = min(vma->vm_end, end_addr);
  	if (end <= vma->vm_start)
  		return;
cbc91f71b   Srikar Dronamraju   uprobes/core: Dec...
1266
1267
  	if (vma->vm_file)
  		uprobe_munmap(vma, start, end);
b3b9c2932   Konstantin Khlebnikov   mm, x86, pat: rew...
1268
  	if (unlikely(vma->vm_flags & VM_PFNMAP))
5180da410   Suresh Siddha   x86, pat: separat...
1269
  		untrack_pfn(vma, 0, 0);
f5cc4eef9   Al Viro   VM: make zap_page...
1270
1271
1272
1273
1274
1275
1276
  
  	if (start != end) {
  		if (unlikely(is_vm_hugetlb_page(vma))) {
  			/*
  			 * It is undesirable to test vma->vm_file as it
  			 * should be non-null for valid hugetlb area.
  			 * However, vm_file will be NULL in the error
7aa6b4ad5   Davidlohr Bueso   mm/memory.c: upda...
1277
  			 * cleanup path of mmap_region. When
f5cc4eef9   Al Viro   VM: make zap_page...
1278
  			 * hugetlbfs ->mmap method fails,
7aa6b4ad5   Davidlohr Bueso   mm/memory.c: upda...
1279
  			 * mmap_region() nullifies vma->vm_file
f5cc4eef9   Al Viro   VM: make zap_page...
1280
1281
1282
1283
  			 * before calling this function to clean up.
  			 * Since no pte has actually been setup, it is
  			 * safe to do nothing in this case.
  			 */
24669e584   Aneesh Kumar K.V   hugetlb: use mmu_...
1284
  			if (vma->vm_file) {
83cde9e8b   Davidlohr Bueso   mm: use new helpe...
1285
  				i_mmap_lock_write(vma->vm_file->f_mapping);
d833352a4   Mel Gorman   mm: hugetlbfs: cl...
1286
  				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
83cde9e8b   Davidlohr Bueso   mm: use new helpe...
1287
  				i_mmap_unlock_write(vma->vm_file->f_mapping);
24669e584   Aneesh Kumar K.V   hugetlb: use mmu_...
1288
  			}
f5cc4eef9   Al Viro   VM: make zap_page...
1289
1290
1291
  		} else
  			unmap_page_range(tlb, vma, start, end, details);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1292
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1293
1294
  /**
   * unmap_vmas - unmap a range of memory covered by a list of vma's
0164f69d0   Randy Dunlap   mm/memory.c: fix ...
1295
   * @tlb: address of the caller's struct mmu_gather
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1296
1297
1298
   * @vma: the starting vma
   * @start_addr: virtual address at which to start unmapping
   * @end_addr: virtual address at which to end unmapping
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1299
   *
508034a32   Hugh Dickins   [PATCH] mm: unmap...
1300
   * Unmap all pages in the vma list.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1301
   *
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1302
1303
1304
1305
1306
1307
1308
1309
1310
   * Only addresses between `start' and `end' will be unmapped.
   *
   * The VMA list must be sorted in ascending virtual address order.
   *
   * unmap_vmas() assumes that the caller will flush the whole unmapped address
   * range after unmap_vmas() returns.  So the only responsibility here is to
   * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
   * drops the lock and schedules.
   */
6e8bb0193   Al Viro   VM: make unmap_vm...
1311
  void unmap_vmas(struct mmu_gather *tlb,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1312
  		struct vm_area_struct *vma, unsigned long start_addr,
4f74d2c8e   Linus Torvalds   vm: remove 'nr_ac...
1313
  		unsigned long end_addr)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1314
  {
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
1315
  	struct mm_struct *mm = vma->vm_mm;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1316

cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
1317
  	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
f5cc4eef9   Al Viro   VM: make zap_page...
1318
  	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
4f74d2c8e   Linus Torvalds   vm: remove 'nr_ac...
1319
  		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
1320
  	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1321
1322
1323
1324
1325
  }
  
  /**
   * zap_page_range - remove user pages in a given range
   * @vma: vm_area_struct holding the applicable pages
eb4546bbb   Randy Dunlap   mm/memory.c: fix ...
1326
   * @start: starting address of pages to zap
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1327
   * @size: number of bytes to zap
8a5f14a23   Kirill A. Shutemov   mm: drop support ...
1328
   * @details: details of shared cache invalidation
f5cc4eef9   Al Viro   VM: make zap_page...
1329
1330
   *
   * Caller must protect the VMA list
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1331
   */
7e027b14d   Linus Torvalds   vm: simplify unma...
1332
  void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1333
1334
1335
  		unsigned long size, struct zap_details *details)
  {
  	struct mm_struct *mm = vma->vm_mm;
d16dfc550   Peter Zijlstra   mm: mmu_gather re...
1336
  	struct mmu_gather tlb;
7e027b14d   Linus Torvalds   vm: simplify unma...
1337
  	unsigned long end = start + size;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1338

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1339
  	lru_add_drain();
2b047252d   Linus Torvalds   Fix TLB gather vi...
1340
  	tlb_gather_mmu(&tlb, mm, start, end);
365e9c87a   Hugh Dickins   [PATCH] mm: updat...
1341
  	update_hiwater_rss(mm);
7e027b14d   Linus Torvalds   vm: simplify unma...
1342
1343
  	mmu_notifier_invalidate_range_start(mm, start, end);
  	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
4f74d2c8e   Linus Torvalds   vm: remove 'nr_ac...
1344
  		unmap_single_vma(&tlb, vma, start, end, details);
7e027b14d   Linus Torvalds   vm: simplify unma...
1345
1346
  	mmu_notifier_invalidate_range_end(mm, start, end);
  	tlb_finish_mmu(&tlb, start, end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1347
  }
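
For context, madvise(MADV_DONTNEED) is the most familiar caller of zap_page_range(): it drops the ptes for a range while leaving the vma itself in place. A minimal user-space sketch (not kernel code) that exercises this path:

/*
 * Hypothetical user-space demo: populate an anonymous mapping, then let
 * madvise(MADV_DONTNEED) zap the range via zap_page_range().
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	memset(p, 0xaa, len);			/* fault in and dirty the pages */
	madvise(p, len, MADV_DONTNEED);		/* kernel zaps the ptes */

	/* The mapping is still valid; anonymous memory reads back as zeroes. */
	printf("first byte after MADV_DONTNEED: %d\n", p[0]);
	return 0;
}
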
c627f9cc0   Jack Steiner   mm: add zap_vma_p...
1348
  /**
f5cc4eef9   Al Viro   VM: make zap_page...
1349
1350
1351
1352
   * zap_page_range_single - remove user pages in a given range
   * @vma: vm_area_struct holding the applicable pages
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
8a5f14a23   Kirill A. Shutemov   mm: drop support ...
1353
   * @details: details of shared cache invalidation
f5cc4eef9   Al Viro   VM: make zap_page...
1354
1355
   *
   * The range must fit into one VMA.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1356
   */
f5cc4eef9   Al Viro   VM: make zap_page...
1357
  static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1358
1359
1360
  		unsigned long size, struct zap_details *details)
  {
  	struct mm_struct *mm = vma->vm_mm;
d16dfc550   Peter Zijlstra   mm: mmu_gather re...
1361
  	struct mmu_gather tlb;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1362
  	unsigned long end = address + size;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1363

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1364
  	lru_add_drain();
2b047252d   Linus Torvalds   Fix TLB gather vi...
1365
  	tlb_gather_mmu(&tlb, mm, address, end);
365e9c87a   Hugh Dickins   [PATCH] mm: updat...
1366
  	update_hiwater_rss(mm);
f5cc4eef9   Al Viro   VM: make zap_page...
1367
  	mmu_notifier_invalidate_range_start(mm, address, end);
4f74d2c8e   Linus Torvalds   vm: remove 'nr_ac...
1368
  	unmap_single_vma(&tlb, vma, address, end, details);
f5cc4eef9   Al Viro   VM: make zap_page...
1369
  	mmu_notifier_invalidate_range_end(mm, address, end);
d16dfc550   Peter Zijlstra   mm: mmu_gather re...
1370
  	tlb_finish_mmu(&tlb, address, end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1371
  }
c627f9cc0   Jack Steiner   mm: add zap_vma_p...
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
  /**
   * zap_vma_ptes - remove ptes mapping the vma
   * @vma: vm_area_struct holding ptes to be zapped
   * @address: starting address of pages to zap
   * @size: number of bytes to zap
   *
   * This function only unmaps ptes assigned to VM_PFNMAP vmas.
   *
   * The entire address range must be fully contained within the vma.
   *
   * Returns 0 if successful.
   */
  int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  		unsigned long size)
  {
  	if (address < vma->vm_start || address + size > vma->vm_end ||
  	    		!(vma->vm_flags & VM_PFNMAP))
  		return -1;
f5cc4eef9   Al Viro   VM: make zap_page...
1390
  	zap_page_range_single(vma, address, size, NULL);
c627f9cc0   Jack Steiner   mm: add zap_vma_p...
1391
1392
1393
  	return 0;
  }
  EXPORT_SYMBOL_GPL(zap_vma_ptes);
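
A minimal sketch of how a driver might use zap_vma_ptes() to revoke a VM_PFNMAP mapping it set up earlier (for example with remap_pfn_range()); my_revoke_mapping() is a hypothetical helper, not an existing API:

/*
 * Hypothetical driver helper: tear down the ptes of a VM_PFNMAP vma so
 * that later user accesses fault and can be handled (or refused) by the
 * driver.  zap_vma_ptes() checks that the range lies inside the vma and
 * that the vma really is VM_PFNMAP.
 */
#include <linux/mm.h>

static int my_revoke_mapping(struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	return zap_vma_ptes(vma, vma->vm_start, len);
}
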
25ca1d6c0   Namhyung Kim   mm: wrap get_lock...
1394
  pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
920c7a5d0   Harvey Harrison   mm: remove fastca...
1395
  			spinlock_t **ptl)
c9cfcddfd   Linus Torvalds   VM: add common he...
1396
1397
1398
1399
  {
  	pgd_t * pgd = pgd_offset(mm, addr);
  	pud_t * pud = pud_alloc(mm, pgd, addr);
  	if (pud) {
49c91fb01   Trond Myklebust   [PATCH] VM: Fix t...
1400
  		pmd_t * pmd = pmd_alloc(mm, pud, addr);
f66055ab6   Andrea Arcangeli   thp: verify pmd_t...
1401
1402
  		if (pmd) {
  			VM_BUG_ON(pmd_trans_huge(*pmd));
c9cfcddfd   Linus Torvalds   VM: add common he...
1403
  			return pte_alloc_map_lock(mm, pmd, addr, ptl);
f66055ab6   Andrea Arcangeli   thp: verify pmd_t...
1404
  		}
c9cfcddfd   Linus Torvalds   VM: add common he...
1405
1406
1407
  	}
  	return NULL;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1408
  /*
238f58d89   Linus Torvalds   Support strange d...
1409
1410
1411
1412
1413
1414
   * This is the old fallback for page remapping.
   *
   * For historical reasons, it only allows reserved pages. Only
   * old drivers should use this, and they needed to mark their
   * pages reserved for the old functions anyway.
   */
423bad600   Nick Piggin   mm: add vm_insert...
1415
1416
  static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  			struct page *page, pgprot_t prot)
238f58d89   Linus Torvalds   Support strange d...
1417
  {
423bad600   Nick Piggin   mm: add vm_insert...
1418
  	struct mm_struct *mm = vma->vm_mm;
238f58d89   Linus Torvalds   Support strange d...
1419
  	int retval;
c9cfcddfd   Linus Torvalds   VM: add common he...
1420
  	pte_t *pte;
8a9f3ccd2   Balbir Singh   Memory controller...
1421
  	spinlock_t *ptl;
238f58d89   Linus Torvalds   Support strange d...
1422
  	retval = -EINVAL;
a145dd411   Linus Torvalds   VM: add "vm_inser...
1423
  	if (PageAnon(page))
5b4e655e9   KAMEZAWA Hiroyuki   memcg: avoid acco...
1424
  		goto out;
238f58d89   Linus Torvalds   Support strange d...
1425
1426
  	retval = -ENOMEM;
  	flush_dcache_page(page);
c9cfcddfd   Linus Torvalds   VM: add common he...
1427
  	pte = get_locked_pte(mm, addr, &ptl);
238f58d89   Linus Torvalds   Support strange d...
1428
  	if (!pte)
5b4e655e9   KAMEZAWA Hiroyuki   memcg: avoid acco...
1429
  		goto out;
238f58d89   Linus Torvalds   Support strange d...
1430
1431
1432
1433
1434
1435
  	retval = -EBUSY;
  	if (!pte_none(*pte))
  		goto out_unlock;
  
  	/* Ok, finally just insert the thing.. */
  	get_page(page);
eca56ff90   Jerome Marchand   mm, shmem: add in...
1436
  	inc_mm_counter_fast(mm, mm_counter_file(page));
dd78fedde   Kirill A. Shutemov   rmap: support fil...
1437
  	page_add_file_rmap(page, false);
238f58d89   Linus Torvalds   Support strange d...
1438
1439
1440
  	set_pte_at(mm, addr, pte, mk_pte(page, prot));
  
  	retval = 0;
8a9f3ccd2   Balbir Singh   Memory controller...
1441
1442
  	pte_unmap_unlock(pte, ptl);
  	return retval;
238f58d89   Linus Torvalds   Support strange d...
1443
1444
1445
1446
1447
  out_unlock:
  	pte_unmap_unlock(pte, ptl);
  out:
  	return retval;
  }
bfa5bf6d6   Rolf Eike Beer   [PATCH] Add kerne...
1448
1449
1450
1451
1452
1453
  /**
   * vm_insert_page - insert single page into user vma
   * @vma: user vma to map to
   * @addr: target user address of this page
   * @page: source kernel page
   *
a145dd411   Linus Torvalds   VM: add "vm_inser...
1454
1455
1456
1457
1458
1459
   * This allows drivers to insert individual pages they've allocated
   * into a user vma.
   *
   * The page has to be a nice clean _individual_ kernel allocation.
   * If you allocate a compound page, you need to have marked it as
   * such (__GFP_COMP), or manually just split the page up yourself
8dfcc9ba2   Nick Piggin   [PATCH] mm: split...
1460
   * (see split_page()).
a145dd411   Linus Torvalds   VM: add "vm_inser...
1461
1462
1463
1464
1465
1466
1467
1468
   *
   * NOTE! Traditionally this was done with "remap_pfn_range()" which
   * took an arbitrary page protection parameter. This doesn't allow
   * that. Your vma protection will have to be set up correctly, which
   * means that if you want a shared writable mapping, you'd better
   * ask for a shared writable mapping!
   *
   * The page does not need to be reserved.
4b6e1e370   Konstantin Khlebnikov   mm: kill vma flag...
1469
1470
1471
1472
1473
   *
   * Usually this function is called from f_op->mmap() handler
   * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
   * Caller must set VM_MIXEDMAP on vma if it wants to call this
   * function from other places, for example from page-fault handler.
a145dd411   Linus Torvalds   VM: add "vm_inser...
1474
   */
423bad600   Nick Piggin   mm: add vm_insert...
1475
1476
  int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  			struct page *page)
a145dd411   Linus Torvalds   VM: add "vm_inser...
1477
1478
1479
1480
1481
  {
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
  	if (!page_count(page))
  		return -EINVAL;
4b6e1e370   Konstantin Khlebnikov   mm: kill vma flag...
1482
1483
1484
1485
1486
  	if (!(vma->vm_flags & VM_MIXEDMAP)) {
  		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
  		BUG_ON(vma->vm_flags & VM_PFNMAP);
  		vma->vm_flags |= VM_MIXEDMAP;
  	}
423bad600   Nick Piggin   mm: add vm_insert...
1487
  	return insert_page(vma, addr, page, vma->vm_page_prot);
a145dd411   Linus Torvalds   VM: add "vm_inser...
1488
  }
e3c3374fb   Linus Torvalds   Make vm_insert_pa...
1489
  EXPORT_SYMBOL(vm_insert_page);
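
To illustrate the "call this from f_op->mmap" guidance above, here is a hedged sketch of a driver mmap handler; struct my_buffer, its pages[] array and my_mmap() are assumptions, not existing kernel symbols:

/*
 * Hypothetical f_op->mmap handler: map individually allocated, order-0
 * kernel pages into userspace with vm_insert_page().  Because ->mmap
 * runs with mmap_sem held for write, vm_insert_page() is free to set
 * VM_MIXEDMAP on the vma itself.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_buffer {
	struct page **pages;		/* order-0 pages, one per slot */
	unsigned long nr_pages;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_buffer *buf = file->private_data;
	unsigned long uaddr = vma->vm_start;
	unsigned long i, vm_pages = vma_pages(vma);
	int err;

	if (vm_pages > buf->nr_pages)
		return -EINVAL;

	for (i = 0; i < vm_pages; i++) {
		err = vm_insert_page(vma, uaddr, buf->pages[i]);
		if (err)
			return err;
		uaddr += PAGE_SIZE;
	}
	return 0;
}
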
a145dd411   Linus Torvalds   VM: add "vm_inser...
1490

423bad600   Nick Piggin   mm: add vm_insert...
1491
  static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
01c8f1c44   Dan Williams   mm, dax, gpu: con...
1492
  			pfn_t pfn, pgprot_t prot)
423bad600   Nick Piggin   mm: add vm_insert...
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
  {
  	struct mm_struct *mm = vma->vm_mm;
  	int retval;
  	pte_t *pte, entry;
  	spinlock_t *ptl;
  
  	retval = -ENOMEM;
  	pte = get_locked_pte(mm, addr, &ptl);
  	if (!pte)
  		goto out;
  	retval = -EBUSY;
  	if (!pte_none(*pte))
  		goto out_unlock;
  
  	/* Ok, finally just insert the thing.. */
01c8f1c44   Dan Williams   mm, dax, gpu: con...
1508
1509
1510
1511
  	if (pfn_t_devmap(pfn))
  		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
  	else
  		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
423bad600   Nick Piggin   mm: add vm_insert...
1512
  	set_pte_at(mm, addr, pte, entry);
4b3073e1c   Russell King   MM: Pass a PTE po...
1513
  	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
423bad600   Nick Piggin   mm: add vm_insert...
1514
1515
1516
1517
1518
1519
1520
  
  	retval = 0;
  out_unlock:
  	pte_unmap_unlock(pte, ptl);
  out:
  	return retval;
  }
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1521
1522
1523
1524
1525
1526
  /**
   * vm_insert_pfn - insert single pfn into user vma
   * @vma: user vma to map to
   * @addr: target user address of this page
   * @pfn: source kernel pfn
   *
c462f179e   Robert P. J. Day   mm/memory.c: fix ...
1527
   * Similar to vm_insert_page, this allows drivers to insert individual pages
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1528
1529
1530
1531
   * they've allocated into a user vma. Same comments apply.
   *
   * This function should only be called from a vm_ops->fault handler, and
   * in that case the handler should return NULL.
0d71d10a4   Nick Piggin   mm: remove nopfn
1532
1533
1534
1535
1536
   *
   * vma cannot be a COW mapping.
   *
   * As this is called only for pages that do not currently exist, we
   * do not need to flush old virtual caches or the TLB.
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1537
1538
   */
  int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
423bad600   Nick Piggin   mm: add vm_insert...
1539
  			unsigned long pfn)
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1540
  {
1745cbc5d   Andy Lutomirski   mm: Add vm_insert...
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
  	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
  }
  EXPORT_SYMBOL(vm_insert_pfn);
  
  /**
   * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
   * @vma: user vma to map to
   * @addr: target user address of this page
   * @pfn: source kernel pfn
   * @pgprot: pgprot flags for the inserted page
   *
   * This is exactly like vm_insert_pfn, except that it allows drivers
   * to override pgprot on a per-page basis.
   *
   * This only makes sense for IO mappings, and it makes no sense for
   * cow mappings.  In general, using multiple vmas is preferable;
   * vm_insert_pfn_prot should only be used if using multiple VMAs is
   * impractical.
   */
  int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
  			unsigned long pfn, pgprot_t pgprot)
  {
2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1563
  	int ret;
7e675137a   Nick Piggin   mm: introduce pte...
1564
1565
1566
1567
1568
1569
  	/*
  	 * Technically, architectures with pte_special can avoid all these
  	 * restrictions (same for remap_pfn_range).  However we would like
  	 * consistency in testing and feature parity among all, so we should
  	 * try to keep these invariants in place for everybody.
  	 */
b379d7901   Jared Hulbert   mm: introduce VM_...
1570
1571
1572
1573
1574
  	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
  	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
  						(VM_PFNMAP|VM_MIXEDMAP));
  	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
  	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1575

423bad600   Nick Piggin   mm: add vm_insert...
1576
1577
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
f25748e3c   Dan Williams   mm, dax: convert ...
1578
  	if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1579
  		return -EINVAL;
01c8f1c44   Dan Williams   mm, dax, gpu: con...
1580
  	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1581

2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1582
  	return ret;
423bad600   Nick Piggin   mm: add vm_insert...
1583
  }
1745cbc5d   Andy Lutomirski   mm: Add vm_insert...
1584
  EXPORT_SYMBOL(vm_insert_pfn_prot);
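
A sketch of the intended calling pattern: a vm_ops->fault handler for a VM_PFNMAP | VM_IO vma inserts the pfn itself and reports VM_FAULT_NOPAGE, since there is no struct page for the core fault code to install. struct my_dev and my_vm_fault() are hypothetical names:

/*
 * Hypothetical fault handler for a pfn-backed mapping.  A driver that
 * needs a per-page pgprot would call vm_insert_pfn_prot() instead.
 */
#include <linux/mm.h>

struct my_dev {
	phys_addr_t phys_base;		/* device memory backing the vma */
};

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;
	int err;

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err && err != -EBUSY)	/* -EBUSY: another thread won the race */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
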
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1585

423bad600   Nick Piggin   mm: add vm_insert...
1586
  int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
01c8f1c44   Dan Williams   mm, dax, gpu: con...
1587
  			pfn_t pfn)
423bad600   Nick Piggin   mm: add vm_insert...
1588
  {
87744ab38   Dan Williams   mm: fix cache mod...
1589
  	pgprot_t pgprot = vma->vm_page_prot;
423bad600   Nick Piggin   mm: add vm_insert...
1590
  	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1591

423bad600   Nick Piggin   mm: add vm_insert...
1592
1593
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
87744ab38   Dan Williams   mm: fix cache mod...
1594
1595
  	if (track_pfn_insert(vma, &pgprot, pfn))
  		return -EINVAL;
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1596

423bad600   Nick Piggin   mm: add vm_insert...
1597
1598
1599
1600
  	/*
  	 * If we don't have pte special, then we have to use the pfn_valid()
  	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
  	 * refcount the page if pfn_valid is true (hence insert_page rather
62eede62d   Hugh Dickins   mm: ZERO_PAGE wit...
1601
1602
  	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
  	 * without pte special, it would then be refcounted as a normal page.
423bad600   Nick Piggin   mm: add vm_insert...
1603
  	 */
03fc2da63   Dan Williams   mm: fix pfn_t to ...
1604
  	if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
423bad600   Nick Piggin   mm: add vm_insert...
1605
  		struct page *page;
03fc2da63   Dan Williams   mm: fix pfn_t to ...
1606
1607
1608
1609
1610
1611
  		/*
  		 * At this point we are committed to insert_page()
  		 * regardless of whether the caller specified flags that
  		 * result in pfn_t_has_page() == false.
  		 */
  		page = pfn_to_page(pfn_t_to_pfn(pfn));
87744ab38   Dan Williams   mm: fix cache mod...
1612
  		return insert_page(vma, addr, page, pgprot);
423bad600   Nick Piggin   mm: add vm_insert...
1613
  	}
87744ab38   Dan Williams   mm: fix cache mod...
1614
  	return insert_pfn(vma, addr, pfn, pgprot);
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1615
  }
423bad600   Nick Piggin   mm: add vm_insert...
1616
  EXPORT_SYMBOL(vm_insert_mixed);
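
For VM_MIXEDMAP mappings the same call covers both cases discussed in the comment above: pfns that have a struct page (refcounted via insert_page()) and raw device pfns (inserted via insert_pfn()). A small hypothetical wrapper, just to show the pfn_t plumbing:

#include <linux/mm.h>
#include <linux/pfn_t.h>

static int my_mixed_insert(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long pfn, bool is_device)
{
	pfn_t pfnt = __pfn_to_pfn_t(pfn, is_device ? PFN_DEV : 0);

	return vm_insert_mixed(vma, addr, pfnt);
}
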
e0dc0d8f4   Nick Piggin   [PATCH] add vm_in...
1617

a145dd411   Linus Torvalds   VM: add "vm_inser...
1618
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1619
1620
1621
1622
1623
1624
1625
1626
1627
   * Maps a range of physical memory into the requested pages. The old
   * mappings are removed. Any references to nonexistent pages result
   * in null mappings (currently treated as "copy-on-access").
   */
  static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pte_t *pte;
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
1628
  	spinlock_t *ptl;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1629

c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
1630
  	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1631
1632
  	if (!pte)
  		return -ENOMEM;
6606c3e0d   Zachary Amsden   [PATCH] paravirt:...
1633
  	arch_enter_lazy_mmu_mode();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1634
1635
  	do {
  		BUG_ON(!pte_none(*pte));
7e675137a   Nick Piggin   mm: introduce pte...
1636
  		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1637
1638
  		pfn++;
  	} while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0d   Zachary Amsden   [PATCH] paravirt:...
1639
  	arch_leave_lazy_mmu_mode();
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
1640
  	pte_unmap_unlock(pte - 1, ptl);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
  	return 0;
  }
  
  static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
f66055ab6   Andrea Arcangeli   thp: verify pmd_t...
1655
  	VM_BUG_ON(pmd_trans_huge(*pmd));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
  	do {
  		next = pmd_addr_end(addr, end);
  		if (remap_pte_range(mm, pmd, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
  
  static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
  			unsigned long addr, unsigned long end,
  			unsigned long pfn, pgprot_t prot)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pfn -= addr >> PAGE_SHIFT;
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		if (remap_pmd_range(mm, pud, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }
bfa5bf6d6   Rolf Eike Beer   [PATCH] Add kerne...
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
  /**
   * remap_pfn_range - remap kernel memory to userspace
   * @vma: user vma to map to
   * @addr: target user address to start at
   * @pfn: physical address of kernel memory
   * @size: size of map area
   * @prot: page protection flags for this mapping
   *
   *  Note: this is only safe if the mm semaphore is held when called.
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1694
1695
1696
1697
1698
  int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
  		    unsigned long pfn, unsigned long size, pgprot_t prot)
  {
  	pgd_t *pgd;
  	unsigned long next;
2d15cab85   Hugh Dickins   [PATCH] mm: fix r...
1699
  	unsigned long end = addr + PAGE_ALIGN(size);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1700
  	struct mm_struct *mm = vma->vm_mm;
d5957d2fc   Yongji Xie   mm: fix incorrect...
1701
  	unsigned long remap_pfn = pfn;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1702
1703
1704
1705
1706
1707
1708
  	int err;
  
  	/*
  	 * Physically remapped pages are special. Tell the
  	 * rest of the world about it:
  	 *   VM_IO tells people not to look at these pages
  	 *	(accesses can have side effects).
6aab341e0   Linus Torvalds   mm: re-architect ...
1709
1710
1711
  	 *   VM_PFNMAP tells the core MM that the base pages are just
  	 *	raw PFN mappings, and do not have a "struct page" associated
  	 *	with them.
314e51b98   Konstantin Khlebnikov   mm: kill vma flag...
1712
1713
1714
1715
  	 *   VM_DONTEXPAND
  	 *      Disable vma merging and expanding with mremap().
  	 *   VM_DONTDUMP
  	 *      Omit vma from core dump, even when VM_IO turned off.
fb155c161   Linus Torvalds   Allow arbitrary s...
1716
1717
1718
1719
  	 *
  	 * There's a horrible special case to handle copy-on-write
  	 * behaviour that some programs depend on. We mark the "original"
  	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
b3b9c2932   Konstantin Khlebnikov   mm, x86, pat: rew...
1720
  	 * See vm_normal_page() for details.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1721
  	 */
b3b9c2932   Konstantin Khlebnikov   mm, x86, pat: rew...
1722
1723
1724
  	if (is_cow_mapping(vma->vm_flags)) {
  		if (addr != vma->vm_start || end != vma->vm_end)
  			return -EINVAL;
fb155c161   Linus Torvalds   Allow arbitrary s...
1725
  		vma->vm_pgoff = pfn;
b3b9c2932   Konstantin Khlebnikov   mm, x86, pat: rew...
1726
  	}
d5957d2fc   Yongji Xie   mm: fix incorrect...
1727
  	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
b3b9c2932   Konstantin Khlebnikov   mm, x86, pat: rew...
1728
  	if (err)
3c8bb73ac   venkatesh.pallipadi@intel.com   x86: PAT: store v...
1729
  		return -EINVAL;
fb155c161   Linus Torvalds   Allow arbitrary s...
1730

314e51b98   Konstantin Khlebnikov   mm: kill vma flag...
1731
  	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1732
1733
1734
1735
1736
  
  	BUG_ON(addr >= end);
  	pfn -= addr >> PAGE_SHIFT;
  	pgd = pgd_offset(mm, addr);
  	flush_cache_range(vma, addr, end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1737
1738
1739
1740
1741
1742
1743
  	do {
  		next = pgd_addr_end(addr, end);
  		err = remap_pud_range(mm, pgd, addr, next,
  				pfn + (addr >> PAGE_SHIFT), prot);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);
2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1744
1745
  
  	if (err)
d5957d2fc   Yongji Xie   mm: fix incorrect...
1746
  		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2ab640379   venkatesh.pallipadi@intel.com   x86: PAT: hooks i...
1747

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1748
1749
1750
  	return err;
  }
  EXPORT_SYMBOL(remap_pfn_range);
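
The classic user of remap_pfn_range() is a character driver mapping a fixed physical window into userspace from its ->mmap method. A hedged sketch; MY_DEV_PHYS_BASE, MY_DEV_SIZE and my_mmap() are made-up names:

/*
 * Hypothetical f_op->mmap handler for a device with a 64KB register
 * window.  remap_pfn_range() sets VM_IO | VM_PFNMAP | VM_DONTEXPAND |
 * VM_DONTDUMP on the vma itself, as described above.
 */
#include <linux/fs.h>
#include <linux/mm.h>

#define MY_DEV_PHYS_BASE	0xfd000000UL
#define MY_DEV_SIZE		0x10000UL

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MY_DEV_SIZE)
		return -EINVAL;

	/* Register-style memory usually wants an uncached mapping. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       MY_DEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
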
b4cbb197c   Linus Torvalds   vm: add vm_iomap_...
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
  /**
   * vm_iomap_memory - remap memory to userspace
   * @vma: user vma to map to
   * @start: start of area
   * @len: size of area
   *
   * This is a simplified io_remap_pfn_range() for common driver use. The
   * driver just needs to give us the physical memory range to be mapped,
   * we'll figure out the rest from the vma information.
   *
   * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
   * write-combining or similar behaviour.
   */
  int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
  {
  	unsigned long vm_len, pfn, pages;
  
  	/* Check that the physical memory area passed in looks valid */
  	if (start + len < start)
  		return -EINVAL;
  	/*
  	 * You *really* shouldn't map things that aren't page-aligned,
  	 * but we've historically allowed it because IO memory might
  	 * just have smaller alignment.
  	 */
  	len += start & ~PAGE_MASK;
  	pfn = start >> PAGE_SHIFT;
  	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
  	if (pfn + pages < pfn)
  		return -EINVAL;
  
  	/* We start the mapping 'vm_pgoff' pages into the area */
  	if (vma->vm_pgoff > pages)
  		return -EINVAL;
  	pfn += vma->vm_pgoff;
  	pages -= vma->vm_pgoff;
  
  	/* Can we fit all of the mapping? */
  	vm_len = vma->vm_end - vma->vm_start;
  	if (vm_len >> PAGE_SHIFT > pages)
  		return -EINVAL;
  
  	/* Ok, let it rip */
  	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
  }
  EXPORT_SYMBOL(vm_iomap_memory);
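
With vm_iomap_memory() the same job shrinks to a one-liner, because the pfn, offset and length checks are derived from the vma. Again a sketch with hypothetical names (struct my_dev, mem_start, mem_len):

#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	phys_addr_t mem_start;		/* physical base of the region */
	unsigned long mem_len;		/* its length in bytes */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	return vm_iomap_memory(vma, dev->mem_start, dev->mem_len);
}
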
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1797
1798
1799
1800
1801
1802
  static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pte_t *pte;
  	int err;
2f569afd9   Martin Schwidefsky   CONFIG_HIGHPTE vs...
1803
  	pgtable_t token;
949099148   Borislav Petkov   Add unitialized_v...
1804
  	spinlock_t *uninitialized_var(ptl);
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1805
1806
1807
1808
1809
1810
1811
1812
  
  	pte = (mm == &init_mm) ?
  		pte_alloc_kernel(pmd, addr) :
  		pte_alloc_map_lock(mm, pmd, addr, &ptl);
  	if (!pte)
  		return -ENOMEM;
  
  	BUG_ON(pmd_huge(*pmd));
38e0edb15   Jeremy Fitzhardinge   mm/apply_to_range...
1813
  	arch_enter_lazy_mmu_mode();
2f569afd9   Martin Schwidefsky   CONFIG_HIGHPTE vs...
1814
  	token = pmd_pgtable(*pmd);
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1815
1816
  
  	do {
c36987e2e   Daisuke Nishimura   mm: don't call pt...
1817
  		err = fn(pte++, token, addr, data);
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1818
1819
  		if (err)
  			break;
c36987e2e   Daisuke Nishimura   mm: don't call pt...
1820
  	} while (addr += PAGE_SIZE, addr != end);
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1821

38e0edb15   Jeremy Fitzhardinge   mm/apply_to_range...
1822
  	arch_leave_lazy_mmu_mode();
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
  	if (mm != &init_mm)
  		pte_unmap_unlock(pte-1, ptl);
  	return err;
  }
  
  static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	int err;
ceb868796   Andi Kleen   hugetlb: introduc...
1835
  	BUG_ON(pud_huge(*pud));
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
  	pmd = pmd_alloc(mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pmd++, addr = next, addr != end);
  	return err;
  }
  
  static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  				     unsigned long addr, unsigned long end,
  				     pte_fn_t fn, void *data)
  {
  	pud_t *pud;
  	unsigned long next;
  	int err;
  
  	pud = pud_alloc(mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pud++, addr = next, addr != end);
  	return err;
  }
  
  /*
   * Scan a region of virtual memory, filling in page tables as necessary
   * and calling a provided function on each leaf page table.
   */
  int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
  			unsigned long size, pte_fn_t fn, void *data)
  {
  	pgd_t *pgd;
  	unsigned long next;
57250a5bf   Jeremy Fitzhardinge   mmu-notifiers: re...
1877
  	unsigned long end = addr + size;
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1878
  	int err;
9cb65bc3b   Mika Penttilä   mm/memory.c: make...
1879
1880
  	if (WARN_ON(addr >= end))
  		return -EINVAL;
aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1881
1882
1883
1884
1885
1886
1887
  	pgd = pgd_offset(mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);
57250a5bf   Jeremy Fitzhardinge   mmu-notifiers: re...
1888

aee16b3ce   Jeremy Fitzhardinge   Add apply_to_page...
1889
1890
1891
  	return err;
  }
  EXPORT_SYMBOL_GPL(apply_to_page_range);
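
A sketch of the callback pattern apply_to_page_range() expects: the pte_fn_t is invoked once per leaf pte with the page-table "token" and the virtual address, and a non-zero return aborts the walk. my_count_present() and my_present_ptes() are illustrative names only:

#include <linux/mm.h>

static int my_count_present(pte_t *pte, pgtable_t token,
			    unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;			/* keep walking */
}

static unsigned long my_present_ptes(struct mm_struct *mm,
				     unsigned long start, unsigned long size)
{
	unsigned long count = 0;

	/* Note: missing intermediate page tables are allocated, not skipped. */
	if (apply_to_page_range(mm, start, size, my_count_present, &count))
		return 0;
	return count;
}
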
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1892
  /*
9b4bdd2ff   Kirill A. Shutemov   mm: drop support ...
1893
1894
1895
1896
1897
   * handle_pte_fault chooses page fault handler according to an entry which was
   * read non-atomically.  Before making any commitment, on those architectures
   * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
   * parts, do_swap_page must check under lock before unmapping the pte and
   * proceeding (but do_wp_page is only called after already making such a check;
a335b2e17   Ryota Ozaki   mm: Fix out-of-da...
1898
   * and do_anonymous_page can safely check later on).
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
1899
   */
4c21e2f24   Hugh Dickins   [PATCH] mm: split...
1900
  static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
1901
1902
1903
1904
1905
  				pte_t *page_table, pte_t orig_pte)
  {
  	int same = 1;
  #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
  	if (sizeof(pte_t) > sizeof(unsigned long)) {
4c21e2f24   Hugh Dickins   [PATCH] mm: split...
1906
1907
  		spinlock_t *ptl = pte_lockptr(mm, pmd);
  		spin_lock(ptl);
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
1908
  		same = pte_same(*page_table, orig_pte);
4c21e2f24   Hugh Dickins   [PATCH] mm: split...
1909
  		spin_unlock(ptl);
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
1910
1911
1912
1913
1914
  	}
  #endif
  	pte_unmap(page_table);
  	return same;
  }
9de455b20   Atsushi Nemoto   [PATCH] Pass vma ...
1915
  static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
6aab341e0   Linus Torvalds   mm: re-architect ...
1916
  {
0abdd7a81   Dan Williams   dma-debug: introd...
1917
  	debug_dma_assert_idle(src);
6aab341e0   Linus Torvalds   mm: re-architect ...
1918
1919
1920
1921
1922
1923
1924
  	/*
  	 * If the source page was a PFN mapping, we don't have
  	 * a "struct page" for it. We do a best-effort copy by
  	 * just copying from the original user address. If that
  	 * fails, we just zero-fill it. Live with it.
  	 */
  	if (unlikely(!src)) {
9b04c5fec   Cong Wang   mm: remove the se...
1925
  		void *kaddr = kmap_atomic(dst);
5d2a2dbbc   Linus Torvalds   cow_user_page: fi...
1926
1927
1928
1929
1930
1931
1932
1933
1934
  		void __user *uaddr = (void __user *)(va & PAGE_MASK);
  
  		/*
  		 * This really shouldn't fail, because the page is there
  		 * in the page tables. But it might just be unreadable,
  		 * in which case we just give up and fill the result with
  		 * zeroes.
  		 */
  		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
3ecb01df3   Jan Beulich   use clear_page()/...
1935
  			clear_page(kaddr);
9b04c5fec   Cong Wang   mm: remove the se...
1936
  		kunmap_atomic(kaddr);
c4ec7b0de   Dmitriy Monakhov   [PATCH] mm: D-cac...
1937
  		flush_dcache_page(dst);
0ed361dec   Nick Piggin   mm: fix PageUptod...
1938
1939
  	} else
  		copy_user_highpage(dst, src, va, vma);
6aab341e0   Linus Torvalds   mm: re-architect ...
1940
  }
c20cd45eb   Michal Hocko   mm: allow GFP_{FS...
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
  static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
  {
  	struct file *vm_file = vma->vm_file;
  
  	if (vm_file)
  		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
  
  	/*
  	 * Special mappings (e.g. VDSO) do not have any file so fake
  	 * a default GFP_KERNEL for them.
  	 */
  	return GFP_KERNEL;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1954
  /*
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
   * Notify the address space that the page is about to become writable so that
   * it can prohibit this or wait for the page to get into an appropriate state.
   *
   * We do this without the lock held, so that it can sleep if it needs to.
   */
  static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
  	       unsigned long address)
  {
  	struct vm_fault vmf;
  	int ret;
  
  	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
  	vmf.pgoff = page->index;
  	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
c20cd45eb   Michal Hocko   mm: allow GFP_{FS...
1969
  	vmf.gfp_mask = __get_fault_gfp_mask(vma);
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
1970
  	vmf.page = page;
2e4cdab05   Matthew Wilcox   mm: allow page fa...
1971
  	vmf.cow_page = NULL;
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
  
  	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
  		return ret;
  	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
  		lock_page(page);
  		if (!page->mapping) {
  			unlock_page(page);
  			return 0; /* retry */
  		}
  		ret |= VM_FAULT_LOCKED;
  	} else
  		VM_BUG_ON_PAGE(!PageLocked(page), page);
  	return ret;
  }
  
  /*
4e047f897   Shachar Raindel   mm: refactor do_w...
1989
1990
1991
1992
1993
1994
1995
   * Handle write page faults for pages that can be reused in the current vma
   *
   * This can happen either because the mapping has the VM_SHARED flag set,
   * or because we hold the last remaining reference to the page. In either
   * case, all we need to do here is to mark the page as writable and update
   * any related book-keeping.
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
1996
1997
1998
  static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
  			struct page *page, int page_mkwrite, int dirty_shared)
  	__releases(fe->ptl)
4e047f897   Shachar Raindel   mm: refactor do_w...
1999
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2000
  	struct vm_area_struct *vma = fe->vma;
4e047f897   Shachar Raindel   mm: refactor do_w...
2001
2002
2003
2004
2005
2006
2007
2008
  	pte_t entry;
  	/*
  	 * Clear the pages cpupid information as the existing
  	 * information potentially belongs to a now completely
  	 * unrelated process.
  	 */
  	if (page)
  		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2009
  	flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
4e047f897   Shachar Raindel   mm: refactor do_w...
2010
2011
  	entry = pte_mkyoung(orig_pte);
  	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2012
2013
2014
  	if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1))
  		update_mmu_cache(vma, fe->address, fe->pte);
  	pte_unmap_unlock(fe->pte, fe->ptl);
4e047f897   Shachar Raindel   mm: refactor do_w...
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
  
  	if (dirty_shared) {
  		struct address_space *mapping;
  		int dirtied;
  
  		if (!page_mkwrite)
  			lock_page(page);
  
  		dirtied = set_page_dirty(page);
  		VM_BUG_ON_PAGE(PageAnon(page), page);
  		mapping = page->mapping;
  		unlock_page(page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2027
  		put_page(page);
4e047f897   Shachar Raindel   mm: refactor do_w...
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
  
  		if ((dirtied || page_mkwrite) && mapping) {
  			/*
  			 * Some device drivers do not set page.mapping
  			 * but still dirty their pages
  			 */
  			balance_dirty_pages_ratelimited(mapping);
  		}
  
  		if (!page_mkwrite)
  			file_update_time(vma->vm_file);
  	}
  
  	return VM_FAULT_WRITE;
  }
  
  /*
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
   * Handle the case of a page which we actually need to copy to a new page.
   *
   * Called with mmap_sem locked and the old page referenced, but
   * without the ptl held.
   *
   * High level logic flow:
   *
   * - Allocate a page, copy the content of the old page to the new one.
   * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
   * - Take the PTL. If the pte changed, bail out and release the allocated page
   * - If the pte is still the way we remember it, update the page table and all
   *   relevant references. This includes dropping the reference the page-table
   *   held to the old page, as well as updating the rmap.
   * - In any case, unlock the PTL and drop the reference we took to the old page.
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2060
2061
  static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
  		struct page *old_page)
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2062
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2063
2064
  	struct vm_area_struct *vma = fe->vma;
  	struct mm_struct *mm = vma->vm_mm;
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2065
  	struct page *new_page = NULL;
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2066
2067
  	pte_t entry;
  	int page_copied = 0;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2068
2069
  	const unsigned long mmun_start = fe->address & PAGE_MASK;
  	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2070
2071
2072
2073
2074
2075
  	struct mem_cgroup *memcg;
  
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  
  	if (is_zero_pfn(pte_pfn(orig_pte))) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2076
  		new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2077
2078
2079
  		if (!new_page)
  			goto oom;
  	} else {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2080
2081
  		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  				fe->address);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2082
2083
  		if (!new_page)
  			goto oom;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2084
  		cow_user_page(new_page, old_page, fe->address, vma);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2085
  	}
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2086

f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2087
  	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2088
  		goto oom_free_new;
eb3c24f30   Mel Gorman   mm, memcg: Try ch...
2089
  	__SetPageUptodate(new_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2090
2091
2092
2093
2094
  	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  
  	/*
  	 * Re-check the pte - we dropped the lock
  	 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2095
2096
  	fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl);
  	if (likely(pte_same(*fe->pte, orig_pte))) {
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2097
2098
  		if (old_page) {
  			if (!PageAnon(old_page)) {
eca56ff90   Jerome Marchand   mm, shmem: add in...
2099
2100
  				dec_mm_counter_fast(mm,
  						mm_counter_file(old_page));
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2101
2102
2103
2104
2105
  				inc_mm_counter_fast(mm, MM_ANONPAGES);
  			}
  		} else {
  			inc_mm_counter_fast(mm, MM_ANONPAGES);
  		}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2106
  		flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2107
2108
2109
2110
2111
2112
2113
2114
  		entry = mk_pte(new_page, vma->vm_page_prot);
  		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  		/*
  		 * Clear the pte entry and flush it first, before updating the
  		 * pte with the new entry. This will avoid a race condition
  		 * seen in the presence of one thread doing SMC and another
  		 * thread doing COW.
  		 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2115
2116
  		ptep_clear_flush_notify(vma, fe->address, fe->pte);
  		page_add_new_anon_rmap(new_page, vma, fe->address, false);
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2117
  		mem_cgroup_commit_charge(new_page, memcg, false, false);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2118
2119
2120
2121
2122
2123
  		lru_cache_add_active_or_unevictable(new_page, vma);
  		/*
  		 * We call the notify macro here because, when using secondary
  		 * mmu page tables (such as kvm shadow page tables), we want the
  		 * new page to be mapped directly into the secondary page table.
  		 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2124
2125
  		set_pte_at_notify(mm, fe->address, fe->pte, entry);
  		update_mmu_cache(vma, fe->address, fe->pte);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
  		if (old_page) {
  			/*
  			 * Only after switching the pte to the new page may
  			 * we remove the mapcount here. Otherwise another
  			 * process may come and find the rmap count decremented
  			 * before the pte is switched to the new page, and
  			 * "reuse" the old page writing into it while our pte
  			 * here still points into it and can be read by other
  			 * threads.
  			 *
  			 * The critical issue is to order this
  			 * page_remove_rmap with the ptep_clear_flush above.
  			 * Those stores are ordered by (if nothing else,)
  			 * the barrier present in the atomic_add_negative
  			 * in page_remove_rmap.
  			 *
  			 * Then the TLB flush in ptep_clear_flush ensures that
  			 * no process can access the old page before the
  			 * decremented mapcount is visible. And the old page
  			 * cannot be reused until after the decremented
  			 * mapcount is visible. So transitively, TLBs to
  			 * old page will be flushed before it can be reused.
  			 */
d281ee614   Kirill A. Shutemov   rmap: add argumen...
2149
  			page_remove_rmap(old_page, false);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2150
2151
2152
2153
2154
2155
  		}
  
  		/* Free the old page.. */
  		new_page = old_page;
  		page_copied = 1;
  	} else {
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2156
  		mem_cgroup_cancel_charge(new_page, memcg, false);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2157
2158
2159
  	}
  
  	if (new_page)
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2160
  		put_page(new_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2161

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2162
  	pte_unmap_unlock(fe->pte, fe->ptl);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2163
2164
2165
2166
2167
2168
2169
2170
  	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  	if (old_page) {
  		/*
  		 * Don't let another task, with possibly unlocked vma,
  		 * keep the mlocked page.
  		 */
  		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
  			lock_page(old_page);	/* LRU manipulation */
e90309c9f   Kirill A. Shutemov   thp: allow mlocke...
2171
2172
  			if (PageMlocked(old_page))
  				munlock_vma_page(old_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2173
2174
  			unlock_page(old_page);
  		}
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2175
  		put_page(old_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2176
2177
2178
  	}
  	return page_copied ? VM_FAULT_WRITE : 0;
  oom_free_new:
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2179
  	put_page(new_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2180
2181
  oom:
  	if (old_page)
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2182
  		put_page(old_page);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2183
2184
  	return VM_FAULT_OOM;
  }
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2185
2186
2187
2188
  /*
   * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
   * mapping
   */
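  /*
   * The ->pfn_mkwrite() callback, when present, gives the driver a chance
   * to prepare the backing object before the PTE is made writable (the
   * page table lock is dropped around the call).  Typically, depending on
   * the driver, this is used for write-protect based dirty tracking; an
   * error return from the callback aborts the write fault.
   */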
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2189
  static int wp_pfn_shared(struct fault_env *fe,  pte_t orig_pte)
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2190
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2191
  	struct vm_area_struct *vma = fe->vma;
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2192
2193
2194
  	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
  		struct vm_fault vmf = {
  			.page = NULL,
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2195
2196
2197
  			.pgoff = linear_page_index(vma, fe->address),
  			.virtual_address =
  				(void __user *)(fe->address & PAGE_MASK),
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2198
2199
2200
  			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
  		};
  		int ret;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2201
  		pte_unmap_unlock(fe->pte, fe->ptl);
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2202
2203
2204
  		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
  		if (ret & VM_FAULT_ERROR)
  			return ret;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2205
2206
  		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  				&fe->ptl);
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2207
2208
2209
2210
  		/*
  		 * We might have raced with another page fault while we
  		 * released the pte_offset_map_lock.
  		 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2211
2212
  		if (!pte_same(*fe->pte, orig_pte)) {
  			pte_unmap_unlock(fe->pte, fe->ptl);
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2213
2214
2215
  			return 0;
  		}
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2216
  	return wp_page_reuse(fe, orig_pte, NULL, 0, 0);
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2217
  }
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2218
2219
2220
  static int wp_page_shared(struct fault_env *fe, pte_t orig_pte,
  		struct page *old_page)
  	__releases(fe->ptl)
93e478d4c   Shachar Raindel   mm: refactor do_w...
2221
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2222
  	struct vm_area_struct *vma = fe->vma;
93e478d4c   Shachar Raindel   mm: refactor do_w...
2223
  	int page_mkwrite = 0;
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2224
  	get_page(old_page);
93e478d4c   Shachar Raindel   mm: refactor do_w...
2225

93e478d4c   Shachar Raindel   mm: refactor do_w...
2226
2227
  	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
  		int tmp;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2228
2229
  		pte_unmap_unlock(fe->pte, fe->ptl);
  		tmp = do_page_mkwrite(vma, old_page, fe->address);
93e478d4c   Shachar Raindel   mm: refactor do_w...
2230
2231
  		if (unlikely(!tmp || (tmp &
  				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2232
  			put_page(old_page);
93e478d4c   Shachar Raindel   mm: refactor do_w...
2233
2234
2235
2236
2237
2238
2239
2240
  			return tmp;
  		}
  		/*
  		 * Since we dropped the lock we need to revalidate
  		 * the PTE as someone else may have changed it.  If
  		 * they did, we just return, as we can count on the
  		 * MMU to tell us if they didn't also make it writable.
  		 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2241
2242
2243
  		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  						 &fe->ptl);
  		if (!pte_same(*fe->pte, orig_pte)) {
93e478d4c   Shachar Raindel   mm: refactor do_w...
2244
  			unlock_page(old_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2245
  			pte_unmap_unlock(fe->pte, fe->ptl);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2246
  			put_page(old_page);
93e478d4c   Shachar Raindel   mm: refactor do_w...
2247
2248
2249
2250
  			return 0;
  		}
  		page_mkwrite = 1;
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2251
  	return wp_page_reuse(fe, orig_pte, old_page, page_mkwrite, 1);
93e478d4c   Shachar Raindel   mm: refactor do_w...
2252
  }
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2253
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2254
2255
2256
2257
   * This routine handles present pages, when users try to write
   * to a shared page. It is done by copying the page to a new address
   * and decrementing the shared-page counter for the old page.
   *
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2258
2259
2260
2261
2262
2263
2264
2265
2266
   * Note that this routine assumes that the protection checks have been
   * done by the caller (the low-level page fault routine in most cases).
   * Thus we can safely just mark it writable once we've done any necessary
   * COW.
   *
   * We also mark the page dirty at this point even though the page will
   * change only once the write actually happens. This avoids a few races,
   * and potentially makes it more efficient.
   *
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2267
2268
2269
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), with pte both mapped and locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2270
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2271
2272
  static int do_wp_page(struct fault_env *fe, pte_t orig_pte)
  	__releases(fe->ptl)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2273
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2274
  	struct vm_area_struct *vma = fe->vma;
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2275
  	struct page *old_page;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2276

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2277
  	old_page = vm_normal_page(vma, fe->address, orig_pte);
251b97f55   Peter Zijlstra   mm: dirty page ac...
2278
2279
  	if (!old_page) {
  		/*
64e455079   Peter Feiner   mm: softdirty: en...
2280
2281
  		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
  		 * VM_PFNMAP VMA.
251b97f55   Peter Zijlstra   mm: dirty page ac...
2282
2283
  		 *
  		 * We should not cow pages in a shared writeable mapping.
dd9061846   Boaz Harrosh   mm: new pfn_mkwri...
2284
  		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
251b97f55   Peter Zijlstra   mm: dirty page ac...
2285
2286
2287
  		 */
  		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
  				     (VM_WRITE|VM_SHARED))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2288
  			return wp_pfn_shared(fe, orig_pte);
2f38ab2c3   Shachar Raindel   mm: refactor do_w...
2289

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2290
2291
  		pte_unmap_unlock(fe->pte, fe->ptl);
  		return wp_page_copy(fe, orig_pte, old_page);
251b97f55   Peter Zijlstra   mm: dirty page ac...
2292
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2293

d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2294
  	/*
ee6a64578   Peter Zijlstra   [PATCH] mm: fixup...
2295
2296
  	 * Take out anonymous pages first; anonymous shared vmas are
  	 * not dirty accountable.
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2297
  	 */
9a8408951   Hugh Dickins   ksm: identify Pag...
2298
  	if (PageAnon(old_page) && !PageKsm(old_page)) {
6d0a07edd   Andrea Arcangeli   mm: thp: calculat...
2299
  		int total_mapcount;
ab967d860   Hugh Dickins   mm: wp lock page ...
2300
  		if (!trylock_page(old_page)) {
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2301
  			get_page(old_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2302
  			pte_unmap_unlock(fe->pte, fe->ptl);
ab967d860   Hugh Dickins   mm: wp lock page ...
2303
  			lock_page(old_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2304
2305
2306
  			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
  					fe->address, &fe->ptl);
  			if (!pte_same(*fe->pte, orig_pte)) {
ab967d860   Hugh Dickins   mm: wp lock page ...
2307
  				unlock_page(old_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2308
  				pte_unmap_unlock(fe->pte, fe->ptl);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2309
  				put_page(old_page);
287668052   Shachar Raindel   mm: refactor do_w...
2310
  				return 0;
ab967d860   Hugh Dickins   mm: wp lock page ...
2311
  			}
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2312
  			put_page(old_page);
ee6a64578   Peter Zijlstra   [PATCH] mm: fixup...
2313
  		}
6d0a07edd   Andrea Arcangeli   mm: thp: calculat...
2314
2315
2316
2317
2318
2319
2320
2321
2322
  		if (reuse_swap_page(old_page, &total_mapcount)) {
  			if (total_mapcount == 1) {
  				/*
  				 * The page is all ours. Move it to
  				 * our anon_vma so the rmap code will
  				 * not search our parent or siblings.
  				 * Protected against the rmap code by
  				 * the page lock.
  				 */
5a49973d7   Hugh Dickins   mm: thp: refix fa...
2323
  				page_move_anon_rmap(old_page, vma);
6d0a07edd   Andrea Arcangeli   mm: thp: calculat...
2324
  			}
b009c024f   Michel Lespinasse   do_wp_page: remov...
2325
  			unlock_page(old_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2326
  			return wp_page_reuse(fe, orig_pte, old_page, 0, 0);
b009c024f   Michel Lespinasse   do_wp_page: remov...
2327
  		}
ab967d860   Hugh Dickins   mm: wp lock page ...
2328
  		unlock_page(old_page);
ee6a64578   Peter Zijlstra   [PATCH] mm: fixup...
2329
  	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
2330
  					(VM_WRITE|VM_SHARED))) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2331
  		return wp_page_shared(fe, orig_pte, old_page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2332
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2333
2334
2335
2336
  
  	/*
  	 * Ok, we need to copy. Oh, well..
  	 */
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2337
  	get_page(old_page);
287668052   Shachar Raindel   mm: refactor do_w...
2338

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2339
2340
  	pte_unmap_unlock(fe->pte, fe->ptl);
  	return wp_page_copy(fe, orig_pte, old_page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2341
  }
97a894136   Peter Zijlstra   mm: Remove i_mmap...
2342
  static void unmap_mapping_range_vma(struct vm_area_struct *vma,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2343
2344
2345
  		unsigned long start_addr, unsigned long end_addr,
  		struct zap_details *details)
  {
f5cc4eef9   Al Viro   VM: make zap_page...
2346
  	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2347
  }
6b2dbba8b   Michel Lespinasse   mm: replace vma p...
2348
  static inline void unmap_mapping_range_tree(struct rb_root *root,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2349
2350
2351
  					    struct zap_details *details)
  {
  	struct vm_area_struct *vma;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2352
  	pgoff_t vba, vea, zba, zea;
6b2dbba8b   Michel Lespinasse   mm: replace vma p...
2353
  	vma_interval_tree_foreach(vma, root,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2354
  			details->first_index, details->last_index) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2355
2356
  
  		vba = vma->vm_pgoff;
d6e932177   Libin   mm: use vma_pages...
2357
  		vea = vba + vma_pages(vma) - 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2358
2359
2360
2361
2362
2363
  		zba = details->first_index;
  		if (zba < vba)
  			zba = vba;
  		zea = details->last_index;
  		if (zea > vea)
  			zea = vea;
97a894136   Peter Zijlstra   mm: Remove i_mmap...
2364
  		unmap_mapping_range_vma(vma,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2365
2366
  			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
  			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
97a894136   Peter Zijlstra   mm: Remove i_mmap...
2367
  				details);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2368
2369
  	}
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2370
  /**
8a5f14a23   Kirill A. Shutemov   mm: drop support ...
2371
2372
2373
2374
   * unmap_mapping_range - unmap the portion of all mmaps in the specified
   * address_space corresponding to the specified page range in the underlying
   * file.
   *
3d41088fa   Martin Waitz   [PATCH] DocBook: ...
2375
   * @mapping: the address space containing mmaps to be unmapped.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2376
2377
   * @holebegin: byte in first page to unmap, relative to the start of
   * the underlying file.  This will be rounded down to a PAGE_SIZE
25d9e2d15   npiggin@suse.de   truncate: new hel...
2378
   * boundary.  Note that this is different from truncate_pagecache(), which
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
   * must keep the partial page.  In contrast, we must get rid of
   * partial pages.
   * @holelen: size of prospective hole in bytes.  This will be rounded
   * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
   * end of the file.
   * @even_cows: 1 when truncating a file, unmap even private COWed pages;
   * but 0 when invalidating pagecache, don't throw away private data.
   */
  void unmap_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen, int even_cows)
  {
aac453635   Michal Hocko   mm, oom: introduc...
2390
  	struct zap_details details = { };
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
  	pgoff_t hba = holebegin >> PAGE_SHIFT;
  	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  
  	/* Check for overflow. */
  	if (sizeof(holelen) > sizeof(hlen)) {
  		long long holeend =
  			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
  		if (holeend & ~(long long)ULONG_MAX)
  			hlen = ULONG_MAX - hba + 1;
  	}
  
  	details.check_mapping = even_cows ? NULL : mapping;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2403
2404
2405
2406
  	details.first_index = hba;
  	details.last_index = hba + hlen - 1;
  	if (details.last_index < details.first_index)
  		details.last_index = ULONG_MAX;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2407

46c043ede   Kirill A. Shutemov   mm: take i_mmap_l...
2408
  	i_mmap_lock_write(mapping);
6b2dbba8b   Michel Lespinasse   mm: replace vma p...
2409
  	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2410
  		unmap_mapping_range_tree(&mapping->i_mmap, &details);
46c043ede   Kirill A. Shutemov   mm: take i_mmap_l...
2411
  	i_mmap_unlock_write(mapping);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2412
2413
  }
  EXPORT_SYMBOL(unmap_mapping_range);
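  /*
   * Illustrative caller, roughly what truncate_pagecache() in mm/truncate.c
   * does when shrinking a file to newsize:
   *
   *	holebegin = round_up(newsize, PAGE_SIZE);
   *	unmap_mapping_range(mapping, holebegin, 0, 1);
   *	truncate_inode_pages(mapping, newsize);
   *	unmap_mapping_range(mapping, holebegin, 0, 1);
   *
   * A holelen of 0 means "unmap to end of file", and even_cows == 1 also
   * drops private COWed copies, as documented above.
   */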
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2414
  /*
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2415
2416
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
9a95f3cf7   Paul Cassella   mm: describe mmap...
2417
2418
2419
2420
   * We return with pte unmapped and unlocked.
   *
   * We return with the mmap_sem locked or unlocked in the same cases
   * as does filemap_fault().
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2421
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2422
  int do_swap_page(struct fault_env *fe, pte_t orig_pte)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2423
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2424
  	struct vm_area_struct *vma = fe->vma;
56f31801c   Hugh Dickins   mm: cleanup "swap...
2425
  	struct page *page, *swapcache;
00501b531   Johannes Weiner   mm: memcontrol: r...
2426
  	struct mem_cgroup *memcg;
65500d234   Hugh Dickins   [PATCH] mm: page ...
2427
  	swp_entry_t entry;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2428
  	pte_t pte;
d065bd810   Michel Lespinasse   mm: retry page fa...
2429
  	int locked;
ad8c2ee80   Rik van Riel   rmap: add exclusi...
2430
  	int exclusive = 0;
83c54070e   Nick Piggin   mm: fault feedbac...
2431
  	int ret = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2432

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2433
  	if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte))
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2434
  		goto out;
65500d234   Hugh Dickins   [PATCH] mm: page ...
2435
2436
  
  	entry = pte_to_swp_entry(orig_pte);
d1737fdbe   Andi Kleen   HWPOISON: Add bas...
2437
2438
  	if (unlikely(non_swap_entry(entry))) {
  		if (is_migration_entry(entry)) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2439
  			migration_entry_wait(vma->vm_mm, fe->pmd, fe->address);
d1737fdbe   Andi Kleen   HWPOISON: Add bas...
2440
2441
2442
  		} else if (is_hwpoison_entry(entry)) {
  			ret = VM_FAULT_HWPOISON;
  		} else {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2443
  			print_bad_pte(vma, fe->address, orig_pte, NULL);
d99be1a8e   Hugh Dickins   mm: sigbus instea...
2444
  			ret = VM_FAULT_SIGBUS;
d1737fdbe   Andi Kleen   HWPOISON: Add bas...
2445
  		}
0697212a4   Christoph Lameter   [PATCH] Swapless ...
2446
2447
  		goto out;
  	}
0ff922452   Shailabh Nagar   [PATCH] per-task-...
2448
  	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2449
2450
  	page = lookup_swap_cache(entry);
  	if (!page) {
02098feaa   Hugh Dickins   swapin needs gfp_...
2451
  		page = swapin_readahead(entry,
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2452
  					GFP_HIGHUSER_MOVABLE, vma, fe->address);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2453
2454
  		if (!page) {
  			/*
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2455
2456
  			 * Back out if somebody else faulted in this pte
  			 * while we released the pte lock.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2457
  			 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2458
2459
2460
  			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
  					fe->address, &fe->ptl);
  			if (likely(pte_same(*fe->pte, orig_pte)))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2461
  				ret = VM_FAULT_OOM;
0ff922452   Shailabh Nagar   [PATCH] per-task-...
2462
  			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2463
  			goto unlock;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2464
2465
2466
2467
  		}
  
  		/* Had to read the page from swap area: Major fault */
  		ret = VM_FAULT_MAJOR;
f8891e5e1   Christoph Lameter   [PATCH] Light wei...
2468
  		count_vm_event(PGMAJFAULT);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2469
  		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
d1737fdbe   Andi Kleen   HWPOISON: Add bas...
2470
  	} else if (PageHWPoison(page)) {
71f72525d   Wu Fengguang   HWPOISON: comment...
2471
2472
2473
2474
  		/*
  		 * hwpoisoned dirty swapcache pages are kept for killing
  		 * owner processes (which may be unknown at hwpoison time)
  		 */
d1737fdbe   Andi Kleen   HWPOISON: Add bas...
2475
2476
  		ret = VM_FAULT_HWPOISON;
  		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
56f31801c   Hugh Dickins   mm: cleanup "swap...
2477
  		swapcache = page;
4779cb31c   Andi Kleen   HWPOISON: Fix pag...
2478
  		goto out_release;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2479
  	}
56f31801c   Hugh Dickins   mm: cleanup "swap...
2480
  	swapcache = page;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2481
  	locked = lock_page_or_retry(page, vma->vm_mm, fe->flags);
e709ffd61   Rik van Riel   mm: remove swap t...
2482

073e587ec   KAMEZAWA Hiroyuki   memcg: move charg...
2483
  	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
d065bd810   Michel Lespinasse   mm: retry page fa...
2484
2485
2486
2487
  	if (!locked) {
  		ret |= VM_FAULT_RETRY;
  		goto out_release;
  	}
073e587ec   KAMEZAWA Hiroyuki   memcg: move charg...
2488

4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2489
  	/*
31c4a3d3a   Hugh Dickins   mm: further fix s...
2490
2491
2492
2493
  	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
  	 * release the swapcache from under us.  The page pin, and pte_same
  	 * test below, are not enough to exclude that.  Even if it is still
  	 * swapcache, we need to check that the page's swap has not changed.
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2494
  	 */
31c4a3d3a   Hugh Dickins   mm: further fix s...
2495
  	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2496
  		goto out_page;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2497
  	page = ksm_might_need_to_copy(page, vma, fe->address);
cbf86cfe0   Hugh Dickins   ksm: remove old s...
2498
2499
2500
  	if (unlikely(!page)) {
  		ret = VM_FAULT_OOM;
  		page = swapcache;
cbf86cfe0   Hugh Dickins   ksm: remove old s...
2501
  		goto out_page;
5ad646880   Hugh Dickins   ksm: let shared p...
2502
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2503
2504
  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
  				&memcg, false)) {
8a9f3ccd2   Balbir Singh   Memory controller...
2505
  		ret = VM_FAULT_OOM;
bc43f75cd   Johannes Weiner   mm: fix pageref l...
2506
  		goto out_page;
8a9f3ccd2   Balbir Singh   Memory controller...
2507
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2508
  	/*
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2509
  	 * Back out if somebody else already faulted in this pte.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2510
  	 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2511
2512
2513
  	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  			&fe->ptl);
  	if (unlikely(!pte_same(*fe->pte, orig_pte)))
b81074800   Kirill Korotaev   [PATCH] do_swap_p...
2514
  		goto out_nomap;
b81074800   Kirill Korotaev   [PATCH] do_swap_p...
2515
2516
2517
2518
  
  	if (unlikely(!PageUptodate(page))) {
  		ret = VM_FAULT_SIGBUS;
  		goto out_nomap;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2519
  	}
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2520
2521
2522
2523
2524
2525
2526
2527
  	/*
  	 * The page isn't present yet, go ahead with the fault.
  	 *
  	 * Be careful about the sequence of operations here.
  	 * To get its accounting right, reuse_swap_page() must be called
  	 * while the page is counted on swap but not yet in mapcount i.e.
  	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
  	 * must be called after the swap_free(), or it will never succeed.
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2528
  	 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2529

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2530
2531
  	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2532
  	pte = mk_pte(page, vma->vm_page_prot);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2533
  	if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2534
  		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2535
  		fe->flags &= ~FAULT_FLAG_WRITE;
9a5b489b8   Andrea Arcangeli   mm: set VM_FAULT_...
2536
  		ret |= VM_FAULT_WRITE;
d281ee614   Kirill A. Shutemov   rmap: add argumen...
2537
  		exclusive = RMAP_EXCLUSIVE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2538
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2539
  	flush_icache_page(vma, page);
179ef71cb   Cyrill Gorcunov   mm: save soft-dir...
2540
2541
  	if (pte_swp_soft_dirty(orig_pte))
  		pte = pte_mksoft_dirty(pte);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2542
  	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
00501b531   Johannes Weiner   mm: memcontrol: r...
2543
  	if (page == swapcache) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2544
  		do_page_add_anon_rmap(page, vma, fe->address, exclusive);
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2545
  		mem_cgroup_commit_charge(page, memcg, true, false);
1a8018fb4   Minchan Kim   mm: move swap-in ...
2546
  		activate_page(page);
00501b531   Johannes Weiner   mm: memcontrol: r...
2547
  	} else { /* ksm created a completely new copy */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2548
  		page_add_new_anon_rmap(page, vma, fe->address, false);
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2549
  		mem_cgroup_commit_charge(page, memcg, false, false);
00501b531   Johannes Weiner   mm: memcontrol: r...
2550
2551
  		lru_cache_add_active_or_unevictable(page, vma);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2552

c475a8ab6   Hugh Dickins   [PATCH] can_share...
2553
  	swap_free(entry);
5ccc5abaa   Vladimir Davydov   mm: free swap cac...
2554
2555
  	if (mem_cgroup_swap_full(page) ||
  	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
a2c43eed8   Hugh Dickins   mm: try_to_free_s...
2556
  		try_to_free_swap(page);
c475a8ab6   Hugh Dickins   [PATCH] can_share...
2557
  	unlock_page(page);
56f31801c   Hugh Dickins   mm: cleanup "swap...
2558
  	if (page != swapcache) {
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2559
2560
2561
2562
2563
2564
2565
2566
2567
  		/*
  		 * Hold the lock to keep the swap entry from being reused
  		 * until we take the PT lock for the pte_same() check
  		 * (to avoid false positives from pte_same). For
  		 * further safety, release the lock after the swap_free()
  		 * so that the swap count won't change under a
  		 * parallel locked swapcache.
  		 */
  		unlock_page(swapcache);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2568
  		put_page(swapcache);
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2569
  	}
c475a8ab6   Hugh Dickins   [PATCH] can_share...
2570

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2571
2572
  	if (fe->flags & FAULT_FLAG_WRITE) {
  		ret |= do_wp_page(fe, pte);
61469f1d5   Hugh Dickins   memcg: when do_sw...
2573
2574
  		if (ret & VM_FAULT_ERROR)
  			ret &= VM_FAULT_ERROR;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2575
2576
2577
2578
  		goto out;
  	}
  
  	/* No need to invalidate - it was non-present before */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2579
  	update_mmu_cache(vma, fe->address, fe->pte);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2580
  unlock:
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2581
  	pte_unmap_unlock(fe->pte, fe->ptl);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2582
2583
  out:
  	return ret;
b81074800   Kirill Korotaev   [PATCH] do_swap_p...
2584
  out_nomap:
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2585
  	mem_cgroup_cancel_charge(page, memcg, false);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2586
  	pte_unmap_unlock(fe->pte, fe->ptl);
bc43f75cd   Johannes Weiner   mm: fix pageref l...
2587
  out_page:
b81074800   Kirill Korotaev   [PATCH] do_swap_p...
2588
  	unlock_page(page);
4779cb31c   Andi Kleen   HWPOISON: Fix pag...
2589
  out_release:
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2590
  	put_page(page);
56f31801c   Hugh Dickins   mm: cleanup "swap...
2591
  	if (page != swapcache) {
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2592
  		unlock_page(swapcache);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2593
  		put_page(swapcache);
4969c1192   Andrea Arcangeli   mm: fix swapin ra...
2594
  	}
65500d234   Hugh Dickins   [PATCH] mm: page ...
2595
  	return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2596
2597
2598
  }
  
  /*
8ca3eb080   Tony Luck   guard page for st...
2599
2600
   * This is like a special single-page "expand_{down|up}wards()",
   * except we must first make sure that 'address{-|+}PAGE_SIZE'
320b2b8de   Linus Torvalds   mm: keep a guard ...
2601
   * doesn't hit another vma.
320b2b8de   Linus Torvalds   mm: keep a guard ...
2602
2603
2604
2605
2606
   */
  static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
  {
  	address &= PAGE_MASK;
  	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
0e8e50e20   Linus Torvalds   mm: make stack gu...
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
  		struct vm_area_struct *prev = vma->vm_prev;
  
  		/*
  		 * Is there a mapping abutting this one below?
  		 *
  		 * That's only ok if it's the same stack mapping
  		 * that has gotten split..
  		 */
  		if (prev && prev->vm_end == address)
  			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
320b2b8de   Linus Torvalds   mm: keep a guard ...
2617

fee7e49d4   Linus Torvalds   mm: propagate err...
2618
  		return expand_downwards(vma, address - PAGE_SIZE);
320b2b8de   Linus Torvalds   mm: keep a guard ...
2619
  	}
8ca3eb080   Tony Luck   guard page for st...
2620
2621
2622
2623
2624
2625
  	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
  		struct vm_area_struct *next = vma->vm_next;
  
  		/* As VM_GROWSDOWN but s/below/above/ */
  		if (next && next->vm_start == address + PAGE_SIZE)
  			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
fee7e49d4   Linus Torvalds   mm: propagate err...
2626
  		return expand_upwards(vma, address + PAGE_SIZE);
8ca3eb080   Tony Luck   guard page for st...
2627
  	}
320b2b8de   Linus Torvalds   mm: keep a guard ...
2628
2629
2630
2631
  	return 0;
  }
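  /*
   * In practice, for a VM_GROWSDOWN stack the check above means: touching
   * the first page of the stack VMA tries to extend the stack one more page
   * downwards via expand_downwards(); if a non-stack mapping ends exactly
   * at that address there is no room for the guard page, -ENOMEM is
   * returned and do_anonymous_page() reports the fault as VM_FAULT_SIGSEGV.
   */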
  
  /*
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2632
2633
2634
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2635
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2636
  static int do_anonymous_page(struct fault_env *fe)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2637
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2638
  	struct vm_area_struct *vma = fe->vma;
00501b531   Johannes Weiner   mm: memcontrol: r...
2639
  	struct mem_cgroup *memcg;
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2640
  	struct page *page;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2641
  	pte_t entry;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2642

6b7339f4c   Kirill A. Shutemov   mm: avoid setting...
2643
2644
2645
  	/* File mapping without ->vm_ops ? */
  	if (vma->vm_flags & VM_SHARED)
  		return VM_FAULT_SIGBUS;
11ac55247   Linus Torvalds   mm: fix page tabl...
2646
  	/* Check if we need to add a guard page to the stack */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2647
  	if (check_stack_guard_page(vma, fe->address) < 0)
9c145c56d   Linus Torvalds   vm: make stack gu...
2648
  		return VM_FAULT_SIGSEGV;
320b2b8de   Linus Torvalds   mm: keep a guard ...
2649

7267ec008   Kirill A. Shutemov   mm: postpone page...
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
  	/*
  	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
  	 * pte_offset_map() on pmds where a huge pmd might be created
  	 * from a different thread.
  	 *
  	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
  	 * parallel threads are excluded by other means.
  	 *
  	 * Here we only have down_read(mmap_sem).
  	 */
  	if (pte_alloc(vma->vm_mm, fe->pmd, fe->address))
  		return VM_FAULT_OOM;
  
  	/* See the comment in pte_alloc_one_map() */
  	if (unlikely(pmd_trans_unstable(fe->pmd)))
  		return 0;
11ac55247   Linus Torvalds   mm: fix page tabl...
2666
  	/* Use the zero-page for reads */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2667
2668
2669
  	if (!(fe->flags & FAULT_FLAG_WRITE) &&
  			!mm_forbids_zeropage(vma->vm_mm)) {
  		entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address),
62eede62d   Hugh Dickins   mm: ZERO_PAGE wit...
2670
  						vma->vm_page_prot));
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2671
2672
2673
  		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  				&fe->ptl);
  		if (!pte_none(*fe->pte))
a13ea5b75   Hugh Dickins   mm: reinstate ZER...
2674
  			goto unlock;
6b251fc96   Andrea Arcangeli   userfaultfd: call...
2675
2676
  		/* Deliver the page fault to userland, check inside PT lock */
  		if (userfaultfd_missing(vma)) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2677
2678
  			pte_unmap_unlock(fe->pte, fe->ptl);
  			return handle_userfault(fe, VM_UFFD_MISSING);
6b251fc96   Andrea Arcangeli   userfaultfd: call...
2679
  		}
a13ea5b75   Hugh Dickins   mm: reinstate ZER...
2680
2681
  		goto setpte;
  	}
557ed1fa2   Nick Piggin   remove ZERO_PAGE
2682
  	/* Allocate our own private page. */
557ed1fa2   Nick Piggin   remove ZERO_PAGE
2683
2684
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2685
  	page = alloc_zeroed_user_highpage_movable(vma, fe->address);
557ed1fa2   Nick Piggin   remove ZERO_PAGE
2686
2687
  	if (!page)
  		goto oom;
eb3c24f30   Mel Gorman   mm, memcg: Try ch...
2688

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2689
  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
eb3c24f30   Mel Gorman   mm, memcg: Try ch...
2690
  		goto oom_free_page;
52f37629f   Minchan Kim   THP: fix comment ...
2691
2692
2693
2694
2695
  	/*
  	 * The memory barrier inside __SetPageUptodate makes sure that
  	 * preceding stores to the page contents become visible before
  	 * the set_pte_at() write.
  	 */
0ed361dec   Nick Piggin   mm: fix PageUptod...
2696
  	__SetPageUptodate(page);
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2697

557ed1fa2   Nick Piggin   remove ZERO_PAGE
2698
  	entry = mk_pte(page, vma->vm_page_prot);
1ac0cb5d0   Hugh Dickins   mm: fix anonymous...
2699
2700
  	if (vma->vm_flags & VM_WRITE)
  		entry = pte_mkwrite(pte_mkdirty(entry));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2701

bae473a42   Kirill A. Shutemov   mm: introduce fau...
2702
2703
2704
  	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  			&fe->ptl);
  	if (!pte_none(*fe->pte))
557ed1fa2   Nick Piggin   remove ZERO_PAGE
2705
  		goto release;
9ba692948   Hugh Dickins   ksm: fix oom dead...
2706

6b251fc96   Andrea Arcangeli   userfaultfd: call...
2707
2708
  	/* Deliver the page fault to userland, check inside PT lock */
  	if (userfaultfd_missing(vma)) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2709
  		pte_unmap_unlock(fe->pte, fe->ptl);
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2710
  		mem_cgroup_cancel_charge(page, memcg, false);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2711
  		put_page(page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2712
  		return handle_userfault(fe, VM_UFFD_MISSING);
6b251fc96   Andrea Arcangeli   userfaultfd: call...
2713
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2714
2715
  	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, fe->address, false);
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2716
  	mem_cgroup_commit_charge(page, memcg, false, false);
00501b531   Johannes Weiner   mm: memcontrol: r...
2717
  	lru_cache_add_active_or_unevictable(page, vma);
a13ea5b75   Hugh Dickins   mm: reinstate ZER...
2718
  setpte:
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2719
  	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2720
2721
  
  	/* No need to invalidate - it was non-present before */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2722
  	update_mmu_cache(vma, fe->address, fe->pte);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2723
  unlock:
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2724
  	pte_unmap_unlock(fe->pte, fe->ptl);
83c54070e   Nick Piggin   mm: fault feedbac...
2725
  	return 0;
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2726
  release:
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
2727
  	mem_cgroup_cancel_charge(page, memcg, false);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2728
  	put_page(page);
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
2729
  	goto unlock;
8a9f3ccd2   Balbir Singh   Memory controller...
2730
  oom_free_page:
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2731
  	put_page(page);
65500d234   Hugh Dickins   [PATCH] mm: page ...
2732
  oom:
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2733
2734
  	return VM_FAULT_OOM;
  }
9a95f3cf7   Paul Cassella   mm: describe mmap...
2735
2736
2737
2738
2739
  /*
   * The mmap_sem must have been held on entry, and may have been
   * released depending on flags and vma->vm_ops->fault() return value.
   * See filemap_fault() and __lock_page_retry().
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2740
2741
  static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
  		struct page *cow_page, struct page **page, void **entry)
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2742
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2743
  	struct vm_area_struct *vma = fe->vma;
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2744
2745
  	struct vm_fault vmf;
  	int ret;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2746
  	vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK);
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2747
  	vmf.pgoff = pgoff;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2748
  	vmf.flags = fe->flags;
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2749
  	vmf.page = NULL;
c20cd45eb   Michal Hocko   mm: allow GFP_{FS...
2750
  	vmf.gfp_mask = __get_fault_gfp_mask(vma);
2e4cdab05   Matthew Wilcox   mm: allow page fa...
2751
  	vmf.cow_page = cow_page;
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2752
2753
2754
2755
  
  	ret = vma->vm_ops->fault(vma, &vmf);
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  		return ret;
bc2466e42   Jan Kara   dax: Use radix tr...
2756
2757
2758
2759
  	if (ret & VM_FAULT_DAX_LOCKED) {
  		*entry = vmf.entry;
  		return ret;
  	}
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2760
2761
2762
2763
  
  	if (unlikely(PageHWPoison(vmf.page))) {
  		if (ret & VM_FAULT_LOCKED)
  			unlock_page(vmf.page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
2764
  		put_page(vmf.page);
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
  		return VM_FAULT_HWPOISON;
  	}
  
  	if (unlikely(!(ret & VM_FAULT_LOCKED)))
  		lock_page(vmf.page);
  	else
  		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
  
  	*page = vmf.page;
  	return ret;
  }
7267ec008   Kirill A. Shutemov   mm: postpone page...
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
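  /*
   * Make sure fe->pmd is populated with a page table, then map and lock the
   * PTE for fe->address.  A table preallocated by the caller
   * (fe->prealloc_pte, set up in do_fault_around()) is installed if
   * available, otherwise one is allocated here.  Returns VM_FAULT_OOM on
   * allocation failure, VM_FAULT_NOPAGE if a huge or devmap pmd
   * materialized under us (so the fault must be retried), or 0 with
   * fe->pte mapped and fe->ptl held.
   */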
  static int pte_alloc_one_map(struct fault_env *fe)
  {
  	struct vm_area_struct *vma = fe->vma;
  
  	if (!pmd_none(*fe->pmd))
  		goto map_pte;
  	if (fe->prealloc_pte) {
  		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
  		if (unlikely(!pmd_none(*fe->pmd))) {
  			spin_unlock(fe->ptl);
  			goto map_pte;
  		}
  
  		atomic_long_inc(&vma->vm_mm->nr_ptes);
  		pmd_populate(vma->vm_mm, fe->pmd, fe->prealloc_pte);
  		spin_unlock(fe->ptl);
  		fe->prealloc_pte = 0;
  	} else if (unlikely(pte_alloc(vma->vm_mm, fe->pmd, fe->address))) {
  		return VM_FAULT_OOM;
  	}
  map_pte:
  	/*
  	 * If a huge pmd materialized under us just retry later.  Use
  	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
  	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
  	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
  	 * in a different thread of this mm, in turn leading to a misleading
  	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
  	 * regular pmd that we can walk with pte_offset_map() and we can do that
  	 * through an atomic read in C, which is what pmd_trans_unstable()
  	 * provides.
  	 */
  	if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
  		return VM_FAULT_NOPAGE;
  
  	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
  			&fe->ptl);
  	return 0;
  }
e496cf3d7   Kirill A. Shutemov   thp: introduce CO...
2815
  #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
101024596   Kirill A. Shutemov   mm: introduce do_...
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
  
  #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
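  /*
   * transhuge_vma_suitable() below requires that the VMA's starting virtual
   * page number and its starting file offset are congruent modulo
   * HPAGE_PMD_NR, so that a PMD-aligned virtual range maps a huge-page
   * aligned range of the file, and that the whole aligned window fits
   * inside the VMA.  For example, with 4K base pages and 2M huge pages, a
   * mapping of file offset 0 can only use a huge page if it starts at a
   * 2M-aligned address.
   */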
  static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
  		unsigned long haddr)
  {
  	if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
  			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
  		return false;
  	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
  		return false;
  	return true;
  }
  
  static int do_set_pmd(struct fault_env *fe, struct page *page)
  {
  	struct vm_area_struct *vma = fe->vma;
  	bool write = fe->flags & FAULT_FLAG_WRITE;
  	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
  	pmd_t entry;
  	int i, ret;
  
  	if (!transhuge_vma_suitable(vma, haddr))
  		return VM_FAULT_FALLBACK;
  
  	ret = VM_FAULT_FALLBACK;
  	page = compound_head(page);
  
  	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
  	if (unlikely(!pmd_none(*fe->pmd)))
  		goto out;
  
  	for (i = 0; i < HPAGE_PMD_NR; i++)
  		flush_icache_page(vma, page + i);
  
  	entry = mk_huge_pmd(page, vma->vm_page_prot);
  	if (write)
  		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  
  	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
  	page_add_file_rmap(page, true);
  
  	set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
  
  	update_mmu_cache_pmd(vma, haddr, fe->pmd);
  
  	/* fault is handled */
  	ret = 0;
95ecedcd6   Kirill A. Shutemov   thp, vmstats: add...
2863
  	count_vm_event(THP_FILE_MAPPED);
101024596   Kirill A. Shutemov   mm: introduce do_...
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
  out:
  	spin_unlock(fe->ptl);
  	return ret;
  }
  #else
  static int do_set_pmd(struct fault_env *fe, struct page *page)
  {
  	BUILD_BUG();
  	return 0;
  }
  #endif
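  /*
   * do_set_pmd() above maps a whole transparent huge page cache page with a
   * single PMD entry (HPAGE_PMD_NR base pages at once).  When its
   * conditions are not met it returns VM_FAULT_FALLBACK and alloc_set_pte()
   * below falls back to mapping a single PTE.
   */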
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2875
  /**
7267ec008   Kirill A. Shutemov   mm: postpone page...
2876
2877
   * alloc_set_pte - setup new PTE entry for given page and add reverse page
   * mapping. If needed, the function allocates a page table or uses a
   * pre-allocated one.
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2878
   *
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2879
   * @fe: fault environment
7267ec008   Kirill A. Shutemov   mm: postpone page...
2880
   * @memcg: memcg to charge page (only for private mappings)
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2881
   * @page: page to map
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2882
   *
7267ec008   Kirill A. Shutemov   mm: postpone page...
2883
   * Caller must take care of unlocking fe->ptl, if fe->pte is non-NULL on return.
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2884
2885
2886
2887
   *
   * Target users are the page fault handler itself and implementations of
   * vm_ops->map_pages.
   */
7267ec008   Kirill A. Shutemov   mm: postpone page...
2888
2889
  int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
  		struct page *page)
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2890
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2891
2892
  	struct vm_area_struct *vma = fe->vma;
  	bool write = fe->flags & FAULT_FLAG_WRITE;
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2893
  	pte_t entry;
101024596   Kirill A. Shutemov   mm: introduce do_...
2894
  	int ret;
e496cf3d7   Kirill A. Shutemov   thp: introduce CO...
2895
2896
  	if (pmd_none(*fe->pmd) && PageTransCompound(page) &&
  			IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
101024596   Kirill A. Shutemov   mm: introduce do_...
2897
2898
2899
2900
2901
2902
2903
  		/* THP on COW? */
  		VM_BUG_ON_PAGE(memcg, page);
  
  		ret = do_set_pmd(fe, page);
  		if (ret != VM_FAULT_FALLBACK)
  			return ret;
  	}
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2904

7267ec008   Kirill A. Shutemov   mm: postpone page...
2905
  	if (!fe->pte) {
101024596   Kirill A. Shutemov   mm: introduce do_...
2906
  		ret = pte_alloc_one_map(fe);
7267ec008   Kirill A. Shutemov   mm: postpone page...
2907
2908
2909
2910
2911
2912
2913
  		if (ret)
  			return ret;
  	}
  
  	/* Re-check under ptl */
  	if (unlikely(!pte_none(*fe->pte)))
  		return VM_FAULT_NOPAGE;
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2914
2915
2916
2917
  	flush_icache_page(vma, page);
  	entry = mk_pte(page, vma->vm_page_prot);
  	if (write)
  		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2918
2919
  	/* copy-on-write page */
  	if (write && !(vma->vm_flags & VM_SHARED)) {
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2920
  		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2921
  		page_add_new_anon_rmap(page, vma, fe->address, false);
7267ec008   Kirill A. Shutemov   mm: postpone page...
2922
2923
  		mem_cgroup_commit_charge(page, memcg, false, false);
  		lru_cache_add_active_or_unevictable(page, vma);
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2924
  	} else {
eca56ff90   Jerome Marchand   mm, shmem: add in...
2925
  		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
dd78fedde   Kirill A. Shutemov   rmap: support fil...
2926
  		page_add_file_rmap(page, false);
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2927
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2928
  	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2929
2930
  
  	/* no need to invalidate: a not-present page won't be cached */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
2931
  	update_mmu_cache(vma, fe->address, fe->pte);
7267ec008   Kirill A. Shutemov   mm: postpone page...
2932
2933
  
  	return 0;
3bb977946   Kirill A. Shutemov   mm: consolidate c...
2934
  }
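  /*
   * Typical call pattern, as in do_read_fault() and do_cow_fault() below:
   * the caller provides the (charged) page, calls alloc_set_pte() and then
   * drops the page table lock itself:
   *
   *	ret |= alloc_set_pte(fe, memcg, page);
   *	if (fe->pte)
   *		pte_unmap_unlock(fe->pte, fe->ptl);
   */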
3a91053ae   Kirill A. Shutemov   mm: mark fault_ar...
2935
2936
  static unsigned long fault_around_bytes __read_mostly =
  	rounddown_pow_of_two(65536);
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2937

a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2938
2939
  #ifdef CONFIG_DEBUG_FS
  static int fault_around_bytes_get(void *data, u64 *val)
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2940
  {
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2941
  	*val = fault_around_bytes;
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2942
2943
  	return 0;
  }
b4903d6e8   Andrey Ryabinin   mm: debugfs: move...
2944
2945
2946
2947
2948
  /*
   * fault_around_pages() and fault_around_mask() expect fault_around_bytes
   * to be rounded down to the nearest page order, which is what
   * do_fault_around() expects to see.
   */
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2949
  static int fault_around_bytes_set(void *data, u64 val)
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2950
  {
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2951
  	if (val / PAGE_SIZE > PTRS_PER_PTE)
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2952
  		return -EINVAL;
b4903d6e8   Andrey Ryabinin   mm: debugfs: move...
2953
2954
2955
2956
  	if (val > PAGE_SIZE)
  		fault_around_bytes = rounddown_pow_of_two(val);
  	else
  		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2957
2958
  	return 0;
  }
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2959
2960
2961
  DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
  		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2962
2963
2964
2965
  
  static int __init fault_around_debugfs(void)
  {
  	void *ret;
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2966
2967
  	ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
  			&fault_around_bytes_fops);
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2968
  	if (!ret)
a9b0f8618   Kirill A. Shutemov   mm: nominate faul...
2969
  		pr_warn("Failed to create fault_around_bytes in debugfs");
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2970
2971
2972
  	return 0;
  }
  late_initcall(fault_around_debugfs);
1592eef01   Kirill A. Shutemov   mm: add debugfs t...
2973
  #endif
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2974

1fdb412bd   Kirill A. Shutemov   mm: document do_f...
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
  /*
   * do_fault_around() tries to map a few pages around the fault address. The hope
   * is that the pages will be needed soon and this will lower the number of
   * faults to handle.
   *
   * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
   * not ready to be mapped: not up-to-date, locked, etc.
   *
   * This function is called with the page table lock taken. In the split
   * ptlock case the page table lock protects only those entries which belong
   * to the page table corresponding to the fault address.
   *
   * This function doesn't cross the VMA boundaries, in order to call map_pages()
   * only once.
   *
   * fault_around_pages() defines how many pages we'll try to map.
   * do_fault_around() expects it to return a power of two less than or equal to
   * PTRS_PER_PTE.
   *
   * The virtual address of the area that we map is naturally aligned to the
   * fault_around_pages() value (and therefore to page order).  This way it's
   * easier to guarantee that we don't cross page table boundaries.
   */
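  /*
   * Worked example, assuming 4K pages and the default fault_around_bytes of
   * 65536 (16 pages): a read fault at 0x7f1234567000 is widened to the
   * 64K-aligned window starting at 0x7f1234560000, then clipped so that it
   * stays inside the VMA, does not cross the page table that contains the
   * fault address, and covers at most 16 pages; ->map_pages() is then asked
   * to fill that window.
   */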
7267ec008   Kirill A. Shutemov   mm: postpone page...
2998
  static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
2999
  {
7267ec008   Kirill A. Shutemov   mm: postpone page...
3000
  	unsigned long address = fe->address, nr_pages, mask;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3001
  	pgoff_t end_pgoff;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3002
  	int off, ret = 0;
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3003

4db0c3c29   Jason Low   mm: remove rest o...
3004
  	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
aecd6f442   Kirill A. Shutemov   mm: close race be...
3005
  	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3006
3007
  	fe->address = max(address & mask, fe->vma->vm_start);
  	off = ((address - fe->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3008
  	start_pgoff -= off;
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3009
3010
  
  	/*
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3011
3012
  	 *  end_pgoff is either end of page table or end of vma
  	 *  or fault_around_pages() from start_pgoff, depending on what is nearest.
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3013
  	 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3014
  	end_pgoff = start_pgoff -
7267ec008   Kirill A. Shutemov   mm: postpone page...
3015
  		((fe->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3016
  		PTRS_PER_PTE - 1;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3017
3018
  	end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1,
  			start_pgoff + nr_pages - 1);
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3019

7267ec008   Kirill A. Shutemov   mm: postpone page...
3020
3021
  	if (pmd_none(*fe->pmd)) {
  		fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address);
c5f88bd29   Vegard Nossum   mm: fail prefault...
3022
3023
  		if (!fe->prealloc_pte)
  			goto out;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3024
  		smp_wmb(); /* See comment in __pte_alloc() */
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3025
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3026
  	fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff);
7267ec008   Kirill A. Shutemov   mm: postpone page...
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
  
  	/* preallocated pagetable is unused: free it */
  	if (fe->prealloc_pte) {
  		pte_free(fe->vma->vm_mm, fe->prealloc_pte);
  		fe->prealloc_pte = 0;
  	}
  	/* Huge page is mapped? Page fault is solved */
  	if (pmd_trans_huge(*fe->pmd)) {
  		ret = VM_FAULT_NOPAGE;
  		goto out;
  	}
  
  	/* ->map_pages() hasn't done anything useful. Cold page cache? */
  	if (!fe->pte)
  		goto out;
  
  	/* check if the page fault is solved */
  	fe->pte -= (fe->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
  	if (!pte_none(*fe->pte))
  		ret = VM_FAULT_NOPAGE;
  	pte_unmap_unlock(fe->pte, fe->ptl);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3048
  out:
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3049
  	fe->address = address;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3050
3051
  	fe->pte = NULL;
  	return ret;
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3052
  }
7267ec008   Kirill A. Shutemov   mm: postpone page...
3053
  static int do_read_fault(struct fault_env *fe, pgoff_t pgoff)
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3054
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3055
  	struct vm_area_struct *vma = fe->vma;
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3056
  	struct page *fault_page;
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3057
3058
3059
3060
3061
3062
3063
  	int ret = 0;
  
  	/*
  	 * Let's call ->map_pages() first and use ->fault() as fallback
  	 * if the page at this offset is not ready to be mapped (cold page cache or
  	 * something).
  	 */
9b4bdd2ff   Kirill A. Shutemov   mm: drop support ...
3064
  	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
7267ec008   Kirill A. Shutemov   mm: postpone page...
3065
3066
3067
  		ret = do_fault_around(fe, pgoff);
  		if (ret)
  			return ret;
8c6e50b02   Kirill A. Shutemov   mm: introduce vm_...
3068
  	}
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3069

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3070
  	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3071
3072
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  		return ret;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3073
3074
  	ret |= alloc_set_pte(fe, NULL, fault_page);
  	if (fe->pte)
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3075
  		pte_unmap_unlock(fe->pte, fe->ptl);
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3076
  	unlock_page(fault_page);
7267ec008   Kirill A. Shutemov   mm: postpone page...
3077
3078
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  		put_page(fault_page);
e655fb290   Kirill A. Shutemov   mm: introduce do_...
3079
3080
  	return ret;
  }
7267ec008   Kirill A. Shutemov   mm: postpone page...
3081
  static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff)
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3082
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3083
  	struct vm_area_struct *vma = fe->vma;
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3084
  	struct page *fault_page, *new_page;
bc2466e42   Jan Kara   dax: Use radix tr...
3085
  	void *fault_entry;
00501b531   Johannes Weiner   mm: memcontrol: r...
3086
  	struct mem_cgroup *memcg;
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3087
3088
3089
3090
  	int ret;
  
  	if (unlikely(anon_vma_prepare(vma)))
  		return VM_FAULT_OOM;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3091
  	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3092
3093
  	if (!new_page)
  		return VM_FAULT_OOM;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3094
3095
  	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
  				&memcg, false)) {
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3096
  		put_page(new_page);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3097
3098
  		return VM_FAULT_OOM;
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3099
  	ret = __do_fault(fe, pgoff, new_page, &fault_page, &fault_entry);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3100
3101
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  		goto uncharge_out;
bc2466e42   Jan Kara   dax: Use radix tr...
3102
  	if (!(ret & VM_FAULT_DAX_LOCKED))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3103
  		copy_user_highpage(new_page, fault_page, fe->address, vma);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3104
  	__SetPageUptodate(new_page);
7267ec008   Kirill A. Shutemov   mm: postpone page...
3105
3106
  	ret |= alloc_set_pte(fe, memcg, new_page);
  	if (fe->pte)
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3107
  		pte_unmap_unlock(fe->pte, fe->ptl);
bc2466e42   Jan Kara   dax: Use radix tr...
3108
  	if (!(ret & VM_FAULT_DAX_LOCKED)) {
2e4cdab05   Matthew Wilcox   mm: allow page fa...
3109
  		unlock_page(fault_page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3110
  		put_page(fault_page);
2e4cdab05   Matthew Wilcox   mm: allow page fa...
3111
  	} else {
bc2466e42   Jan Kara   dax: Use radix tr...
3112
  		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
2e4cdab05   Matthew Wilcox   mm: allow page fa...
3113
  	}
7267ec008   Kirill A. Shutemov   mm: postpone page...
3114
3115
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
  		goto uncharge_out;
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3116
3117
  	return ret;
  uncharge_out:
f627c2f53   Kirill A. Shutemov   memcg: adjust to ...
3118
  	mem_cgroup_cancel_charge(new_page, memcg, false);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3119
  	put_page(new_page);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3120
3121
  	return ret;
  }
7267ec008   Kirill A. Shutemov   mm: postpone page...
3122
  static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3123
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3124
  	struct vm_area_struct *vma = fe->vma;
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3125
3126
  	struct page *fault_page;
  	struct address_space *mapping;
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3127
  	int dirtied = 0;
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3128
  	int ret, tmp;
1d65f86db   KAMEZAWA Hiroyuki   mm: preallocate p...
3129

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3130
  	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
7eae74af3   Kirill A. Shutemov   mm: do_fault(): e...
3131
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3132
  		return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3133
3134
  
  	/*
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3135
3136
  	 * Check if the backing address space wants to know that the page is
  	 * about to become writable
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3137
  	 */
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
3138
3139
  	if (vma->vm_ops->page_mkwrite) {
  		unlock_page(fault_page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3140
  		tmp = do_page_mkwrite(vma, fault_page, fe->address);
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
3141
3142
  		if (unlikely(!tmp ||
  				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3143
  			put_page(fault_page);
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
3144
  			return tmp;
4294621f4   Hugh Dickins   [PATCH] mm: rss =...
3145
  		}
fb09a4642   Kirill A. Shutemov   mm: consolidate c...
3146
  	}
7267ec008   Kirill A. Shutemov   mm: postpone page...
3147
3148
  	ret |= alloc_set_pte(fe, NULL, fault_page);
  	if (fe->pte)
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3149
  		pte_unmap_unlock(fe->pte, fe->ptl);
7267ec008   Kirill A. Shutemov   mm: postpone page...
3150
3151
  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
  					VM_FAULT_RETRY))) {
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3152
  		unlock_page(fault_page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3153
  		put_page(fault_page);
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3154
  		return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3155
  	}
b827e496c   Nick Piggin   mm: close page_mk...
3156

f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3157
3158
  	if (set_page_dirty(fault_page))
  		dirtied = 1;
d82fa87d2   Andrew Morton   mm/memory.c:do_sh...
3159
3160
3161
3162
3163
3164
  	/*
  	 * Take a local copy of the address_space - page.mapping may be zeroed
  	 * by truncate after unlock_page().   The address_space itself remains
  	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
  	 * release semantics to prevent the compiler from undoing this copying.
  	 */
1c290f642   Kirill A. Shutemov   mm: sanitize page...
3165
  	mapping = page_rmapping(fault_page);
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3166
3167
3168
3169
3170
3171
3172
  	unlock_page(fault_page);
  	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
  		/*
  		 * Some device drivers do not set page.mapping but still
  		 * dirty their pages
  		 */
  		balance_dirty_pages_ratelimited(mapping);
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
3173
  	}
d00806b18   Nick Piggin   mm: fix fault vs ...
3174

74ec67511   Johannes Weiner   mm: memory: remov...
3175
  	if (!vma->vm_ops->page_mkwrite)
f0c6d4d29   Kirill A. Shutemov   mm: introduce do_...
3176
  		file_update_time(vma->vm_file);
b827e496c   Nick Piggin   mm: close page_mk...
3177

1d65f86db   KAMEZAWA Hiroyuki   mm: preallocate p...
3178
  	return ret;
54cb8821d   Nick Piggin   mm: merge populat...
3179
  }
d00806b18   Nick Piggin   mm: fix fault vs ...
3180

9a95f3cf7   Paul Cassella   mm: describe mmap...
3181
3182
3183
3184
3185
3186
  /*
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults).
   * The mmap_sem may have been released depending on flags and our
   * return value.  See filemap_fault() and __lock_page_or_retry().
   */
7267ec008   Kirill A. Shutemov   mm: postpone page...
3187
  static int do_fault(struct fault_env *fe)
54cb8821d   Nick Piggin   mm: merge populat...
3188
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3189
3190
  	struct vm_area_struct *vma = fe->vma;
  	pgoff_t pgoff = linear_page_index(vma, fe->address);
54cb8821d   Nick Piggin   mm: merge populat...
3191

6b7339f4c   Kirill A. Shutemov   mm: avoid setting...
3192
3193
3194
  	/* The VMA was not fully populated on mmap(), or is missing VM_DONTEXPAND */
  	if (!vma->vm_ops->fault)
  		return VM_FAULT_SIGBUS;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3195
  	if (!(fe->flags & FAULT_FLAG_WRITE))
7267ec008   Kirill A. Shutemov   mm: postpone page...
3196
  		return do_read_fault(fe, pgoff);
ec47c3b95   Kirill A. Shutemov   mm: introduce do_...
3197
  	if (!(vma->vm_flags & VM_SHARED))
7267ec008   Kirill A. Shutemov   mm: postpone page...
3198
3199
  		return do_cow_fault(fe, pgoff);
  	return do_shared_fault(fe, pgoff);
54cb8821d   Nick Piggin   mm: merge populat...
3200
  }
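
  /*
   * Illustrative sketch (hypothetical, not part of the original file): the
   * ->fault, ->map_pages and ->page_mkwrite hooks dispatched above are
   * normally wired up by the filesystem's mmap handler, by pointing
   * vma->vm_ops at a table like the one below.  The generic page cache
   * implementations named here do exist; the struct name is made up.
   */
  static const struct vm_operations_struct example_file_vm_ops __maybe_unused = {
  	.fault		= filemap_fault,	/* used by __do_fault() */
  	.map_pages	= filemap_map_pages,	/* used by do_fault_around() */
  	.page_mkwrite	= filemap_page_mkwrite,	/* used by do_shared_fault() */
  };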
b19a99392   Rashika Kheria   mm/memory.c: mark...
3201
  static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
04bb2f947   Rik van Riel   sched/numa: Adjus...
3202
3203
  				unsigned long addr, int page_nid,
  				int *flags)
9532fec11   Mel Gorman   mm: numa: Migrate...
3204
3205
3206
3207
  {
  	get_page(page);
  
  	count_vm_numa_event(NUMA_HINT_FAULTS);
04bb2f947   Rik van Riel   sched/numa: Adjus...
3208
  	if (page_nid == numa_node_id()) {
9532fec11   Mel Gorman   mm: numa: Migrate...
3209
  		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
04bb2f947   Rik van Riel   sched/numa: Adjus...
3210
3211
  		*flags |= TNF_FAULT_LOCAL;
  	}
9532fec11   Mel Gorman   mm: numa: Migrate...
3212
3213
3214
  
  	return mpol_misplaced(page, vma, addr);
  }
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3215
  static int do_numa_page(struct fault_env *fe, pte_t pte)
d10e63f29   Mel Gorman   mm: numa: Create ...
3216
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3217
  	struct vm_area_struct *vma = fe->vma;
4daae3b4b   Mel Gorman   mm: mempolicy: Us...
3218
  	struct page *page = NULL;
8191acbd3   Mel Gorman   mm: numa: Sanitiz...
3219
  	int page_nid = -1;
90572890d   Peter Zijlstra   mm: numa: Change ...
3220
  	int last_cpupid;
cbee9f88e   Peter Zijlstra   mm: numa: Add fau...
3221
  	int target_nid;
b8593bfda   Mel Gorman   mm: sched: Adapt ...
3222
  	bool migrated = false;
b191f9b10   Mel Gorman   mm: numa: preserv...
3223
  	bool was_writable = pte_write(pte);
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3224
  	int flags = 0;
d10e63f29   Mel Gorman   mm: numa: Create ...
3225
3226
3227
3228
3229
3230
  
  	/*
  	* The "pte" at this point cannot be used safely without
  	* validation through pte_unmap_same(). It's of NUMA type but
  	* the pfn may be screwed if the read is non atomic.
  	*
4d9424669   Mel Gorman   mm: convert p[te|...
3231
3232
3233
  	 * We can safely just do a "set_pte_at()", because the old
  	 * page table entry is not accessible, so there would be no
  	 * concurrent hardware modifications to the PTE.
d10e63f29   Mel Gorman   mm: numa: Create ...
3234
  	 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3235
3236
3237
3238
  	fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd);
  	spin_lock(fe->ptl);
  	if (unlikely(!pte_same(*fe->pte, pte))) {
  		pte_unmap_unlock(fe->pte, fe->ptl);
4daae3b4b   Mel Gorman   mm: mempolicy: Us...
3239
3240
  		goto out;
  	}
4d9424669   Mel Gorman   mm: convert p[te|...
3241
3242
3243
  	/* Make it present again */
  	pte = pte_modify(pte, vma->vm_page_prot);
  	pte = pte_mkyoung(pte);
b191f9b10   Mel Gorman   mm: numa: preserv...
3244
3245
  	if (was_writable)
  		pte = pte_mkwrite(pte);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3246
3247
  	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
  	update_mmu_cache(vma, fe->address, fe->pte);
d10e63f29   Mel Gorman   mm: numa: Create ...
3248

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3249
  	page = vm_normal_page(vma, fe->address, pte);
d10e63f29   Mel Gorman   mm: numa: Create ...
3250
  	if (!page) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3251
  		pte_unmap_unlock(fe->pte, fe->ptl);
d10e63f29   Mel Gorman   mm: numa: Create ...
3252
3253
  		return 0;
  	}
e81c48024   Kirill A. Shutemov   mm, numa: skip PT...
3254
3255
  	/* TODO: handle PTE-mapped THP */
  	if (PageCompound(page)) {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3256
  		pte_unmap_unlock(fe->pte, fe->ptl);
e81c48024   Kirill A. Shutemov   mm, numa: skip PT...
3257
3258
  		return 0;
  	}
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3259
  	/*
bea66fbd1   Mel Gorman   mm: numa: group r...
3260
3261
3262
3263
3264
3265
  	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
  	 * much anyway since they can be in shared cache state. This misses
  	 * the case where a mapping is writable but the process never writes
  	 * to it; moreover, pte_write gets cleared during protection updates and
  	 * pte_dirty has unpredictable behaviour between PTE scan updates,
  	 * background writeback, dirty balancing and application behaviour.
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3266
  	 */
d59dc7bcf   Rik van Riel   sched/numa, mm: R...
3267
  	if (!pte_write(pte))
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3268
  		flags |= TNF_NO_GROUP;
dabe1d992   Rik van Riel   sched/numa: Be mo...
3269
3270
3271
3272
3273
3274
  	/*
  	 * Flag if the page is shared between multiple address spaces. This
  	 * is later used when determining whether to group tasks together
  	 */
  	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
  		flags |= TNF_SHARED;
90572890d   Peter Zijlstra   mm: numa: Change ...
3275
  	last_cpupid = page_cpupid_last(page);
8191acbd3   Mel Gorman   mm: numa: Sanitiz...
3276
  	page_nid = page_to_nid(page);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3277
3278
3279
  	target_nid = numa_migrate_prep(page, vma, fe->address, page_nid,
  			&flags);
  	pte_unmap_unlock(fe->pte, fe->ptl);
4daae3b4b   Mel Gorman   mm: mempolicy: Us...
3280
  	if (target_nid == -1) {
4daae3b4b   Mel Gorman   mm: mempolicy: Us...
3281
3282
3283
3284
3285
  		put_page(page);
  		goto out;
  	}
  
  	/* Migrate to the requested node */
1bc115d87   Mel Gorman   mm: numa: Scan pa...
3286
  	migrated = migrate_misplaced_page(page, vma, target_nid);
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3287
  	if (migrated) {
8191acbd3   Mel Gorman   mm: numa: Sanitiz...
3288
  		page_nid = target_nid;
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3289
  		flags |= TNF_MIGRATED;
074c23817   Mel Gorman   mm: numa: slow PT...
3290
3291
  	} else
  		flags |= TNF_MIGRATE_FAIL;
4daae3b4b   Mel Gorman   mm: mempolicy: Us...
3292
3293
  
  out:
8191acbd3   Mel Gorman   mm: numa: Sanitiz...
3294
  	if (page_nid != -1)
6688cc054   Peter Zijlstra   mm: numa: Do not ...
3295
  		task_numa_fault(last_cpupid, page_nid, 1, flags);
d10e63f29   Mel Gorman   mm: numa: Create ...
3296
3297
  	return 0;
  }
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3298
  static int create_huge_pmd(struct fault_env *fe)
b96375f74   Matthew Wilcox   mm: add a pmd_fau...
3299
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3300
  	struct vm_area_struct *vma = fe->vma;
fb6dd5fa4   Kirill A. Shutemov   mm: use vma_is_an...
3301
  	if (vma_is_anonymous(vma))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3302
  		return do_huge_pmd_anonymous_page(fe);
b96375f74   Matthew Wilcox   mm: add a pmd_fau...
3303
  	if (vma->vm_ops->pmd_fault)
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3304
3305
  		return vma->vm_ops->pmd_fault(vma, fe->address, fe->pmd,
  				fe->flags);
b96375f74   Matthew Wilcox   mm: add a pmd_fau...
3306
3307
  	return VM_FAULT_FALLBACK;
  }
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3308
  static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
b96375f74   Matthew Wilcox   mm: add a pmd_fau...
3309
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3310
3311
3312
3313
3314
  	if (vma_is_anonymous(fe->vma))
  		return do_huge_pmd_wp_page(fe, orig_pmd);
  	if (fe->vma->vm_ops->pmd_fault)
  		return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address, fe->pmd,
  				fe->flags);
af9e4d5f2   Kirill A. Shutemov   thp: handle file ...
3315
3316
3317
3318
  
  	/* COW handled on pte level: split pmd */
  	VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma);
  	split_huge_pmd(fe->vma, fe->pmd, fe->address);
b96375f74   Matthew Wilcox   mm: add a pmd_fau...
3319
3320
  	return VM_FAULT_FALLBACK;
  }
38e088546   Lorenzo Stoakes   mm: check VMA fla...
3321
3322
3323
3324
  static inline bool vma_is_accessible(struct vm_area_struct *vma)
  {
  	return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3325
3326
3327
3328
3329
3330
3331
3332
3333
  /*
   * These routines also need to handle stuff like marking pages dirty
   * and/or accessed for architectures that don't do it in hardware (most
   * RISC architectures).  The early dirtying is also good on the i386.
   *
   * There is also a hook called "update_mmu_cache()" that architectures
   * with external mmu caches can use to update those (i.e. the Sparc or
   * PowerPC hashed page tables that act as extended TLBs).
   *
7267ec008   Kirill A. Shutemov   mm: postpone page...
3334
3335
   * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
   * concurrent faults).
9a95f3cf7   Paul Cassella   mm: describe mmap...
3336
   *
7267ec008   Kirill A. Shutemov   mm: postpone page...
3337
3338
   * The mmap_sem may have been released depending on flags and our return value.
   * See filemap_fault() and __lock_page_or_retry().
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3339
   */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3340
  static int handle_pte_fault(struct fault_env *fe)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3341
3342
  {
  	pte_t entry;
7267ec008   Kirill A. Shutemov   mm: postpone page...
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
  	if (unlikely(pmd_none(*fe->pmd))) {
  		/*
  		 * Leave __pte_alloc() until later: because vm_ops->fault may
  		 * want to allocate a huge page, and if we expose the page table
  		 * for an instant, it will be difficult to retract from
  		 * concurrent faults and from rmap lookups.
  		 */
  		fe->pte = NULL;
  	} else {
  		/* See comment in pte_alloc_one_map() */
  		if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
  			return 0;
  		/*
  		 * A regular pmd is established and it can't morph into a huge
  		 * pmd from under us anymore at this point because we hold the
  		 * mmap_sem read mode and khugepaged takes it in write mode.
  		 * So now it's safe to run pte_offset_map().
  		 */
  		fe->pte = pte_offset_map(fe->pmd, fe->address);
  
  		entry = *fe->pte;
  
  		/*
  		 * Some architectures can have larger ptes than wordsize,
  		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
  		 * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
  		 * atomic accesses.  The code below just needs a consistent
  		 * view for the ifs and we later double check anyway with the
  		 * ptl lock held. So here a barrier will do.
  		 */
  		barrier();
65500d234   Hugh Dickins   [PATCH] mm: page ...
3374
  		if (pte_none(entry)) {
7267ec008   Kirill A. Shutemov   mm: postpone page...
3375
3376
  			pte_unmap(fe->pte);
  			fe->pte = NULL;
65500d234   Hugh Dickins   [PATCH] mm: page ...
3377
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3378
  	}
7267ec008   Kirill A. Shutemov   mm: postpone page...
3379
3380
3381
3382
3383
3384
3385
3386
3387
  	if (!fe->pte) {
  		if (vma_is_anonymous(fe->vma))
  			return do_anonymous_page(fe);
  		else
  			return do_fault(fe);
  	}
  
  	if (!pte_present(entry))
  		return do_swap_page(fe, entry);
38e088546   Lorenzo Stoakes   mm: check VMA fla...
3388
  	if (pte_protnone(entry) && vma_is_accessible(fe->vma))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3389
  		return do_numa_page(fe, entry);
d10e63f29   Mel Gorman   mm: numa: Create ...
3390

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3391
3392
3393
  	fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
  	spin_lock(fe->ptl);
  	if (unlikely(!pte_same(*fe->pte, entry)))
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
3394
  		goto unlock;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3395
  	if (fe->flags & FAULT_FLAG_WRITE) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3396
  		if (!pte_write(entry))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3397
  			return do_wp_page(fe, entry);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3398
3399
3400
  		entry = pte_mkdirty(entry);
  	}
  	entry = pte_mkyoung(entry);
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3401
3402
3403
  	if (ptep_set_access_flags(fe->vma, fe->address, fe->pte, entry,
  				fe->flags & FAULT_FLAG_WRITE)) {
  		update_mmu_cache(fe->vma, fe->address, fe->pte);
1a44e1490   Andrea Arcangeli   [PATCH] .text pag...
3404
3405
3406
3407
3408
3409
3410
  	} else {
  		/*
  		 * This is needed only for protection faults but the arch code
  		 * is not yet telling us if this is a protection fault or not.
  		 * This still avoids useless tlb flushes for .text page faults
  		 * with threads.
  		 */
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3411
3412
  		if (fe->flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(fe->vma, fe->address);
1a44e1490   Andrea Arcangeli   [PATCH] .text pag...
3413
  	}
8f4e2101f   Hugh Dickins   [PATCH] mm: page ...
3414
  unlock:
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3415
  	pte_unmap_unlock(fe->pte, fe->ptl);
83c54070e   Nick Piggin   mm: fault feedbac...
3416
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3417
3418
3419
3420
  }
  
  /*
   * By the time we get here, we already hold the mm semaphore
9a95f3cf7   Paul Cassella   mm: describe mmap...
3421
3422
3423
   *
   * The mmap_sem may have been released depending on flags and our
   * return value.  See filemap_fault() and __lock_page_or_retry().
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3424
   */
dcddffd41   Kirill A. Shutemov   mm: do not pass m...
3425
3426
  static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  		unsigned int flags)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3427
  {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3428
3429
3430
3431
3432
  	struct fault_env fe = {
  		.vma = vma,
  		.address = address,
  		.flags = flags,
  	};
dcddffd41   Kirill A. Shutemov   mm: do not pass m...
3433
  	struct mm_struct *mm = vma->vm_mm;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3434
3435
  	pgd_t *pgd;
  	pud_t *pud;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3436

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3437
  	pgd = pgd_offset(mm, address);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3438
3439
  	pud = pud_alloc(mm, pgd, address);
  	if (!pud)
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3440
  		return VM_FAULT_OOM;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3441
3442
  	fe.pmd = pmd_alloc(mm, pud, address);
  	if (!fe.pmd)
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3443
  		return VM_FAULT_OOM;
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3444
3445
  	if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) {
  		int ret = create_huge_pmd(&fe);
c02925540   Kirill A. Shutemov   thp: consolidate ...
3446
3447
  		if (!(ret & VM_FAULT_FALLBACK))
  			return ret;
71e3aac07   Andrea Arcangeli   thp: transparent ...
3448
  	} else {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3449
  		pmd_t orig_pmd = *fe.pmd;
1f1d06c34   David Rientjes   thp, memcg: split...
3450
  		int ret;
71e3aac07   Andrea Arcangeli   thp: transparent ...
3451
  		barrier();
5c7fb56e5   Dan Williams   mm, dax: dax-pmd ...
3452
  		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
38e088546   Lorenzo Stoakes   mm: check VMA fla...
3453
  			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3454
  				return do_huge_pmd_numa_page(&fe, orig_pmd);
d10e63f29   Mel Gorman   mm: numa: Create ...
3455

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3456
3457
3458
  			if ((fe.flags & FAULT_FLAG_WRITE) &&
  					!pmd_write(orig_pmd)) {
  				ret = wp_huge_pmd(&fe, orig_pmd);
9845cbbd1   Kirill A. Shutemov   mm, thp: fix infi...
3459
3460
  				if (!(ret & VM_FAULT_FALLBACK))
  					return ret;
a1dd450bc   Will Deacon   mm: thp: set the ...
3461
  			} else {
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3462
  				huge_pmd_set_accessed(&fe, orig_pmd);
9845cbbd1   Kirill A. Shutemov   mm, thp: fix infi...
3463
  				return 0;
1f1d06c34   David Rientjes   thp, memcg: split...
3464
  			}
71e3aac07   Andrea Arcangeli   thp: transparent ...
3465
3466
  		}
  	}
bae473a42   Kirill A. Shutemov   mm: introduce fau...
3467
  	return handle_pte_fault(&fe);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3468
  }
9a95f3cf7   Paul Cassella   mm: describe mmap...
3469
3470
3471
3472
3473
3474
  /*
   * By the time we get here, we already hold the mm semaphore
   *
   * The mmap_sem may have been released depending on flags and our
   * return value.  See filemap_fault() and __lock_page_or_retry().
   */
dcddffd41   Kirill A. Shutemov   mm: do not pass m...
3475
3476
  int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
  		unsigned int flags)
519e52473   Johannes Weiner   mm: memcg: enable...
3477
3478
3479
3480
3481
3482
  {
  	int ret;
  
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
dcddffd41   Kirill A. Shutemov   mm: do not pass m...
3483
  	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
519e52473   Johannes Weiner   mm: memcg: enable...
3484
3485
3486
3487
3488
3489
3490
3491
3492
  
  	/* Do counter updates before entering the really critical section. */
  	check_sync_rss_stat(current);
  
  	/*
  	 * Enable the memcg OOM handling for faults triggered in user
  	 * space.  Kernel faults are handled more gracefully.
  	 */
  	if (flags & FAULT_FLAG_USER)
494264208   Johannes Weiner   mm: memcg: handle...
3493
  		mem_cgroup_oom_enable();
519e52473   Johannes Weiner   mm: memcg: enable...
3494

bae473a42   Kirill A. Shutemov   mm: introduce fau...
3495
3496
3497
3498
3499
3500
3501
3502
3503
  	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
  					    flags & FAULT_FLAG_INSTRUCTION,
  					    flags & FAULT_FLAG_REMOTE))
  		return VM_FAULT_SIGSEGV;
  
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
  	else
  		ret = __handle_mm_fault(vma, address, flags);
519e52473   Johannes Weiner   mm: memcg: enable...
3504

494264208   Johannes Weiner   mm: memcg: handle...
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
  	if (flags & FAULT_FLAG_USER) {
  		mem_cgroup_oom_disable();
  		/*
  		 * The task may have entered a memcg OOM situation but
  		 * if the allocation error was handled gracefully (no
  		 * VM_FAULT_OOM), there is no need to kill anything.
  		 * Just clean up the OOM state peacefully.
  		 */
  		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
  			mem_cgroup_oom_synchronize(false);
  	}
3812c8c8f   Johannes Weiner   mm: memcg: do not...
3516

3f70dc38c   Michal Hocko   mm: make sure tha...
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
  	/*
  	 * This mm has already been reaped by the oom reaper and so the
  	 * refault cannot be trusted in general. Anonymous refaults would
  	 * lose data and give a zero page instead, for example. This is
  	 * especially a problem for use_mm(): regular tasks will just die,
  	 * and the corrupted data will not be visible anywhere, while a
  	 * kthread will outlive the oom victim and potentially propagate
  	 * the corrupted data further.
  	 */
  	if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
  				&& test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
  		ret = VM_FAULT_SIGBUS;
519e52473   Johannes Weiner   mm: memcg: enable...
3529
3530
  	return ret;
  }
e1d6d01ab   Jesse Barnes   mm: export find_e...
3531
  EXPORT_SYMBOL_GPL(handle_mm_fault);
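
  /*
   * Illustrative sketch (hypothetical, heavily simplified): the shape of an
   * architecture page-fault handler that funnels into handle_mm_fault().
   * Real handlers also expand stacks, retry with FAULT_FLAG_ALLOW_RETRY
   * (in which case mmap_sem may already have been dropped on return), and
   * translate the VM_FAULT_* bits into signals.
   */
  static __maybe_unused int example_arch_fault(struct mm_struct *mm,
  					     unsigned long address, bool write)
  {
  	struct vm_area_struct *vma;
  	unsigned int flags = FAULT_FLAG_USER | (write ? FAULT_FLAG_WRITE : 0);
  	int fault;

  	down_read(&mm->mmap_sem);
  	vma = find_vma(mm, address);
  	if (!vma || vma->vm_start > address) {
  		up_read(&mm->mmap_sem);
  		return -EFAULT;
  	}

  	fault = handle_mm_fault(vma, address, flags);
  	up_read(&mm->mmap_sem);

  	return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
  }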
519e52473   Johannes Weiner   mm: memcg: enable...
3532

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3533
3534
3535
  #ifndef __PAGETABLE_PUD_FOLDED
  /*
   * Allocate page upper directory.
872fec16d   Hugh Dickins   [PATCH] mm: init_...
3536
   * We've already handled the fast-path in-line.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3537
   */
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3538
  int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3539
  {
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3540
3541
  	pud_t *new = pud_alloc_one(mm, address);
  	if (!new)
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3542
  		return -ENOMEM;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3543

362a61ad6   Nick Piggin   fix SMP data race...
3544
  	smp_wmb(); /* See comment in __pte_alloc */
872fec16d   Hugh Dickins   [PATCH] mm: init_...
3545
  	spin_lock(&mm->page_table_lock);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3546
  	if (pgd_present(*pgd))		/* Another has populated it */
5e5419734   Benjamin Herrenschmidt   add mm argument t...
3547
  		pud_free(mm, new);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3548
3549
  	else
  		pgd_populate(mm, pgd, new);
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3550
  	spin_unlock(&mm->page_table_lock);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3551
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3552
3553
3554
3555
3556
3557
  }
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
  /*
   * Allocate page middle directory.
872fec16d   Hugh Dickins   [PATCH] mm: init_...
3558
   * We've already handled the fast-path in-line.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3559
   */
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3560
  int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3561
  {
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3562
3563
  	pmd_t *new = pmd_alloc_one(mm, address);
  	if (!new)
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3564
  		return -ENOMEM;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3565

362a61ad6   Nick Piggin   fix SMP data race...
3566
  	smp_wmb(); /* See comment in __pte_alloc */
872fec16d   Hugh Dickins   [PATCH] mm: init_...
3567
  	spin_lock(&mm->page_table_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3568
  #ifndef __ARCH_HAS_4LEVEL_HACK
dc6c9a35b   Kirill A. Shutemov   mm: account pmd p...
3569
3570
  	if (!pud_present(*pud)) {
  		mm_inc_nr_pmds(mm);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3571
  		pud_populate(mm, pud, new);
dc6c9a35b   Kirill A. Shutemov   mm: account pmd p...
3572
  	} else	/* Another has populated it */
5e5419734   Benjamin Herrenschmidt   add mm argument t...
3573
  		pmd_free(mm, new);
dc6c9a35b   Kirill A. Shutemov   mm: account pmd p...
3574
3575
3576
  #else
  	if (!pgd_present(*pud)) {
  		mm_inc_nr_pmds(mm);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3577
  		pgd_populate(mm, pud, new);
dc6c9a35b   Kirill A. Shutemov   mm: account pmd p...
3578
3579
  	} else /* Another has populated it */
  		pmd_free(mm, new);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3580
  #endif /* __ARCH_HAS_4LEVEL_HACK */
c74df32c7   Hugh Dickins   [PATCH] mm: ptd_a...
3581
  	spin_unlock(&mm->page_table_lock);
1bb3630e8   Hugh Dickins   [PATCH] mm: ptd_a...
3582
  	return 0;
e0f39591c   Alan Stern   [PATCH] Workaroun...
3583
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3584
  #endif /* __PAGETABLE_PMD_FOLDED */
1b36ba815   Namhyung Kim   mm: wrap follow_p...
3585
  static int __follow_pte(struct mm_struct *mm, unsigned long address,
f8ad0f499   Johannes Weiner   mm: introduce fol...
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
  		pte_t **ptepp, spinlock_t **ptlp)
  {
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	pte_t *ptep;
  
  	pgd = pgd_offset(mm, address);
  	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  		goto out;
  
  	pud = pud_offset(pgd, address);
  	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  		goto out;
  
  	pmd = pmd_offset(pud, address);
f66055ab6   Andrea Arcangeli   thp: verify pmd_t...
3602
  	VM_BUG_ON(pmd_trans_huge(*pmd));
f8ad0f499   Johannes Weiner   mm: introduce fol...
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
  	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  		goto out;
  
  	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
  	if (pmd_huge(*pmd))
  		goto out;
  
  	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
  	if (!ptep)
  		goto out;
  	if (!pte_present(*ptep))
  		goto unlock;
  	*ptepp = ptep;
  	return 0;
  unlock:
  	pte_unmap_unlock(ptep, *ptlp);
  out:
  	return -EINVAL;
  }
1b36ba815   Namhyung Kim   mm: wrap follow_p...
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
  static inline int follow_pte(struct mm_struct *mm, unsigned long address,
  			     pte_t **ptepp, spinlock_t **ptlp)
  {
  	int res;
  
  	/* (void) is needed to make gcc happy */
  	(void) __cond_lock(*ptlp,
  			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
  	return res;
  }
3b6748e2d   Johannes Weiner   mm: introduce fol...
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
  /**
   * follow_pfn - look up PFN at a user virtual address
   * @vma: memory mapping
   * @address: user virtual address
   * @pfn: location to store found PFN
   *
   * Only IO mappings and raw PFN mappings are allowed.
   *
   * Returns zero and the pfn at @pfn on success, -ve otherwise.
   */
  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  	unsigned long *pfn)
  {
  	int ret = -EINVAL;
  	spinlock_t *ptl;
  	pte_t *ptep;
  
  	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  		return ret;
  
  	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
  	if (ret)
  		return ret;
  	*pfn = pte_pfn(*ptep);
  	pte_unmap_unlock(ptep, ptl);
  	return 0;
  }
  EXPORT_SYMBOL(follow_pfn);
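
  /*
   * Illustrative usage sketch (hypothetical helper, not part of the
   * original file): resolving a user address inside a VM_IO/VM_PFNMAP
   * mapping to a PFN, with mmap_sem held as follow_pfn() expects.
   */
  static __maybe_unused int example_user_addr_to_pfn(struct mm_struct *mm,
  						   unsigned long addr,
  						   unsigned long *pfn)
  {
  	struct vm_area_struct *vma;
  	int ret = -EINVAL;

  	down_read(&mm->mmap_sem);
  	vma = find_vma(mm, addr);
  	if (vma && vma->vm_start <= addr)
  		ret = follow_pfn(vma, addr, pfn);
  	up_read(&mm->mmap_sem);
  	return ret;
  }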
28b2ee20c   Rik van Riel   access_process_vm...
3660
  #ifdef CONFIG_HAVE_IOREMAP_PROT
d87fe6607   venkatesh.pallipadi@intel.com   x86: PAT: modify ...
3661
3662
3663
  int follow_phys(struct vm_area_struct *vma,
  		unsigned long address, unsigned int flags,
  		unsigned long *prot, resource_size_t *phys)
28b2ee20c   Rik van Riel   access_process_vm...
3664
  {
03668a4de   Johannes Weiner   mm: use generic f...
3665
  	int ret = -EINVAL;
28b2ee20c   Rik van Riel   access_process_vm...
3666
3667
  	pte_t *ptep, pte;
  	spinlock_t *ptl;
28b2ee20c   Rik van Riel   access_process_vm...
3668

d87fe6607   venkatesh.pallipadi@intel.com   x86: PAT: modify ...
3669
3670
  	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
  		goto out;
28b2ee20c   Rik van Riel   access_process_vm...
3671

03668a4de   Johannes Weiner   mm: use generic f...
3672
  	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
d87fe6607   venkatesh.pallipadi@intel.com   x86: PAT: modify ...
3673
  		goto out;
28b2ee20c   Rik van Riel   access_process_vm...
3674
  	pte = *ptep;
03668a4de   Johannes Weiner   mm: use generic f...
3675

28b2ee20c   Rik van Riel   access_process_vm...
3676
3677
  	if ((flags & FOLL_WRITE) && !pte_write(pte))
  		goto unlock;
28b2ee20c   Rik van Riel   access_process_vm...
3678
3679
  
  	*prot = pgprot_val(pte_pgprot(pte));
03668a4de   Johannes Weiner   mm: use generic f...
3680
  	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
28b2ee20c   Rik van Riel   access_process_vm...
3681

03668a4de   Johannes Weiner   mm: use generic f...
3682
  	ret = 0;
28b2ee20c   Rik van Riel   access_process_vm...
3683
3684
3685
  unlock:
  	pte_unmap_unlock(ptep, ptl);
  out:
d87fe6607   venkatesh.pallipadi@intel.com   x86: PAT: modify ...
3686
  	return ret;
28b2ee20c   Rik van Riel   access_process_vm...
3687
3688
3689
3690
3691
3692
3693
  }
  
  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
  			void *buf, int len, int write)
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
2bc7273b0   KOSAKI Motohiro   mm: make maddr __...
3694
  	void __iomem *maddr;
28b2ee20c   Rik van Riel   access_process_vm...
3695
  	int offset = addr & (PAGE_SIZE-1);
d87fe6607   venkatesh.pallipadi@intel.com   x86: PAT: modify ...
3696
  	if (follow_phys(vma, addr, write, &prot, &phys_addr))
28b2ee20c   Rik van Riel   access_process_vm...
3697
  		return -EINVAL;
9cb12d7b4   Grazvydas Ignotas   mm/memory.c: actu...
3698
  	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
28b2ee20c   Rik van Riel   access_process_vm...
3699
3700
3701
3702
3703
3704
3705
3706
  	if (write)
  		memcpy_toio(maddr + offset, buf, len);
  	else
  		memcpy_fromio(buf, maddr + offset, len);
  	iounmap(maddr);
  
  	return len;
  }
5a73633ef   Uwe Kleine-König   mm: make generic_...
3707
  EXPORT_SYMBOL_GPL(generic_access_phys);
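
  /*
   * Illustrative sketch (hypothetical, not part of the original file): a
   * driver that mmap()s MMIO registers can make them reachable through
   * ptrace()/access_process_vm() by plugging generic_access_phys() into
   * its vm_operations_struct.
   */
  static const struct vm_operations_struct example_mmio_vm_ops __maybe_unused = {
  	.access = generic_access_phys,
  };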
28b2ee20c   Rik van Riel   access_process_vm...
3708
  #endif
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3709
  /*
206cb6365   Stephen Wilson   mm: factor out ma...
3710
3711
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3712
   */
e71b4e061   Eric W. Biederman   ptrace: Don't all...
3713
  int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
442486ec1   Lorenzo Stoakes   mm: replace __acc...
3714
  		unsigned long addr, void *buf, int len, unsigned int gup_flags)
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3715
  {
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3716
  	struct vm_area_struct *vma;
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3717
  	void *old_buf = buf;
442486ec1   Lorenzo Stoakes   mm: replace __acc...
3718
  	int write = gup_flags & FOLL_WRITE;
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3719

0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3720
  	down_read(&mm->mmap_sem);
183ff22bb   Simon Arlott   spelling fixes: mm/
3721
  	/* ignore errors, just check how much was successfully transferred */
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3722
3723
3724
  	while (len) {
  		int bytes, ret, offset;
  		void *maddr;
28b2ee20c   Rik van Riel   access_process_vm...
3725
  		struct page *page = NULL;
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3726

1e9877902   Dave Hansen   mm/gup: Introduce...
3727
  		ret = get_user_pages_remote(tsk, mm, addr, 1,
442486ec1   Lorenzo Stoakes   mm: replace __acc...
3728
  				gup_flags, &page, &vma);
28b2ee20c   Rik van Riel   access_process_vm...
3729
  		if (ret <= 0) {
dbffcd03d   Rik van Riel   mm: change confus...
3730
3731
3732
  #ifndef CONFIG_HAVE_IOREMAP_PROT
  			break;
  #else
28b2ee20c   Rik van Riel   access_process_vm...
3733
3734
3735
3736
  			/*
  			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
  			 * we can access using slightly different code.
  			 */
28b2ee20c   Rik van Riel   access_process_vm...
3737
  			vma = find_vma(mm, addr);
fe936dfc2   Michael Ellerman   mm: check that we...
3738
  			if (!vma || vma->vm_start > addr)
28b2ee20c   Rik van Riel   access_process_vm...
3739
3740
3741
3742
3743
  				break;
  			if (vma->vm_ops && vma->vm_ops->access)
  				ret = vma->vm_ops->access(vma, addr, buf,
  							  len, write);
  			if (ret <= 0)
28b2ee20c   Rik van Riel   access_process_vm...
3744
3745
  				break;
  			bytes = ret;
dbffcd03d   Rik van Riel   mm: change confus...
3746
  #endif
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3747
  		} else {
28b2ee20c   Rik van Riel   access_process_vm...
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
  			bytes = len;
  			offset = addr & (PAGE_SIZE-1);
  			if (bytes > PAGE_SIZE-offset)
  				bytes = PAGE_SIZE-offset;
  
  			maddr = kmap(page);
  			if (write) {
  				copy_to_user_page(vma, page, addr,
  						  maddr + offset, buf, bytes);
  				set_page_dirty_lock(page);
  			} else {
  				copy_from_user_page(vma, page, addr,
  						    buf, maddr + offset, bytes);
  			}
  			kunmap(page);
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
3763
  			put_page(page);
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3764
  		}
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3765
3766
3767
3768
3769
  		len -= bytes;
  		buf += bytes;
  		addr += bytes;
  	}
  	up_read(&mm->mmap_sem);
0ec76a110   David Howells   [PATCH] NOMMU: Ch...
3770
3771
3772
  
  	return buf - old_buf;
  }
03252919b   Andi Kleen   x86: print which ...
3773

5ddd36b9c   Stephen Wilson   mm: implement acc...
3774
  /**
ae91dbfc9   Randy Dunlap   mm: fix memory.c ...
3775
   * access_remote_vm - access another process' address space
5ddd36b9c   Stephen Wilson   mm: implement acc...
3776
3777
3778
3779
   * @mm:		the mm_struct of the target address space
   * @addr:	start address to access
   * @buf:	source or destination buffer
   * @len:	number of bytes to transfer
6347e8d5b   Lorenzo Stoakes   mm: replace acces...
3780
   * @gup_flags:	flags modifying lookup behaviour
5ddd36b9c   Stephen Wilson   mm: implement acc...
3781
3782
3783
3784
   *
   * The caller must hold a reference on @mm.
   */
  int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6347e8d5b   Lorenzo Stoakes   mm: replace acces...
3785
  		void *buf, int len, unsigned int gup_flags)
5ddd36b9c   Stephen Wilson   mm: implement acc...
3786
  {
6347e8d5b   Lorenzo Stoakes   mm: replace acces...
3787
  	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
5ddd36b9c   Stephen Wilson   mm: implement acc...
3788
  }
03252919b   Andi Kleen   x86: print which ...
3789
  /*
206cb6365   Stephen Wilson   mm: factor out ma...
3790
3791
3792
3793
3794
   * Access another process' address space.
   * Source/target buffer must be kernel space.
   * Do not walk the page table directly; use get_user_pages().
   */
  int access_process_vm(struct task_struct *tsk, unsigned long addr,
f307ab6dc   Lorenzo Stoakes   mm: replace acces...
3795
  		void *buf, int len, unsigned int gup_flags)
206cb6365   Stephen Wilson   mm: factor out ma...
3796
3797
3798
3799
3800
3801
3802
  {
  	struct mm_struct *mm;
  	int ret;
  
  	mm = get_task_mm(tsk);
  	if (!mm)
  		return 0;
f307ab6dc   Lorenzo Stoakes   mm: replace acces...
3803
  	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
442486ec1   Lorenzo Stoakes   mm: replace __acc...
3804

206cb6365   Stephen Wilson   mm: factor out ma...
3805
3806
3807
3808
  	mmput(mm);
  
  	return ret;
  }
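
  /*
   * Illustrative usage sketch (hypothetical helper, not part of the
   * original file): reading one word of another task's memory, the way a
   * ptrace(PTRACE_PEEKDATA) implementation would.
   */
  static __maybe_unused int example_peek_word(struct task_struct *child,
  					    unsigned long addr,
  					    unsigned long *val)
  {
  	int copied;

  	copied = access_process_vm(child, addr, val, sizeof(*val),
  				   FOLL_FORCE);
  	return copied == sizeof(*val) ? 0 : -EIO;
  }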
03252919b   Andi Kleen   x86: print which ...
3809
3810
3811
3812
3813
3814
3815
  /*
   * Print the name of a VMA.
   */
  void print_vma_addr(char *prefix, unsigned long ip)
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
e8bff74af   Ingo Molnar   x86: fix "BUG: sl...
3816
3817
3818
3819
3820
3821
  	/*
  	 * Do not print if we are in atomic
  	 * contexts (in exception stacks, etc.):
  	 */
  	if (preempt_count())
  		return;
03252919b   Andi Kleen   x86: print which ...
3822
3823
3824
3825
3826
3827
  	down_read(&mm->mmap_sem);
  	vma = find_vma(mm, ip);
  	if (vma && vma->vm_file) {
  		struct file *f = vma->vm_file;
  		char *buf = (char *)__get_free_page(GFP_KERNEL);
  		if (buf) {
2fbc57c53   Andy Shevchenko   mm: use kbasename()
3828
  			char *p;
03252919b   Andi Kleen   x86: print which ...
3829

9bf39ab2a   Miklos Szeredi   vfs: add file_pat...
3830
  			p = file_path(f, buf, PAGE_SIZE);
03252919b   Andi Kleen   x86: print which ...
3831
3832
  			if (IS_ERR(p))
  				p = "?";
2fbc57c53   Andy Shevchenko   mm: use kbasename()
3833
  			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
03252919b   Andi Kleen   x86: print which ...
3834
3835
3836
3837
3838
  					vma->vm_start,
  					vma->vm_end - vma->vm_start);
  			free_page((unsigned long)buf);
  		}
  	}
51a07e50b   Jeff Liu   mm/memory.c:print...
3839
  	up_read(&mm->mmap_sem);
03252919b   Andi Kleen   x86: print which ...
3840
  }
3ee1afa30   Nick Piggin   x86: some lock an...
3841

662bbcb27   Michael S. Tsirkin   mm, sched: Allow ...
3842
  #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
9ec23531f   David Hildenbrand   sched/preempt, mm...
3843
  void __might_fault(const char *file, int line)
3ee1afa30   Nick Piggin   x86: some lock an...
3844
  {
95156f005   Peter Zijlstra   lockdep, mm: fix ...
3845
3846
3847
3848
3849
3850
3851
3852
  	/*
  	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
  	 * holding the mmap_sem; this is safe because kernel memory doesn't
  	 * get paged out, so we'll never actually fault, and the
  	 * annotations below would generate false positives.
  	 */
  	if (segment_eq(get_fs(), KERNEL_DS))
  		return;
9ec23531f   David Hildenbrand   sched/preempt, mm...
3853
  	if (pagefault_disabled())
662bbcb27   Michael S. Tsirkin   mm, sched: Allow ...
3854
  		return;
9ec23531f   David Hildenbrand   sched/preempt, mm...
3855
3856
  	__might_sleep(file, line, 0);
  #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
662bbcb27   Michael S. Tsirkin   mm, sched: Allow ...
3857
  	if (current->mm)
3ee1afa30   Nick Piggin   x86: some lock an...
3858
  		might_lock_read(&current->mm->mmap_sem);
9ec23531f   David Hildenbrand   sched/preempt, mm...
3859
  #endif
3ee1afa30   Nick Piggin   x86: some lock an...
3860
  }
9ec23531f   David Hildenbrand   sched/preempt, mm...
3861
  EXPORT_SYMBOL(__might_fault);
3ee1afa30   Nick Piggin   x86: some lock an...
3862
  #endif
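
  /*
   * Illustrative sketch (hypothetical, not part of the original file):
   * uaccess helpers announce that they may sleep/fault via might_fault(),
   * which lands in __might_fault() above when lockdep or atomic-sleep
   * debugging is enabled.
   */
  static inline int example_put_flag(int __user *uaddr, int val)
  {
  	might_fault();	/* may sleep and take mmap_sem in the fault path */
  	return put_user(val, uaddr);
  }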
47ad8475c   Andrea Arcangeli   thp: clear_copy_h...
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
  
  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  static void clear_gigantic_page(struct page *page,
  				unsigned long addr,
  				unsigned int pages_per_huge_page)
  {
  	int i;
  	struct page *p = page;
  
  	might_sleep();
  	for (i = 0; i < pages_per_huge_page;
  	     i++, p = mem_map_next(p, page, i)) {
  		cond_resched();
  		clear_user_highpage(p, addr + i * PAGE_SIZE);
  	}
  }
  void clear_huge_page(struct page *page,
  		     unsigned long addr, unsigned int pages_per_huge_page)
  {
  	int i;
  
  	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  		clear_gigantic_page(page, addr, pages_per_huge_page);
  		return;
  	}
  
  	might_sleep();
  	for (i = 0; i < pages_per_huge_page; i++) {
  		cond_resched();
  		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  	}
  }
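
  /*
   * Illustrative usage sketch (hypothetical, not part of the original
   * file): a huge page freshly allocated for a fault is zeroed with
   * clear_huge_page() before being marked uptodate and mapped; the number
   * of subpages follows from the compound order.
   */
  static __maybe_unused void example_prep_huge_page(struct page *page,
  						  unsigned long haddr)
  {
  	clear_huge_page(page, haddr, 1 << compound_order(page));
  	__SetPageUptodate(page);
  }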
  
  static void copy_user_gigantic_page(struct page *dst, struct page *src,
  				    unsigned long addr,
  				    struct vm_area_struct *vma,
  				    unsigned int pages_per_huge_page)
  {
  	int i;
  	struct page *dst_base = dst;
  	struct page *src_base = src;
  
  	for (i = 0; i < pages_per_huge_page; ) {
  		cond_resched();
  		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
  
  		i++;
  		dst = mem_map_next(dst, dst_base, i);
  		src = mem_map_next(src, src_base, i);
  	}
  }
  
  void copy_user_huge_page(struct page *dst, struct page *src,
  			 unsigned long addr, struct vm_area_struct *vma,
  			 unsigned int pages_per_huge_page)
  {
  	int i;
  
  	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  		copy_user_gigantic_page(dst, src, addr, vma,
  					pages_per_huge_page);
  		return;
  	}
  
  	might_sleep();
  	for (i = 0; i < pages_per_huge_page; i++) {
  		cond_resched();
  		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  	}
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3934

40b64acd1   Olof Johansson   mm: fix build of ...
3935
  #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
b35f1819a   Kirill A. Shutemov   mm: create a sepa...
3936
3937
3938
3939
3940
3941
3942
3943
  
  static struct kmem_cache *page_ptl_cachep;
  
  void __init ptlock_cache_init(void)
  {
  	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
  			SLAB_PANIC, NULL);
  }
539edb584   Peter Zijlstra   mm: properly sepa...
3944
  bool ptlock_alloc(struct page *page)
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3945
3946
  {
  	spinlock_t *ptl;
b35f1819a   Kirill A. Shutemov   mm: create a sepa...
3947
  	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3948
3949
  	if (!ptl)
  		return false;
539edb584   Peter Zijlstra   mm: properly sepa...
3950
  	page->ptl = ptl;
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3951
3952
  	return true;
  }
539edb584   Peter Zijlstra   mm: properly sepa...
3953
  void ptlock_free(struct page *page)
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3954
  {
b35f1819a   Kirill A. Shutemov   mm: create a sepa...
3955
  	kmem_cache_free(page_ptl_cachep, page->ptl);
49076ec2c   Kirill A. Shutemov   mm: dynamically a...
3956
3957
  }
  #endif
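
  /*
   * Illustrative sketch (hypothetical, simplified): architectures reach
   * ptlock_alloc()/ptlock_free() above through pgtable_page_ctor() and
   * pgtable_page_dtor() when they allocate or free a PTE page, roughly
   * like this.
   */
  static __maybe_unused struct page *example_pte_alloc_one(struct mm_struct *mm,
  							  unsigned long address)
  {
  	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

  	if (!page)
  		return NULL;
  	if (!pgtable_page_ctor(page)) {	/* may end up in ptlock_alloc() */
  		__free_page(page);
  		return NULL;
  	}
  	return page;
  }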