mm/hmm.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * ways: either the HMM user coalesces multiple page faults into one
	 * request and sets flags per pfn for those faults, or the HMM user
	 * wants to pre-fault a range with specific flags. For the latter it
	 * would be a waste to have the user pre-fill the pfn array with a
	 * default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

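/*
 * Usage sketch of the flag combination above: a caller that wants to
 * pre-fault a whole range for write, ignoring any per-pfn input, would
 * typically set something like
 *
 *	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range->pfn_flags_mask = 0;
 *
 * while a caller that wants per-page control sets default_flags to 0,
 * pfn_flags_mask to HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, and fills
 * range->hmm_pfns[] with the request flags for each individual page.
 * See Documentation/vm/hmm.rst for the full description of this API.
 */
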
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

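/*
 * For example, a huge PMD mapping on x86-64 is reported with order
 * PMD_SHIFT - PAGE_SHIFT = 9 (2MB); callers can recover the order from
 * the returned hmm_pfn with hmm_pfn_to_map_order().
 */
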
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap ptes such as DAX pages when all the requested pfn
	 * flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page,
	 * just fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping, then compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the pmd is either a valid
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
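
/*
 * Usage sketch (assumes a hypothetical driver-side page table lock; see
 * Documentation/vm/hmm.rst): callers typically loop until the snapshot is
 * still valid against their mmu_interval_notifier, e.g.:
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	take_driver_page_table_lock();
 *	if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
 *		release_driver_page_table_lock();
 *		goto again;
 *	}
 *	... program the device page table from range.hmm_pfns[] ...
 *	release_driver_page_table_lock();
 *
 * take_driver_page_table_lock()/release_driver_page_table_lock() stand in
 * for whatever lock the driver uses to serialize device page table updates
 * against its mmu_interval_notifier callback.
 */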