// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

  struct hmm_vma_walk {
  	struct hmm_range	*range;
  	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
  }
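
/*
 * Note that hmm_vma_fault() never returns 0: once the pages have been faulted
 * in it returns -EBUSY so that the retry loop in hmm_range_fault() restarts
 * the walk from hmm_vma_walk->last and re-reads the now-populated page tables.
 */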

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used two ways: in the first, the HMM user coalesces multiple
	 * page faults into one request and sets flags per pfn for those
	 * faults; in the second, the HMM user wants to pre-fault a range
	 * with specific flags. For the latter it would be a waste to have
	 * the user pre-fill the pfn array with a default flags value. Both
	 * modes are sketched in the comment following this function.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
  }
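
/*
 * The two modes described in hmm_pte_need_fault() above, as a minimal,
 * illustrative caller-side sketch ("range" is a struct hmm_range the caller
 * owns and "i" indexes one page of interest):
 *
 *	// Per-pfn requests: the mask lets individual entries through.
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = ~0UL;
 *	range.hmm_pfns[i] = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *
 *	// Pre-fault the whole range: per-pfn flags are ignored, so the
 *	// caller does not need to pre-fill the pfn array.
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *	range.pfn_flags_mask = 0;
 */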

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
  }
  
  static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
  }
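
/*
 * The helpers above encode the mapping order into the returned flags above
 * HMM_PFN_ORDER_SHIFT; a caller can recover it with hmm_pfn_to_map_order()
 * from include/linux/hmm.h, e.g. (illustrative only):
 *
 *	unsigned long npages = 1UL << hmm_pfn_to_map_order(range->hmm_pfns[i]);
 */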

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

  static inline bool hmm_is_device_private_entry(struct hmm_range *range,
  		swp_entry_t entry)
  {
  	return is_device_private_entry(entry) &&
  		device_private_entry_to_page(entry)->pgmap->owner ==
  		range->dev_private_owner;
  }
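
/*
 * Only device private pages whose pgmap->owner matches the caller-provided
 * range->dev_private_owner are reported as device private below; entries
 * owned by anyone else are handled like any other non-present pte (left
 * empty, or -EFAULT if a fault was requested). A caller-side sketch, where
 * "drvdata" stands for whatever cookie the driver used as pgmap->owner when
 * it registered its device memory:
 *
 *	range.dev_private_owner = drvdata;
 */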

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here; even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in the pfn array. All entries < last in the pfn array are
		 * set to their output, and all >= are still at their input
		 * values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
  EXPORT_SYMBOL(hmm_range_fault);
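
/*
 * Typical caller flow (a minimal sketch following Documentation/vm/hmm.rst;
 * "interval_sub", "mm", "driver_lock" and NPAGES are assumed to exist in the
 * driver and are not part of this file; the driver's invalidate callback is
 * expected to take the same driver_lock):
 *
 *	unsigned long pfns[NPAGES];
 *	struct hmm_range range = {
 *		.notifier = &interval_sub,
 *		.start = start,
 *		.end = start + NPAGES * PAGE_SIZE,
 *		.hmm_pfns = pfns,
 *		.default_flags = HMM_PFN_REQ_FAULT,
 *	};
 *	int ret;
 *
 *	do {
 *		range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				continue;
 *			return ret;
 *		}
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(&interval_sub,
 *					    range.notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			continue;
 *		}
 *		break;
 *	} while (true);
 *
 *	// program device page tables from pfns[], then:
 *	mutex_unlock(&driver_lock);
 */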