mm/hmm.c
  /*
   * Copyright 2013 Red Hat Inc.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * Authors: Jérôme Glisse <jglisse@redhat.com>
   */
  /*
   * Refer to include/linux/hmm.h for information about heterogeneous memory
   * management or HMM for short.
   */
  #include <linux/mm.h>
  #include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
  
  #define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};
  
  /*
   * hmm_register - register HMM against an mm (HMM internal)
   *
   * @mm: mm struct to attach to
   *
   * This is not intended to be used directly by device drivers. It allocates an
   * HMM struct if mm does not have one, and initializes it.
   */
  static struct hmm *hmm_register(struct mm_struct *mm)
  {
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup) {
		mmu_notifier_unregister(&hmm->mmu_notifier, mm);
		kfree(hmm);
	}

  	return mm->hmm;
  }
  
  void hmm_mm_destroy(struct mm_struct *mm)
  {
  	kfree(mm->hmm);
  }
  static void hmm_invalidate_range(struct hmm *hmm,
  				 enum hmm_update_type action,
  				 unsigned long start,
  				 unsigned long end)
  {
  	struct hmm_mirror *mirror;
  	struct hmm_range *range;
  
  	spin_lock(&hmm->lock);
  	list_for_each_entry(range, &hmm->ranges, list) {
  		unsigned long addr, idx, npages;
  
  		if (end < range->start || start >= range->end)
  			continue;
  
  		range->valid = false;
  		addr = max(start, range->start);
  		idx = (addr - range->start) >> PAGE_SHIFT;
  		npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
  		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
  	}
  	spin_unlock(&hmm->lock);
  
  	down_read(&hmm->mirrors_sem);
  	list_for_each_entry(mirror, &hmm->mirrors, list)
  		mirror->ops->sync_cpu_device_pagetables(mirror, action,
  							start, end);
  	up_read(&hmm->mirrors_sem);
  }
  static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
  {
  	struct hmm_mirror *mirror;
  	struct hmm *hmm = mm->hmm;
  
  	down_write(&hmm->mirrors_sem);
  	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
  					  list);
  	while (mirror) {
  		list_del_init(&mirror->list);
  		if (mirror->ops->release) {
  			/*
  			 * Drop mirrors_sem so callback can wait on any pending
  			 * work that might itself trigger mmu_notifier callback
  			 * and thus would deadlock with us.
  			 */
  			up_write(&hmm->mirrors_sem);
  			mirror->ops->release(mirror);
  			down_write(&hmm->mirrors_sem);
  		}
  		mirror = list_first_entry_or_null(&hmm->mirrors,
  						  struct hmm_mirror, list);
  	}
  	up_write(&hmm->mirrors_sem);
  }
  static void hmm_invalidate_range_start(struct mmu_notifier *mn,
  				       struct mm_struct *mm,
  				       unsigned long start,
  				       unsigned long end)
  {
  	struct hmm *hmm = mm->hmm;
  
  	VM_BUG_ON(!hmm);
  
  	atomic_inc(&hmm->sequence);
  }
  
  static void hmm_invalidate_range_end(struct mmu_notifier *mn,
  				     struct mm_struct *mm,
  				     unsigned long start,
  				     unsigned long end)
  {
  	struct hmm *hmm = mm->hmm;
  
  	VM_BUG_ON(!hmm);
  
  	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
  }
  
  static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
  	.invalidate_range_start	= hmm_invalidate_range_start,
  	.invalidate_range_end	= hmm_invalidate_range_end,
  };
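
/*
 * Note on the callbacks above: invalidate_range_start() only bumps the
 * sequence counter, while invalidate_range_end() marks every overlapping
 * snapshot range invalid and forwards the event to each registered mirror
 * through sync_cpu_device_pagetables().
 */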
  
  /*
   * hmm_mirror_register() - register a mirror against an mm
   *
   * @mirror: new mirror struct to register
   * @mm: mm to register against
   *
   * To start mirroring a process address space, the device driver must register
   * an HMM mirror struct.
   *
   * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
   */
  int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
  {
  	/* Sanity check */
  	if (!mm || !mirror || !mirror->ops)
  		return -EINVAL;

again:
  	mirror->hmm = hmm_register(mm);
  	if (!mirror->hmm)
  		return -ENOMEM;
  
  	down_write(&mirror->hmm->mirrors_sem);
  	if (mirror->hmm->mm == NULL) {
  		/*
  		 * A racing hmm_mirror_unregister() is about to destroy the hmm
  		 * struct. Try again to allocate a new one.
  		 */
  		up_write(&mirror->hmm->mirrors_sem);
  		mirror->hmm = NULL;
  		goto again;
  	} else {
  		list_add(&mirror->list, &mirror->hmm->mirrors);
  		up_write(&mirror->hmm->mirrors_sem);
  	}
  
  	return 0;
  }
  EXPORT_SYMBOL(hmm_mirror_register);
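
/*
 * Example usage (an illustrative sketch only; the driver structure and
 * callback names below are hypothetical, not part of this API):
 *
 *   static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                             enum hmm_update_type update,
 *                                             unsigned long start,
 *                                             unsigned long end)
 *   {
 *     // Invalidate the device page table for [start, end)
 *   }
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *     .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *   };
 *
 *   // With mm->mmap_sem held in write mode:
 *   my_device->mirror.ops = &my_mirror_ops;
 *   ret = hmm_mirror_register(&my_device->mirror, mm);
 */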
  
  /*
   * hmm_mirror_unregister() - unregister a mirror
   *
 * @mirror: mirror struct to unregister
   *
   * Stop mirroring a process address space, and cleanup.
   */
  void hmm_mirror_unregister(struct hmm_mirror *mirror)
  {
  	bool should_unregister = false;
  	struct mm_struct *mm;
  	struct hmm *hmm;
  
  	if (mirror->hmm == NULL)
  		return;
  	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
  	should_unregister = list_empty(&hmm->mirrors);
  	mirror->hmm = NULL;
  	mm = hmm->mm;
  	hmm->mm = NULL;
  	up_write(&hmm->mirrors_sem);
  
  	if (!should_unregister || mm == NULL)
  		return;
  
  	spin_lock(&mm->page_table_lock);
  	if (mm->hmm == hmm)
  		mm->hmm = NULL;
  	spin_unlock(&mm->page_table_lock);
  
  	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
  	kfree(hmm);
  }
  EXPORT_SYMBOL(hmm_mirror_unregister);
  struct hmm_vma_walk {
  	struct hmm_range	*range;
  	unsigned long		last;
  	bool			fault;
  	bool			block;
};

  static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
  			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	int r;
  
  	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	r = handle_mm_fault(vma, addr, flags);
	if (r & VM_FAULT_RETRY)
		return -EBUSY;
	if (r & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}
  static int hmm_pfns_bad(unsigned long addr,
  			unsigned long end,
  			struct mm_walk *walk)
  {
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];
  
  	return 0;
  }

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
  {
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}
  
  static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
  				      uint64_t pfns, uint64_t cpu_flags,
  				      bool *fault, bool *write_fault)
  {
	struct hmm_range *range = hmm_vma_walk->range;

  	*fault = *write_fault = false;
  	if (!hmm_vma_walk->fault)
  		return;
  
	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;

	/* If this is device memory then only fault if explicitly requested */
  	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
  		/* Do we fault on device memory ? */
  		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
  			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
  			*fault = true;
  		}
		return;
	}
  
  	/* If CPU page table is not valid then we need to fault */
  	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
  	/* Need to write fault ? */
  	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
  	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
  		*write_fault = true;
  		*fault = true;
  	}
  }
  
  static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
  				 const uint64_t *pfns, unsigned long npages,
  				 uint64_t cpu_flags, bool *fault,
  				 bool *write_fault)
  {
  	unsigned long i;
  
  	if (!hmm_vma_walk->fault) {
  		*fault = *write_fault = false;
  		return;
  	}
  
  	for (i = 0; i < npages; ++i) {
  		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
  				   fault, write_fault);
  		if ((*fault) || (*write_fault))
  			return;
  	}
  }
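
/*
 * Note: hmm_vma_walk_hole() below is the pte_hole callback of the page
 * table walk: no valid page table entries cover that part of the range,
 * so it either faults pages in (when the walk was asked to fault) or
 * reports the holes as HMM_PFN_NONE.
 */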
  
  static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
  			     struct mm_walk *walk)
  {
  	struct hmm_vma_walk *hmm_vma_walk = walk->private;
  	struct hmm_range *range = hmm_vma_walk->range;
  	bool fault, write_fault;
  	unsigned long i, npages;
  	uint64_t *pfns;
  
  	i = (addr - range->start) >> PAGE_SHIFT;
  	npages = (end - addr) >> PAGE_SHIFT;
  	pfns = &range->pfns[i];
  	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
  			     0, &fault, &write_fault);
  	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
  }

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
  static int hmm_vma_handle_pmd(struct mm_walk *walk,
  			      unsigned long addr,
  			      unsigned long end,
  			      uint64_t *pfns,
  			      pmd_t pmd)
  {
  	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

  	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

  	if (pmd_protnone(pmd) || fault || write_fault)
  		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
  	hmm_vma_walk->last = end;
  	return 0;
  }

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
  	if (pte_none(pte) || !pte_present(pte))
  		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
  static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
  			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
  			      uint64_t *pfn)
  {
  	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

  	*pfn = range->values[HMM_PFN_NONE];
  	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
  	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
  			goto fault;
  		return 0;
  	}
  
  	if (!pte_present(pte)) {
  		swp_entry_t entry = pte_to_swp_entry(pte);
  
  		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
  				goto fault;
  			return 0;
  		}
  
		/*
		 * This is a special swap entry: handle device private
		 * entries here, wait on migration entries when faulting,
		 * and report anything else as an error.
		 */
  		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
  				range->flags[HMM_PFN_WRITE] : 0;
  			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
  					   &fault, &write_fault);
  			if (fault || write_fault)
  				goto fault;
  			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
  			*pfn |= cpu_flags;
  			return 0;
  		}
  
  		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
  				pte_unmap(ptep);
  				hmm_vma_walk->last = addr;
  				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
  				return -EAGAIN;
  			}
  			return 0;
  		}
  
  		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;
	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
  	return 0;
  
  fault:
  	pte_unmap(ptep);
  	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
  static int hmm_vma_walk_pmd(pmd_t *pmdp,
  			    unsigned long start,
  			    unsigned long end,
  			    struct mm_walk *walk)
  {
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
  	pte_t *ptep;
  
  	i = (addr - range->start) >> PAGE_SHIFT;
  
  again:
  	if (pmd_none(*pmdp))
  		return hmm_vma_walk_hole(start, end, walk);
	if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
  		return hmm_pfns_bad(start, end, walk);
  
  	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
  		pmd_t pmd;
  
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
  		pmd = pmd_read_atomic(pmdp);
  		barrier();
  		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
  			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
  	}
  
  	if (pmd_bad(*pmdp))
  		return hmm_pfns_bad(start, end, walk);
  
  	ptep = pte_offset_map(pmdp, addr);
  	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

  		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
  		if (r) {
			/* hmm_vma_handle_pte() already did the pte_unmap() */
  			hmm_vma_walk->last = addr;
  			return r;
		}
  	}
  	pte_unmap(ptep - 1);
  	hmm_vma_walk->last = addr;
  	return 0;
  }

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
  			   unsigned long addr,
  			   unsigned long end)
  {
  	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
  static void hmm_pfns_special(struct hmm_range *range)
  {
  	unsigned long addr = range->start, i = 0;
  
  	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

  /*
   * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
   *
   * This snapshots the CPU page table for a range of virtual addresses. Snapshot
   * validity is tracked by range struct. See hmm_vma_range_done() for further
   * information.
   *
   * The range struct is initialized here. It tracks the CPU page table, but only
   * if the function returns success (0), in which case the caller must then call
   * hmm_vma_range_done() to stop CPU page table update tracking on this range.
   *
   * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
   * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
   */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

  	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
  		return -EINVAL;
  
  	hmm = hmm_register(vma->vm_mm);
  	if (!hmm)
  		return -ENOMEM;
  	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
  	if (!hmm->mmu_notifier.ops)
  		return -EINVAL;
  	/* FIXME support hugetlb fs */
  	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
  		hmm_pfns_special(range);
  		return -EINVAL;
  	}
  	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
  		return -EPERM;
  	}

	/* Initialize range to track CPU page table update */
  	spin_lock(&hmm->lock);
  	range->valid = true;
  	list_add_rcu(&range->list, &hmm->ranges);
  	spin_unlock(&hmm->lock);
  	hmm_vma_walk.fault = false;
  	hmm_vma_walk.range = range;
  	mm_walk.private = &hmm_vma_walk;
  	mm_walk.vma = vma;
  	mm_walk.mm = vma->vm_mm;
  	mm_walk.pte_entry = NULL;
  	mm_walk.test_walk = NULL;
  	mm_walk.hugetlb_entry = NULL;
  	mm_walk.pmd_entry = hmm_vma_walk_pmd;
  	mm_walk.pte_hole = hmm_vma_walk_hole;
  	walk_page_range(range->start, range->end, &mm_walk);
  	return 0;
  }
  EXPORT_SYMBOL(hmm_vma_get_pfns);
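
/*
 * Example usage (an illustrative sketch; the setup values and the driver's
 * flag/value tables are hypothetical):
 *
 *   struct hmm_range range;
 *
 *   range.vma = vma;
 *   range.start = start;        // page aligned, within vma
 *   range.end = end;            // page aligned, within vma
 *   range.pfns = pfns;          // (end - start) >> PAGE_SHIFT entries
 *   range.flags = my_flags;     // driver's pfn flag encoding
 *   range.values = my_values;   // driver's special pfn values
 *
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_vma_get_pfns(&range);
 *   if (!ret) {
 *     // ... use range.pfns ..., then always pair with:
 *     if (!hmm_vma_range_done(&range))
 *       // snapshot was invalidated, discard or retry
 *   }
 *   up_read(&mm->mmap_sem);
 */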
  
  /*
   * hmm_vma_range_done() - stop tracking change to CPU page table over a range
   * @range: range being tracked
   * Returns: false if range data has been invalidated, true otherwise
   *
   * Range struct is used to track updates to the CPU page table after a call to
   * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
   * functions, it must call the hmm_vma_range_done() function, which will then
   * stop tracking CPU page table updates.
   *
   * Note that device driver must still implement general CPU page table update
   * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
   * the mmu_notifier API directly.
   *
   * CPU page table update tracking done through hmm_range is only temporary and
   * to be used while trying to duplicate CPU page table contents for a range of
   * virtual addresses.
   *
   * There are two ways to use this :
   * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
   *   trans = device_build_page_table_update_transaction(pfns);
   *   device_page_table_lock();
   *   if (!hmm_vma_range_done(range)) {
   *     device_page_table_unlock();
   *     goto again;
   *   }
   *   device_commit_transaction(trans);
   *   device_page_table_unlock();
   *
   * Or:
   *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
   *   device_page_table_lock();
   *   hmm_vma_range_done(range);
   *   device_update_page_table(range->pfns);
   *   device_page_table_unlock();
   */
  bool hmm_vma_range_done(struct hmm_range *range)
  {
  	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
  	struct hmm *hmm;
  
  	if (range->end <= range->start) {
  		BUG();
  		return false;
  	}
  	hmm = hmm_register(range->vma->vm_mm);
  	if (!hmm) {
  		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
  		return false;
  	}
  
  	spin_lock(&hmm->lock);
  	list_del_rcu(&range->list);
  	spin_unlock(&hmm->lock);
  
  	return range->valid;
  }
  EXPORT_SYMBOL(hmm_vma_range_done);
  
  /*
   * hmm_vma_fault() - try to fault some address in a virtual address range
   * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
   *
   * This is similar to a regular CPU page fault except that it will not trigger
   * any memory migration if the memory being faulted is not accessible by CPUs.
   *
   * On error, for one virtual address in the range, the function will mark the
   * corresponding HMM pfn entry with an error flag.
   *
   * Expected use pattern:
   * retry:
   *   down_read(&mm->mmap_sem);
   *   // Find vma and address device wants to fault, initialize hmm_pfn_t
   *   // array accordingly
   *   ret = hmm_vma_fault(range, write, block);
   *   switch (ret) {
   *   case -EAGAIN:
   *     hmm_vma_range_done(range);
   *     // You might want to rate limit or yield to play nicely, you may
   *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
   *     goto retry;
   *   case 0:
   *     break;
   *   case -ENOMEM:
   *   case -EINVAL:
   *   case -EPERM:
   *   default:
   *     // Handle error !
   *     up_read(&mm->mmap_sem)
   *     return;
   *   }
   *   // Take device driver lock that serialize device page table update
   *   driver_lock_device_page_table_update();
   *   hmm_vma_range_done(range);
   *   // Commit pfns we got from hmm_vma_fault()
   *   driver_unlock_device_page_table_update();
   *   up_read(&mm->mmap_sem)
   *
   * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURN SUCCESS (0)
   * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
   *
   * YOU HAVE BEEN WARNED !
   */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
  	struct vm_area_struct *vma = range->vma;
  	unsigned long start = range->start;
  	struct hmm_vma_walk hmm_vma_walk;
  	struct mm_walk mm_walk;
  	struct hmm *hmm;
  	int ret;
  
  	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
  		return -EINVAL;
  
  	hmm = hmm_register(vma->vm_mm);
  	if (!hmm) {
  		hmm_pfns_clear(range, range->pfns, range->start, range->end);
  		return -ENOMEM;
  	}
  	/* Caller must have registered a mirror using hmm_mirror_register() */
  	if (!hmm->mmu_notifier.ops)
  		return -EINVAL;
  	/* FIXME support hugetlb fs */
  	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
  		hmm_pfns_special(range);
  		return -EINVAL;
  	}
  	if (!(vma->vm_flags & VM_READ)) {
  		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
  		return -EPERM;
  	}
  	/* Initialize range to track CPU page table update */
  	spin_lock(&hmm->lock);
  	range->valid = true;
  	list_add_rcu(&range->list, &hmm->ranges);
  	spin_unlock(&hmm->lock);
  	hmm_vma_walk.fault = true;
  	hmm_vma_walk.block = block;
  	hmm_vma_walk.range = range;
  	mm_walk.private = &hmm_vma_walk;
  	hmm_vma_walk.last = range->start;
  
  	mm_walk.vma = vma;
  	mm_walk.mm = vma->vm_mm;
  	mm_walk.pte_entry = NULL;
  	mm_walk.test_walk = NULL;
  	mm_walk.hugetlb_entry = NULL;
  	mm_walk.pmd_entry = hmm_vma_walk_pmd;
  	mm_walk.pte_hole = hmm_vma_walk_hole;
  
  	do {
  		ret = walk_page_range(start, range->end, &mm_walk);
  		start = hmm_vma_walk.last;
  	} while (ret == -EAGAIN);
  
  	if (ret) {
  		unsigned long i;
  
  		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
  		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
  			       range->end);
  		hmm_vma_range_done(range);
  	}
  	return ret;
  }
  EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
  struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
  				       unsigned long addr)
  {
  	struct page *page;
  
  	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
  	if (!page)
  		return NULL;
  	lock_page(page);
  	return page;
  }
  EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
  
  
  static void hmm_devmem_ref_release(struct percpu_ref *ref)
  {
  	struct hmm_devmem *devmem;
  
  	devmem = container_of(ref, struct hmm_devmem, ref);
  	complete(&devmem->completion);
  }
  
  static void hmm_devmem_ref_exit(void *data)
  {
  	struct percpu_ref *ref = data;
  	struct hmm_devmem *devmem;
  
  	devmem = container_of(ref, struct hmm_devmem, ref);
  	percpu_ref_exit(ref);
  	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
  }
  
  static void hmm_devmem_ref_kill(void *data)
  {
  	struct percpu_ref *ref = data;
  	struct hmm_devmem *devmem;
  
  	devmem = container_of(ref, struct hmm_devmem, ref);
  	percpu_ref_kill(ref);
  	wait_for_completion(&devmem->completion);
  	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
  }
  
  static int hmm_devmem_fault(struct vm_area_struct *vma,
  			    unsigned long addr,
  			    const struct page *page,
  			    unsigned int flags,
  			    pmd_t *pmdp)
  {
  	struct hmm_devmem *devmem = page->pgmap->data;
  
  	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
  }
  
  static void hmm_devmem_free(struct page *page, void *data)
  {
  	struct hmm_devmem *devmem = data;
  
  	devmem->ops->free(devmem, page);
  }
  
  static DEFINE_MUTEX(hmm_devmem_lock);
  static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
  
  static void hmm_devmem_radix_release(struct resource *resource)
  {
	resource_size_t key, align_start, align_size;
  
  	align_start = resource->start & ~(PA_SECTION_SIZE - 1);
  	align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);
  
  	mutex_lock(&hmm_devmem_lock);
  	for (key = resource->start;
  	     key <= resource->end;
  	     key += PA_SECTION_SIZE)
  		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
  	mutex_unlock(&hmm_devmem_lock);
  }
  
  static void hmm_devmem_release(struct device *dev, void *data)
  {
  	struct hmm_devmem *devmem = data;
  	struct resource *resource = devmem->resource;
  	unsigned long start_pfn, npages;
  	struct zone *zone;
  	struct page *page;
  
  	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
  		percpu_ref_put(&devmem->ref);
  	}
  
  	/* pages are dead and unused, undo the arch mapping */
  	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
  	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
  
  	page = pfn_to_page(start_pfn);
  	zone = page_zone(page);
  
  	mem_hotplug_begin();
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
  		__remove_pages(zone, start_pfn, npages, NULL);
  	else
  		arch_remove_memory(start_pfn << PAGE_SHIFT,
  				   npages << PAGE_SHIFT, NULL);
  	mem_hotplug_done();
  
  	hmm_devmem_radix_release(resource);
  }
  static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
  {
  	resource_size_t key, align_start, align_size, align_end;
  	struct device *device = devmem->device;
  	int ret, nid, is_ram;
  	unsigned long pfn;
  
  	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
  	align_size = ALIGN(devmem->resource->start +
  			   resource_size(devmem->resource),
  			   PA_SECTION_SIZE) - align_start;
  
  	is_ram = region_intersects(align_start, align_size,
  				   IORESOURCE_SYSTEM_RAM,
  				   IORES_DESC_NONE);
  	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, devmem->resource);
  		return -ENXIO;
  	}
  	if (is_ram == REGION_INTERSECTS)
  		return -ENXIO;
  	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
  		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
  	else
  		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
  	devmem->pagemap.res = *devmem->resource;
  	devmem->pagemap.page_fault = hmm_devmem_fault;
  	devmem->pagemap.page_free = hmm_devmem_free;
  	devmem->pagemap.dev = devmem->device;
  	devmem->pagemap.ref = &devmem->ref;
  	devmem->pagemap.data = devmem;
  
  	mutex_lock(&hmm_devmem_lock);
  	align_end = align_start + align_size - 1;
  	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
  		struct hmm_devmem *dup;
  		dup = radix_tree_lookup(&hmm_devmem_radix,
  					key >> PA_SECTION_SHIFT);
  		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
  			mutex_unlock(&hmm_devmem_lock);
  			ret = -EBUSY;
  			goto error;
  		}
  		ret = radix_tree_insert(&hmm_devmem_radix,
  					key >> PA_SECTION_SHIFT,
  					devmem);
  		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
  			mutex_unlock(&hmm_devmem_lock);
  			goto error_radix;
  		}
  	}
  	mutex_unlock(&hmm_devmem_lock);
  
  	nid = dev_to_node(device);
  	if (nid < 0)
  		nid = numa_mem_id();
  
  	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for it the way arch_add_memory() would.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	 */
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
  		ret = arch_add_memory(nid, align_start, align_size, NULL,
  				false);
  	else
  		ret = add_pages(nid, align_start >> PAGE_SHIFT,
  				align_size >> PAGE_SHIFT, NULL, false);
  	if (ret) {
  		mem_hotplug_done();
  		goto error_add_memory;
  	}
  	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
  				align_start >> PAGE_SHIFT,
  				align_size >> PAGE_SHIFT, NULL);
  	mem_hotplug_done();
  
  	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
  		struct page *page = pfn_to_page(pfn);
  
  		page->pgmap = &devmem->pagemap;
  	}
  	return 0;
  
  error_add_memory:
  	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
  error_radix:
  	hmm_devmem_radix_release(devmem->resource);
  error:
  	return ret;
  }
  
  static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
  {
  	struct hmm_devmem *devmem = data;
  
  	return devmem->resource == match_data;
  }
  
  static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
  {
  	devres_release(devmem->device, &hmm_devmem_release,
  		       &hmm_devmem_match, devmem->resource);
  }
  
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization;
 * it is then responsible for memory management. HMM only provides helpers.
 */
  struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
  				  struct device *device,
  				  unsigned long size)
  {
  	struct hmm_devmem *devmem;
  	resource_size_t addr;
  	int ret;
  	dev_pagemap_get_ops();
  
  	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
  				   GFP_KERNEL, dev_to_node(device));
  	if (!devmem)
  		return ERR_PTR(-ENOMEM);
  
  	init_completion(&devmem->completion);
  	devmem->pfn_first = -1UL;
  	devmem->pfn_last = -1UL;
  	devmem->resource = NULL;
  	devmem->device = device;
  	devmem->ops = ops;
  
  	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
  			      0, GFP_KERNEL);
  	if (ret)
  		goto error_percpu_ref;
  
  	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
  	if (ret)
  		goto error_devm_add_action;
  
  	size = ALIGN(size, PA_SECTION_SIZE);
  	addr = min((unsigned long)iomem_resource.end,
  		   (1UL << MAX_PHYSMEM_BITS) - 1);
  	addr = addr - size + 1UL;
  
  	/*
  	 * FIXME add a new helper to quickly walk resource tree and find free
  	 * range
  	 *
  	 * FIXME what about ioport_resource resource ?
  	 */
  	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
  		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
  		if (ret != REGION_DISJOINT)
  			continue;
  
  		devmem->resource = devm_request_mem_region(device, addr, size,
  							   dev_name(device));
  		if (!devmem->resource) {
  			ret = -ENOMEM;
  			goto error_no_resource;
  		}
  		break;
  	}
  	if (!devmem->resource) {
  		ret = -ERANGE;
  		goto error_no_resource;
  	}
  
  	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
  	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
  	devmem->pfn_last = devmem->pfn_first +
  			   (resource_size(devmem->resource) >> PAGE_SHIFT);
  
  	ret = hmm_devmem_pages_create(devmem);
  	if (ret)
  		goto error_pages;
  
  	devres_add(device, devmem);
  
  	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
  	if (ret) {
  		hmm_devmem_remove(devmem);
  		return ERR_PTR(ret);
  	}
  
  	return devmem;
  
  error_pages:
  	devm_release_mem_region(device, devmem->resource->start,
  				resource_size(devmem->resource));
  error_no_resource:
  error_devm_add_action:
  	hmm_devmem_ref_kill(&devmem->ref);
  	hmm_devmem_ref_exit(&devmem->ref);
  error_percpu_ref:
  	devres_free(devmem);
  	return ERR_PTR(ret);
  }
  EXPORT_SYMBOL(hmm_devmem_add);
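
/*
 * Example usage (an illustrative sketch; the ops implementations, device
 * pointer and size below are hypothetical):
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *     .free = my_devmem_free,    // called when a device page is freed
 *     .fault = my_devmem_fault,  // called on CPU fault to device memory
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_64M);
 *   if (IS_ERR(devmem))
 *     return PTR_ERR(devmem);
 *   // Device pages run from devmem->pfn_first to devmem->pfn_last - 1
 */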
  struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
  					   struct device *device,
  					   struct resource *res)
  {
  	struct hmm_devmem *devmem;
  	int ret;
  
  	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
  		return ERR_PTR(-EINVAL);
  	dev_pagemap_get_ops();
  
  	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
  				   GFP_KERNEL, dev_to_node(device));
  	if (!devmem)
  		return ERR_PTR(-ENOMEM);
  
  	init_completion(&devmem->completion);
  	devmem->pfn_first = -1UL;
  	devmem->pfn_last = -1UL;
  	devmem->resource = res;
  	devmem->device = device;
  	devmem->ops = ops;
  
  	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
  			      0, GFP_KERNEL);
  	if (ret)
  		goto error_percpu_ref;
  
  	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
  	if (ret)
  		goto error_devm_add_action;
  
  
  	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
  	devmem->pfn_last = devmem->pfn_first +
  			   (resource_size(devmem->resource) >> PAGE_SHIFT);
  
  	ret = hmm_devmem_pages_create(devmem);
  	if (ret)
  		goto error_devm_add_action;
  
  	devres_add(device, devmem);
  
  	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
  	if (ret) {
  		hmm_devmem_remove(devmem);
  		return ERR_PTR(ret);
  	}
  
  	return devmem;
  
  error_devm_add_action:
  	hmm_devmem_ref_kill(&devmem->ref);
  	hmm_devmem_ref_exit(&devmem->ref);
  error_percpu_ref:
  	devres_free(devmem);
  	return ERR_PTR(ret);
  }
  EXPORT_SYMBOL(hmm_devmem_add_resource);
  /*
   * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
   *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
   *
   * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
   * of the device driver. It will free struct page and remove the resource that
   * reserved the physical address range for this device memory.
   */
  void hmm_devmem_remove(struct hmm_devmem *devmem)
  {
  	resource_size_t start, size;
  	struct device *device;
  	bool cdm = false;
  
  	if (!devmem)
  		return;
  
  	device = devmem->device;
  	start = devmem->resource->start;
  	size = resource_size(devmem->resource);
  	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
  	hmm_devmem_ref_kill(&devmem->ref);
  	hmm_devmem_ref_exit(&devmem->ref);
  	hmm_devmem_pages_remove(devmem);
  	if (!cdm)
  		devm_release_mem_region(device, start, size);
  }
  EXPORT_SYMBOL(hmm_devmem_remove);
  
  /*
 * A device driver that wants to handle multiple devices' memory through a
   * single fake device can use hmm_device to do so. This is purely a helper
   * and it is not needed to make use of any HMM functionality.
   */
  #define HMM_DEVICE_MAX 256
  
  static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
  static DEFINE_SPINLOCK(hmm_device_lock);
  static struct class *hmm_device_class;
  static dev_t hmm_device_devt;
  
  static void hmm_device_release(struct device *device)
  {
  	struct hmm_device *hmm_device;
  
  	hmm_device = container_of(device, struct hmm_device, device);
  	spin_lock(&hmm_device_lock);
  	clear_bit(hmm_device->minor, hmm_device_mask);
  	spin_unlock(&hmm_device_lock);
  
  	kfree(hmm_device);
  }
  
  struct hmm_device *hmm_device_new(void *drvdata)
  {
  	struct hmm_device *hmm_device;
  
  	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
  	if (!hmm_device)
  		return ERR_PTR(-ENOMEM);
  
  	spin_lock(&hmm_device_lock);
  	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
  	if (hmm_device->minor >= HMM_DEVICE_MAX) {
  		spin_unlock(&hmm_device_lock);
  		kfree(hmm_device);
  		return ERR_PTR(-EBUSY);
  	}
  	set_bit(hmm_device->minor, hmm_device_mask);
  	spin_unlock(&hmm_device_lock);
  
  	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
  	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
  					hmm_device->minor);
  	hmm_device->device.release = hmm_device_release;
  	dev_set_drvdata(&hmm_device->device, drvdata);
  	hmm_device->device.class = hmm_device_class;
  	device_initialize(&hmm_device->device);
  
  	return hmm_device;
  }
  EXPORT_SYMBOL(hmm_device_new);
  
  void hmm_device_put(struct hmm_device *hmm_device)
  {
  	put_device(&hmm_device->device);
  }
  EXPORT_SYMBOL(hmm_device_put);
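
/*
 * Example usage (an illustrative sketch; my_driver_data is hypothetical):
 *
 *   struct hmm_device *hmm_device = hmm_device_new(my_driver_data);
 *   if (IS_ERR(hmm_device))
 *     return PTR_ERR(hmm_device);
 *   // ... use &hmm_device->device as the device for hmm_devmem_add() ...
 *   hmm_device_put(hmm_device);
 */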
  
  static int __init hmm_init(void)
  {
  	int ret;
  
  	ret = alloc_chrdev_region(&hmm_device_devt, 0,
  				  HMM_DEVICE_MAX,
  				  "hmm_device");
  	if (ret)
  		return ret;
  
  	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
  	if (IS_ERR(hmm_device_class)) {
  		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
  		return PTR_ERR(hmm_device_class);
  	}
  	return 0;
  }
  
  device_initcall(hmm_init);
  #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */