mm/hmm.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of range being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return mm->hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}

static void hmm_invalidate_range(struct hmm *hmm,
				 enum hmm_update_type action,
				 unsigned long start,
				 unsigned long end)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (end < range->start || start >= range->end)
			continue;

		range->valid = false;
		addr = max(start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list)
		mirror->ops->sync_cpu_device_pagetables(mirror, action,
							start, end);
	up_read(&hmm->mirrors_sem);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      bool blockable)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	atomic_inc(&hmm->sequence);

	return 0;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	} else {
		list_add(&mirror->list, &mirror->hmm->mirrors);
		up_write(&mirror->hmm->mirrors_sem);
	}

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
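
/*
 * Illustrative sketch (not part of the upstream file): a driver that wants to
 * mirror a process address space would typically embed a struct hmm_mirror in
 * its per-process state and register it while holding mmap_sem in write mode,
 * as required above. The names my_mirror_ops, my_process and
 * my_sync_cpu_device_pagetables() below are hypothetical.
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *		.release = my_release,
 *	};
 *
 *	static int my_process_bind(struct my_process *p)
 *	{
 *		int ret;
 *
 *		p->mirror.ops = &my_mirror_ops;
 *		down_write(&current->mm->mmap_sem);
 *		ret = hmm_mirror_register(&p->mirror, current->mm);
 *		up_write(&current->mm->mmap_sem);
 *		return ret;
 *	}
 *
 * Unregistration (hmm_mirror_unregister()) would then happen when the driver
 * is done with the process, for instance from its file release path.
 */
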
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;

	i = (addr - range->start) >> PAGE_SHIFT;

again:
	if (pmd_none(*pmdp))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
		pmd_t pmd;

		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	if (pmd_bad(*pmdp))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 * vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);

/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or
 * by using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);
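
/*
 * Illustrative sketch (not part of the upstream file): what a driver-side
 * snapshot of a single vma could look like with the two helpers above. The
 * struct hmm_range fields are the ones used by this file (vma, start, end,
 * pfns, flags, values); my_flags, my_values, my_commit() and the device page
 * table lock helpers are hypothetical driver names.
 *
 *	struct hmm_range range = {
 *		.vma	= vma,
 *		.start	= start,
 *		.end	= end,
 *		.pfns	= pfns,		// (end - start) >> PAGE_SHIFT entries
 *		.flags	= my_flags,
 *		.values	= my_values,
 *	};
 *
 * again:
 *	if (hmm_vma_get_pfns(&range))
 *		return -EFAULT;
 *	// build the device page table update from range.pfns ...
 *	my_device_page_table_lock();
 *	if (!hmm_vma_range_done(&range)) {
 *		my_device_page_table_unlock();
 *		goto again;
 *	}
 *	my_commit();
 *	my_device_page_table_unlock();
 */
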
/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_monitor_end()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
	resource_size_t key;

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;
	struct zone *zone;
	struct page *page;

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);

	mem_hotplug_begin();
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		__remove_pages(zone, start_pfn, npages, NULL);
	else
		arch_remove_memory(start_pfn << PAGE_SHIFT,
				   npages << PAGE_SHIFT, NULL);
	mem_hotplug_done();

	hmm_devmem_radix_release(resource);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;
	unsigned long pfn;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
				   IORES_DESC_NONE);
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, devmem->resource);
		return -ENXIO;
	}
	if (is_ram == REGION_INTERSECTS)
		return -ENXIO;

	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	else
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		dup = radix_tree_lookup(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT);
		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
			ret = -EBUSY;
			goto error;
		}
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
					devmem);
		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
			goto error_radix;
		}
	}
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover
	 * the device memory is inaccessible, so we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	 */
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
		ret = arch_add_memory(nid, align_start, align_size, NULL,
				      false);
	else
		ret = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	if (ret) {
		mem_hotplug_done();
		goto error_add_memory;
	}
	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL);
	mem_hotplug_done();

	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->pgmap = &devmem->pagemap;
	}
	return 0;

error_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
	hmm_devmem_radix_release(devmem->resource);
error:
	return ret;
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							    dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
	if (ret)
		return ERR_PTR(ret);

	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
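
/*
 * Illustrative sketch (not part of the upstream file): a driver exposing its
 * on-board memory as device private memory could call hmm_devmem_add() at
 * probe time. The names my_devmem_fault(), my_devmem_free() and
 * my_memory_size are hypothetical; the callback prototypes are dictated by
 * struct hmm_devmem_ops in include/linux/hmm.h.
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free	= my_devmem_free,
 *		.fault	= my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, my_memory_size);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *
 * The lifetime of the hotplugged memory is tied to the device through the
 * devm actions registered above, so there is no explicit removal call.
 */
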
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */