Blame view
kernel/memremap.c
15.5 KB
92281dee8
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
9476df7d8
#include <linux/radix-tree.h>
7d3dcf26a
#include <linux/device.h>
92281dee8
#include <linux/types.h>
34c0fd540
#include <linux/pfn_t.h>
92281dee8
#include <linux/io.h>
#include <linux/mm.h>
41e94a851
#include <linux/memory_hotplug.h>
5042db43c
#include <linux/swap.h>
#include <linux/swapops.h>
92281dee8

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif
c269cba35
#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif
8f716c9b5
#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
182475b7a
{
ac343e882
	unsigned long pfn = PHYS_PFN(offset);
182475b7a

	/* In the simple case just return the existing linear address */
8f716c9b5
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
182475b7a
		return __va(offset);

c269cba35
	return NULL; /* fallback to arch_memremap_wb */
182475b7a
}
92281dee8

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
8f716c9b5
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
92281dee8
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
c907e0eb4
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
92281dee8
 *
1c29f25bf
 * MEMREMAP_WB - matches the default mapping for System RAM on
92281dee8
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
1c29f25bf
 * map System RAM with this mapping type will fail.
c907e0eb4
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but the mapping
 * is otherwise uncached.  Attempts to map System RAM with this mapping type
 * will fail.
92281dee8
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
1c29f25bf
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
92281dee8
	void *addr = NULL;
cf61e2a14

	if (!flags)
		return NULL;
92281dee8

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
92281dee8
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
1c29f25bf
		 * the requested range is potentially in System RAM.
92281dee8
		 */
		if (is_ram == REGION_INTERSECTS)
8f716c9b5
			addr = try_ram_remap(offset, size, flags);
182475b7a
		if (!addr)
c269cba35
			addr = arch_memremap_wb(offset, size);
92281dee8
	}

	/*
cf61e2a14
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
92281dee8
	 * address mapping.  Enforce that this mapping is not aliasing
1c29f25bf
	 * System RAM.
92281dee8
	 */
cf61e2a14
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
92281dee8
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

cf61e2a14
	if (!addr && (flags & MEMREMAP_WT))
92281dee8
		addr = ioremap_wt(offset, size);
c907e0eb4

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);
92281dee8

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
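
A minimal usage sketch of the flag-fallback behavior documented above (hypothetical caller; example_map_table(), fw_phys and fw_len are made-up names, not part of this file). MEMREMAP_WB is tried first, and MEMREMAP_WT is only attempted if a write-back mapping cannot be established:

	static int example_map_table(phys_addr_t fw_phys, size_t fw_len)
	{
		/* map a firmware table known to have no I/O side effects */
		void *tbl = memremap(fw_phys, fw_len, MEMREMAP_WB | MEMREMAP_WT);

		if (!tbl)
			return -ENOMEM;

		/* plain loads/stores are fine here; no readl()/writel() */
		memunmap(tbl);
		return 0;
	}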
7d3dcf26a

static void devm_memremap_release(struct device *dev, void *res)
{
9273a8bbf
	memunmap(*(void **)res);
7d3dcf26a
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;
538ea4aa4

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
7d3dcf26a
	if (!ptr)
b36f47617
		return ERR_PTR(-ENOMEM);
7d3dcf26a

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
93f834df9
	} else {
7d3dcf26a
		devres_free(ptr);
93f834df9
		return ERR_PTR(-ENXIO);
	}
7d3dcf26a

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
d741314fe
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
7d3dcf26a
}
EXPORT_SYMBOL(devm_memunmap);
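
The devres variant above ties the mapping's lifetime to the device. A sketch of the expected calling pattern (hypothetical probe function; the resource lookup is illustrative only). Note that devm_memremap() returns ERR_PTR()-encoded errors, unlike memremap() which returns NULL:

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		base = devm_memremap(&pdev->dev, res->start,
				resource_size(res), MEMREMAP_WB);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* no explicit devm_memunmap(); devres unwinds on unbind */
		return 0;
	}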
41e94a851

#ifdef CONFIG_ZONE_DEVICE
9476df7d8
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
41e94a851

struct page_map {
	struct resource res;
9476df7d8
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
4b94ffdc4
	struct vmem_altmap altmap;
41e94a851
};
ab1b597ee

static unsigned long order_at(struct resource *res, unsigned long pgoff)
9476df7d8
{
ab1b597ee
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

ab1b597ee
	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
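
To make order_at() concrete, here is a userspace sketch of the same arithmetic (assumptions: 64-bit longs, __builtin_ctzl standing in for the kernel's find_first_bit, and a made-up range of 1024 pages whose first physical pfn is 0x200):

	#include <stdio.h>

	static unsigned long rounddown_pow_of_two(unsigned long n)
	{
		while (n & (n - 1))
			n &= n - 1;	/* clear low set bits, keep the top one */
		return n;
	}

	int main(void)
	{
		unsigned long start_pfn = 0x200, nr_pages = 1024, pgoff = 0;

		while (pgoff < nr_pages) {
			unsigned long mask = (start_pfn + pgoff) |
					rounddown_pow_of_two(nr_pages - pgoff);
			unsigned long order = __builtin_ctzl(mask);

			/* prints: pgoff 0 -> order 9, then pgoff 512 -> order 9 */
			printf("pgoff %lu -> order %lu\n", pgoff, order);
			pgoff += 1UL << order;
		}
		return 0;
	}

Both iterations yield order 9 (512 pages): the alignment of the physical pgoff, not just the remaining size, bounds the order, which is exactly what OR-ing phys_pgoff into the mask enforces.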
5042db43c

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it.  This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...).  When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
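
The contract in the comment above implies a driver callback of roughly this shape (a sketch only: example_page_fault() and example_migrate_to_ram() are hypothetical driver code, and the zero return assumes the fault is fully handled by the migration):

	static int example_page_fault(struct vm_area_struct *vma,
			unsigned long addr, struct page *page,
			unsigned int flags, pmd_t *pmdp)
	{
		/* hypothetical helper: copy/migrate the page back to RAM */
		if (example_migrate_to_ram(page, vma, addr))
			return VM_FAULT_SIGBUS;	/* device gone, unplugged, ... */

		return 0;
	}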
801fc191b

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
ab1b597ee
{
	unsigned long pgoff, order;
9476df7d8

	mutex_lock(&pgmap_lock);
801fc191b
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
ab1b597ee
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
801fc191b
	}
9476df7d8
	mutex_unlock(&pgmap_lock);
ab1b597ee

	synchronize_rcu();
9476df7d8
}
5c2c2587b

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;
	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
9476df7d8

static void devm_memremap_pages_release(struct device *dev, void *data)
41e94a851
{
9476df7d8
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
4b94ffdc4
	struct dev_pagemap *pgmap = &page_map->pgmap;
713897038
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

5c2c2587b
	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

41e94a851
	/* pages are dead and unused, undo the arch mapping */
9476df7d8
	align_start = res->start & ~(SECTION_SIZE - 1);
1f21cd46c
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

f931ab479
	mem_hotplug_begin();
9476df7d8
	arch_remove_memory(align_start, align_size);
f931ab479
	mem_hotplug_done();

9049771f7
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
801fc191b
	pgmap_radix_release(res, -1);
4b94ffdc4
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
9476df7d8
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());
ab1b597ee
	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
9476df7d8
	return page_map ? &page_map->pgmap : NULL;
41e94a851
}
4b94ffdc4

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
5c2c2587b
 * @ref: a live per-cpu reference count
4b94ffdc4
 * @altmap: optional descriptor for allocating the memmap from @res
 *
5c2c2587b
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
713897038
 *    (or devm release event).  The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release().  The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
5c2c2587b
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
4b94ffdc4
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
5c2c2587b
		struct percpu_ref *ref, struct vmem_altmap *altmap)
41e94a851
{
ab1b597ee
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
9049771f7
	pgprot_t pgprot = PAGE_KERNEL;
4b94ffdc4
	struct dev_pagemap *pgmap;
41e94a851
	struct page_map *page_map;
1fdcce6e1
	int error, nid, is_ram, i = 0;
2a797fd8f
	struct dev_pagemap *conflict_pgmap;
5f29a77cd

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
2a797fd8f
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

d37a14bb5
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

143275421
	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
41e94a851
		return ERR_PTR(-ENXIO);
	}

5c2c2587b
	if (!ref)
		return ERR_PTR(-EINVAL);

538ea4aa4
	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
41e94a851
	if (!page_map)
		return ERR_PTR(-ENOMEM);
4b94ffdc4
	pgmap = &page_map->pgmap;
41e94a851

	memcpy(&page_map->res, res, sizeof(*res));

4b94ffdc4
	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
5c2c2587b
	pgmap->ref = ref;
4b94ffdc4
	pgmap->res = &page_map->res;
5042db43c
	pgmap->type = MEMORY_DEVICE_HOST;
	pgmap->page_fault = NULL;
	pgmap->page_free = NULL;
	pgmap->data = NULL;

9476df7d8
	mutex_lock(&pgmap_lock);
	error = 0;
ab1b597ee

	foreach_order_pgoff(res, order, pgoff) {
9476df7d8
		struct dev_pagemap *dup;

		rcu_read_lock();
ab1b597ee
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
9476df7d8
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
ab1b597ee
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
9476df7d8
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);

	if (error)
		goto err_radix;
41e94a851
	nid = dev_to_node(dev);
	if (nid < 0)
7eff93b7c
		nid = numa_mem_id();

9049771f7
	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

f931ab479
	mem_hotplug_begin();
3d79a728f
	error = arch_add_memory(nid, align_start, align_size, false);
f1dd2cd13
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
f931ab479
	mem_hotplug_done();
9476df7d8
	if (error)
		goto err_add_memory;

5c2c2587b
	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

d77a117e6
		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
5c2c2587b
		page->pgmap = pgmap;
713897038
		percpu_ref_get(ref);
1fdcce6e1
		if (!(++i % 1024))
			cond_resched();
5c2c2587b
	}
41e94a851
	devres_add(dev, page_map);
	return __va(res->start);
9476df7d8

 err_add_memory:
9049771f7
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
9476df7d8
 err_radix:
801fc191b
	pgmap_radix_release(res, pgoff);
9476df7d8
	devres_free(page_map);
	return ERR_PTR(error);
41e94a851
}
47d24f8c8
EXPORT_SYMBOL_GPL(devm_memremap_pages);
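
A sketch of the calling convention spelled out in the kernel-doc notes above (hypothetical driver code; example_enable_pages() is a made-up name, and the percpu_ref kill/exit sequencing is shown only in comments because it is the caller's responsibility):

	static void *example_enable_pages(struct device *dev,
			struct resource *res, struct percpu_ref *ref)
	{
		void *base;

		/* @ref must already be live (percpu_ref_init() done) */
		base = devm_memremap_pages(dev, res, ref, NULL /* no altmap */);
		if (IS_ERR(base))
			return base;

		/* pfn_to_page() is now valid across the range; each page
		 * carries a ->pgmap back pointer.  Teardown order (caller's
		 * job): percpu_ref_kill(ref), wait for all page references
		 * to drop, percpu_ref_exit() only after the devres release
		 * has run. */
		return base;
	}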
4b94ffdc4

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
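
A concrete reading of the altmap bookkeeping (made-up numbers; field names per struct vmem_altmap in include/linux/memremap.h of this vintage):

	/* illustrative only, not part of this file */
	struct vmem_altmap example = {
		.base_pfn = 0x100000,	/* first pfn of the device range */
		.reserve  = 2,		/* pfns the driver keeps untouched */
		.free     = 1022,	/* pfns available to back the memmap */
		.alloc    = 0,		/* pfns consumed so far */
	};

	/* vmem_altmap_offset(&example) == 2 + 1022 == 1024, so pfn_first()
	 * above starts handing out usable device pfns at base_pfn + 1024;
	 * everything below that backs the struct pages themselves. */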
4b94ffdc4

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
07061aab2
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
4b94ffdc4
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
07061aab2
	 * Unconditionally retrieve a dev_pagemap associated with the
4b94ffdc4
	 * given physical address, this is only for use in the
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
41e94a851
#endif /* CONFIG_ZONE_DEVICE */

df6ad6983
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
7b2d55d2c
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is free, and the refcount is
	 * stable as nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
c733a8287
		mem_cgroup_uncharge(page);
7b2d55d2c

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
df6ad6983
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */