kernel/memremap.c

  /*
   * Copyright(c) 2015 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License as
   * published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * General Public License for more details.
   */
  #include <linux/radix-tree.h>
  #include <linux/memremap.h>
  #include <linux/device.h>
  #include <linux/types.h>
  #include <linux/pfn_t.h>
  #include <linux/io.h>
  #include <linux/mm.h>
  #include <linux/memory_hotplug.h>
  
  #ifndef ioremap_cache
  /* temporary while we convert existing ioremap_cache users to memremap */
  __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
  {
  	return ioremap(offset, size);
  }
  #endif
  #ifndef arch_memremap_wb
  static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
  {
  	return (__force void *)ioremap_cache(offset, size);
  }
  #endif
  static void *try_ram_remap(resource_size_t offset, size_t size)
  {
  	unsigned long pfn = PHYS_PFN(offset);
  
  	/* In the simple case just return the existing linear address */
  	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
  		return __va(offset);
  	return NULL; /* fallback to arch_memremap_wb */
  }
  /**
   * memremap() - remap an iomem_resource as cacheable memory
   * @offset: iomem resource start address
   * @size: size of remap
   * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
   *
   * memremap() is "ioremap" for cases where it is known that the resource
   * being mapped does not have i/o side effects and the __iomem
   * annotation is not applicable. In the case of multiple flags, the different
   * mapping types will be attempted in the order listed below until one of
   * them succeeds.
   *
   * MEMREMAP_WB - matches the default mapping for System RAM on
   * the architecture.  This is usually a read-allocate write-back cache.
   * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
   * memremap() will bypass establishing a new mapping and instead return
   * a pointer into the direct map.
   *
   * MEMREMAP_WT - establish a mapping whereby writes either bypass the
   * cache or are written through to memory and never exist in a
   * cache-dirty state with respect to program visibility.  Attempts to
   * map System RAM with this mapping type will fail.
   *
   * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
   * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
   * uncached. Attempts to map System RAM with this mapping type will fail.
   */
  void *memremap(resource_size_t offset, size_t size, unsigned long flags)
  {
  	int is_ram = region_intersects(offset, size,
  				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
  	void *addr = NULL;
  	if (!flags)
  		return NULL;
  	if (is_ram == REGION_MIXED) {
  		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx
  ",
  				&offset, (unsigned long) size);
  		return NULL;
  	}
  
  	/* Try all mapping types requested until one returns non-NULL */
  	if (flags & MEMREMAP_WB) {
  		/*
  		 * MEMREMAP_WB is special in that it can be satisfied
  		 * from the direct map.  Some archs depend on the
  		 * capability of memremap() to autodetect cases where
  		 * the requested range is potentially in System RAM.
  		 */
  		if (is_ram == REGION_INTERSECTS)
  			addr = try_ram_remap(offset, size);
  		if (!addr)
  			addr = arch_memremap_wb(offset, size);
  	}
  
  	/*
  	 * If we don't have a mapping yet and other request flags are
  	 * present then we will be attempting to establish a new virtual
  	 * address mapping.  Enforce that this mapping is not aliasing
  	 * System RAM.
  	 */
  	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
  		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx
  ",
  				&offset, (unsigned long) size);
  		return NULL;
  	}
  	if (!addr && (flags & MEMREMAP_WT))
  		addr = ioremap_wt(offset, size);
  
  	if (!addr && (flags & MEMREMAP_WC))
  		addr = ioremap_wc(offset, size);
  
  	return addr;
  }
  EXPORT_SYMBOL(memremap);
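
  /*
   * Example (hypothetical caller; 'phys_addr' and 'len' are illustrative
   * names, not part of this file): map a range cacheably if possible,
   * falling back to write-through, then release it with memunmap() below.
   *
   *	void *p = memremap(phys_addr, len, MEMREMAP_WB | MEMREMAP_WT);
   *	if (!p)
   *		return -ENOMEM;
   *	...
   *	memunmap(p);
   */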
  
  void memunmap(void *addr)
  {
  	if (is_vmalloc_addr(addr))
  		iounmap((void __iomem *) addr);
  }
  EXPORT_SYMBOL(memunmap);
  
  static void devm_memremap_release(struct device *dev, void *res)
  {
  	memunmap(*(void **)res);
  }
  
  static int devm_memremap_match(struct device *dev, void *res, void *match_data)
  {
  	return *(void **)res == match_data;
  }
  
  void *devm_memremap(struct device *dev, resource_size_t offset,
  		size_t size, unsigned long flags)
  {
  	void **ptr, *addr;
  	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
  			dev_to_node(dev));
  	if (!ptr)
  		return ERR_PTR(-ENOMEM);
  
  	addr = memremap(offset, size, flags);
  	if (addr) {
  		*ptr = addr;
  		devres_add(dev, ptr);
  	} else {
  		devres_free(ptr);
  		return ERR_PTR(-ENXIO);
  	}
  
  	return addr;
  }
  EXPORT_SYMBOL(devm_memremap);
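
  /*
   * Example (hypothetical driver; 'res' and 'base' are assumptions): the
   * devres core runs devm_memremap_release() when @dev is unbound, so the
   * success path needs no explicit memunmap().
   *
   *	base = devm_memremap(dev, res->start, resource_size(res),
   *			MEMREMAP_WB);
   *	if (IS_ERR(base))
   *		return PTR_ERR(base);
   */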
  
  void devm_memunmap(struct device *dev, void *addr)
  {
  	WARN_ON(devres_release(dev, devm_memremap_release,
  				devm_memremap_match, addr));
  }
  EXPORT_SYMBOL(devm_memunmap);
  
  #ifdef CONFIG_ZONE_DEVICE
  static DEFINE_MUTEX(pgmap_lock);
  static RADIX_TREE(pgmap_radix, GFP_KERNEL);
  #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
  #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
  struct page_map {
  	struct resource res;
  	struct percpu_ref *ref;
  	struct dev_pagemap pgmap;
  	struct vmem_altmap altmap;
  };
  void get_zone_device_page(struct page *page)
  {
  	percpu_ref_get(page->pgmap->ref);
  }
  EXPORT_SYMBOL(get_zone_device_page);
  
  void put_zone_device_page(struct page *page)
  {
  	put_dev_pagemap(page->pgmap);
  }
  EXPORT_SYMBOL(put_zone_device_page);
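
  /*
   * Sketch (hypothetical caller): a reference on a ZONE_DEVICE page pins
   * the owning dev_pagemap's percpu_ref, preventing the devres release
   * from tearing down the mapping until the matching put.
   *
   *	get_zone_device_page(page);
   *	...
   *	put_zone_device_page(page);
   */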
  static void pgmap_radix_release(struct resource *res)
  {
  	resource_size_t key, align_start, align_size, align_end;
  
  	align_start = res->start & ~(SECTION_SIZE - 1);
  	align_size = ALIGN(resource_size(res), SECTION_SIZE);
  	align_end = align_start + align_size - 1;
  
  	mutex_lock(&pgmap_lock);
  	for (key = align_start; key <= align_end; key += SECTION_SIZE)
  		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
  	mutex_unlock(&pgmap_lock);
  }
  static unsigned long pfn_first(struct page_map *page_map)
  {
  	struct dev_pagemap *pgmap = &page_map->pgmap;
  	const struct resource *res = &page_map->res;
  	struct vmem_altmap *altmap = pgmap->altmap;
  	unsigned long pfn;
  
  	pfn = res->start >> PAGE_SHIFT;
  	if (altmap)
  		pfn += vmem_altmap_offset(altmap);
  	return pfn;
  }
  
  static unsigned long pfn_end(struct page_map *page_map)
  {
  	const struct resource *res = &page_map->res;
  
  	return (res->start + resource_size(res)) >> PAGE_SHIFT;
  }
  
  #define for_each_device_pfn(pfn, map) \
  	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
  static void devm_memremap_pages_release(struct device *dev, void *data)
  {
  	struct page_map *page_map = data;
  	struct resource *res = &page_map->res;
  	resource_size_t align_start, align_size;
  	struct dev_pagemap *pgmap = &page_map->pgmap;

  	if (percpu_ref_tryget_live(pgmap->ref)) {
  		dev_WARN(dev, "%s: page mapping is still live!
  ", __func__);
  		percpu_ref_put(pgmap->ref);
  	}
  	/* pages are dead and unused, undo the arch mapping */
  	align_start = res->start & ~(SECTION_SIZE - 1);
  	align_size = ALIGN(resource_size(res), SECTION_SIZE);
  	mem_hotplug_begin();
  	arch_remove_memory(align_start, align_size);
  	mem_hotplug_done();
  	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
  	pgmap_radix_release(res);
  	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
  			"%s: failed to free all reserved pages
  ", __func__);
  }
  
  /* assumes rcu_read_lock() held at entry */
  struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
  {
  	struct page_map *page_map;
  
  	WARN_ON_ONCE(!rcu_read_lock_held());
  
  	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
  	return page_map ? &page_map->pgmap : NULL;
  }
  /**
   * devm_memremap_pages - remap and provide memmap backing for the given resource
   * @dev: hosting device for @res
   * @res: "host memory" address range
   * @ref: a live per-cpu reference count
   * @altmap: optional descriptor for allocating the memmap from @res
   *
   * Notes:
   * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
   *    (or devm release event).
   *
   * 2/ @res is expected to be a host memory range that could feasibly be
   *    treated as a "System RAM" range, i.e. not a device mmio range, but
   *    this is not enforced.
   */
  void *devm_memremap_pages(struct device *dev, struct resource *res,
  		struct percpu_ref *ref, struct vmem_altmap *altmap)
  {
  	resource_size_t key, align_start, align_size, align_end;
  	pgprot_t pgprot = PAGE_KERNEL;
  	struct dev_pagemap *pgmap;
  	struct page_map *page_map;
  	int error, nid, is_ram;
  	unsigned long pfn;
  
  	align_start = res->start & ~(SECTION_SIZE - 1);
  	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
  		- align_start;
  	is_ram = region_intersects(align_start, align_size,
  		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
  
  	if (is_ram == REGION_MIXED) {
  		WARN_ONCE(1, "%s attempted on mixed region %pr
  ",
  				__func__, res);
  		return ERR_PTR(-ENXIO);
  	}
  
  	if (is_ram == REGION_INTERSECTS)
  		return __va(res->start);
  	if (!ref)
  		return ERR_PTR(-EINVAL);
  	page_map = devres_alloc_node(devm_memremap_pages_release,
  			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
  	if (!page_map)
  		return ERR_PTR(-ENOMEM);
  	pgmap = &page_map->pgmap;
  
  	memcpy(&page_map->res, res, sizeof(*res));
  	pgmap->dev = dev;
  	if (altmap) {
  		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
  		pgmap->altmap = &page_map->altmap;
  	}
  	pgmap->ref = ref;
  	pgmap->res = &page_map->res;
  	mutex_lock(&pgmap_lock);
  	error = 0;
  	align_end = align_start + align_size - 1;
  	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
  		struct dev_pagemap *dup;
  
  		rcu_read_lock();
  		dup = find_dev_pagemap(key);
  		rcu_read_unlock();
  		if (dup) {
  			dev_err(dev, "%s: %pr collides with mapping for %s
  ",
  					__func__, res, dev_name(dup->dev));
  			error = -EBUSY;
  			break;
  		}
  		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
  				page_map);
  		if (error) {
  			dev_err(dev, "%s: failed: %d
  ", __func__, error);
  			break;
  		}
  	}
  	mutex_unlock(&pgmap_lock);
  	if (error)
  		goto err_radix;
  	nid = dev_to_node(dev);
  	if (nid < 0)
  		nid = numa_mem_id();

  	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
  			align_size);
  	if (error)
  		goto err_pfn_remap;
  	mem_hotplug_begin();
  	error = arch_add_memory(nid, align_start, align_size, true);
  	mem_hotplug_done();
  	if (error)
  		goto err_add_memory;

  	for_each_device_pfn(pfn, page_map) {
  		struct page *page = pfn_to_page(pfn);
  		/*
  		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
  		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
  		 * freed or placed on a driver-private list.  Seed the
  		 * storage with LIST_POISON* values.
  		 */
  		list_del(&page->lru);
  		page->pgmap = pgmap;
  	}
  	devres_add(dev, page_map);
  	return __va(res->start);
  
   err_add_memory:
  	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
   err_pfn_remap:
   err_radix:
  	pgmap_radix_release(res);
  	devres_free(page_map);
  	return ERR_PTR(error);
  }
  EXPORT_SYMBOL(devm_memremap_pages);
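
  /*
   * Example (sketch of a hypothetical pmem-style user; 'drv' and its
   * fields are assumptions): @ref must later be made 'dead' via
   * percpu_ref_kill(), per note 1/ above, before the devres release fires.
   *
   *	addr = devm_memremap_pages(dev, &drv->res, &drv->ref, &drv->altmap);
   *	if (IS_ERR(addr))
   *		return PTR_ERR(addr);
   *	drv->virt_addr = addr;
   */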
  
  unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
  {
  	/* number of pfns from base where pfn_to_page() is valid */
  	return altmap->reserve + altmap->free;
  }
  
  void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
  {
  	altmap->alloc -= nr_pfns;
  }
  struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
  {
  	/*
  	 * 'memmap_start' is the virtual address for the first "struct
  	 * page" in this range of the vmemmap array.  In the case of
  	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
  	 * pointer arithmetic, so we can perform this to_vmem_altmap()
  	 * conversion without concern for the initialization state of
  	 * the struct page fields.
  	 */
  	struct page *page = (struct page *) memmap_start;
  	struct dev_pagemap *pgmap;
  
  	/*
  	 * Unconditionally retrieve a dev_pagemap associated with the
  	 * given physical address, this is only for use in the
  	 * arch_{add|remove}_memory() for setting up and tearing down
  	 * the memmap.
  	 */
  	rcu_read_lock();
  	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
  	rcu_read_unlock();
  
  	return pgmap ? pgmap->altmap : NULL;
  }
  #endif /* CONFIG_ZONE_DEVICE */