mm/memremap.c

  /* SPDX-License-Identifier: GPL-2.0 */
  /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  #include <linux/device.h>
  #include <linux/io.h>
  #include <linux/kasan.h>
  #include <linux/memory_hotplug.h>
  #include <linux/mm.h>
  #include <linux/pfn_t.h>
  #include <linux/swap.h>
  #include <linux/mmzone.h>
  #include <linux/swapops.h>
  #include <linux/types.h>
  #include <linux/wait_bit.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY(pgmap_array);

  /*
   * The memremap() and memremap_pages() interfaces are alternately used
   * to map persistent memory namespaces. These interfaces place different
   * constraints on the alignment and size of the mapping (namespace).
   * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
   * only map subsections (2MB), and on at least one architecture (PowerPC)
   * the minimum mapping granularity of memremap_pages() is 16MB.
   *
   * The role of memremap_compat_align() is to communicate the minimum
   * arch supported alignment of a namespace such that it can freely
   * switch modes without violating the arch constraint. Namely, do not
   * allow a namespace to be PAGE_SIZE aligned since that namespace may be
   * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
   */
  #ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
  unsigned long memremap_compat_align(void)
  {
  	return SUBSECTION_SIZE;
  }
  EXPORT_SYMBOL_GPL(memremap_compat_align);
  #endif
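
  /*
   * Example (illustrative sketch, not part of the kernel sources): a
   * namespace provider would typically validate its base and size against
   * memremap_compat_align() before allowing a mode change.  The helper
   * below and its name are hypothetical.
   *
   *	static int example_check_namespace_align(resource_size_t start,
   *						 resource_size_t size)
   *	{
   *		unsigned long align = memremap_compat_align();
   *
   *		if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
   *			return -EINVAL;
   *		return 0;
   *	}
   */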

  #ifdef CONFIG_DEV_PAGEMAP_OPS
  DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
  EXPORT_SYMBOL(devmap_managed_key);

  static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
  {
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
  	    pgmap->type == MEMORY_DEVICE_FS_DAX)
  		static_branch_dec(&devmap_managed_key);
  }

  static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
  {
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
  	    pgmap->type == MEMORY_DEVICE_FS_DAX)
  		static_branch_inc(&devmap_managed_key);
  }
  #else
  static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
  {
  }
  static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
  {
  }
  #endif /* CONFIG_DEV_PAGEMAP_OPS */
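
  /*
   * Illustrative note (not part of this file): the static key above is
   * consumed on the put_page() fast path in include/linux/mm.h, roughly:
   *
   *	if (static_branch_unlikely(&devmap_managed_key) &&
   *	    is_zone_device_page(page) && ...)
   *		put_devmap_managed_page(page);
   *
   * so the branch is only enabled while MEMORY_DEVICE_PRIVATE or
   * MEMORY_DEVICE_FS_DAX pagemaps are instantiated.
   */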

  static void pgmap_array_delete(struct range *range)
  {
  	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
  			NULL, GFP_KERNEL);
  	synchronize_rcu();
  }

  static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
  {
  	struct range *range = &pgmap->ranges[range_id];
  	unsigned long pfn = PHYS_PFN(range->start);

  	if (range_id)
  		return pfn;
  	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
  }

  static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
  {
  	const struct range *range = &pgmap->ranges[range_id];

  	return (range->start + range_len(range)) >> PAGE_SHIFT;
  }

  static unsigned long pfn_next(unsigned long pfn)
  {
  	if (pfn % 1024 == 0)
  		cond_resched();
  	return pfn + 1;
  }

  #define for_each_device_pfn(pfn, map, i) \
  	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))

  static void dev_pagemap_kill(struct dev_pagemap *pgmap)
  {
  	if (pgmap->ops && pgmap->ops->kill)
  		pgmap->ops->kill(pgmap);
  	else
  		percpu_ref_kill(pgmap->ref);
  }

  static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
  {
  	if (pgmap->ops && pgmap->ops->cleanup) {
  		pgmap->ops->cleanup(pgmap);
  	} else {
  		wait_for_completion(&pgmap->done);
  		percpu_ref_exit(pgmap->ref);
  	}
  	/*
  	 * Undo the pgmap ref assignment for the internal case as the
  	 * caller may re-enable the same pgmap.
  	 */
  	if (pgmap->ref == &pgmap->internal_ref)
  		pgmap->ref = NULL;
  }

  static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
  {
  	struct range *range = &pgmap->ranges[range_id];
  	struct page *first_page;
  	int nid;

  	/* make sure to access a memmap that was actually initialized */
  	first_page = pfn_to_page(pfn_first(pgmap, range_id));

  	/* pages are dead and unused, undo the arch mapping */
  	nid = page_to_nid(first_page);

  	mem_hotplug_begin();
  	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
  				   PHYS_PFN(range_len(range)));
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
  		__remove_pages(PHYS_PFN(range->start),
  			       PHYS_PFN(range_len(range)), NULL);
  	} else {
  		arch_remove_memory(nid, range->start, range_len(range),
  				pgmap_altmap(pgmap));
  		kasan_remove_zero_shadow(__va(range->start), range_len(range));
  	}
  	mem_hotplug_done();

  	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
  	pgmap_array_delete(range);
  }

  void memunmap_pages(struct dev_pagemap *pgmap)
  {
  	unsigned long pfn;
  	int i;

  	dev_pagemap_kill(pgmap);
  	for (i = 0; i < pgmap->nr_range; i++)
  		for_each_device_pfn(pfn, pgmap, i)
  			put_page(pfn_to_page(pfn));
  	dev_pagemap_cleanup(pgmap);

  	for (i = 0; i < pgmap->nr_range; i++)
  		pageunmap_range(pgmap, i);

  	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
  	devmap_managed_enable_put(pgmap);
  }
  EXPORT_SYMBOL_GPL(memunmap_pages);

  static void devm_memremap_pages_release(void *data)
  {
  	memunmap_pages(data);
  }

  static void dev_pagemap_percpu_release(struct percpu_ref *ref)
  {
  	struct dev_pagemap *pgmap =
  		container_of(ref, struct dev_pagemap, internal_ref);

  	complete(&pgmap->done);
  }

  static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
  		int range_id, int nid)
  {
  	struct range *range = &pgmap->ranges[range_id];
  	struct dev_pagemap *conflict_pgmap;
  	int error, is_ram;

  	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
  				"altmap not supported for multiple ranges\n"))
  		return -EINVAL;

  	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
  	if (conflict_pgmap) {
  		WARN(1, "Conflicting mapping in same section\n");
  		put_dev_pagemap(conflict_pgmap);
  		return -ENOMEM;
  	}

  	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
  	if (conflict_pgmap) {
  		WARN(1, "Conflicting mapping in same section\n");
  		put_dev_pagemap(conflict_pgmap);
  		return -ENOMEM;
  	}

  	is_ram = region_intersects(range->start, range_len(range),
  		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

  	if (is_ram != REGION_DISJOINT) {
  		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
  				is_ram == REGION_MIXED ? "mixed" : "ram",
  				range->start, range->end);
  		return -ENXIO;
  	}

  	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
  				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
  	if (error)
  		return error;

  	if (nid < 0)
  		nid = numa_mem_id();

  	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
  			range_len(range));
  	if (error)
  		goto err_pfn_remap;

  	mem_hotplug_begin();

  	/*
  	 * For device private memory we call add_pages() as we only need to
  	 * allocate and initialize struct page for the device memory.
  	 * Moreover, the device memory is inaccessible, so we do not want to
  	 * create a linear mapping for the memory like arch_add_memory()
  	 * would do.
  	 *
  	 * For all other device memory types, which are accessible by
  	 * the CPU, we do want the linear mapping and thus use
  	 * arch_add_memory().
  	 */
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
  		error = add_pages(nid, PHYS_PFN(range->start),
  				PHYS_PFN(range_len(range)), params);
  	} else {
  		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
  		if (error) {
  			mem_hotplug_done();
  			goto err_kasan;
  		}

  		error = arch_add_memory(nid, range->start, range_len(range),
  					params);
  	}

  	if (!error) {
  		struct zone *zone;

  		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
  		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
  				PHYS_PFN(range_len(range)), params->altmap,
  				MIGRATE_MOVABLE);
  	}

  	mem_hotplug_done();
  	if (error)
  		goto err_add_memory;

  	/*
  	 * Initialization of the pages has been deferred until now in order
  	 * to allow us to do the work while not holding the hotplug lock.
  	 */
  	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
  				PHYS_PFN(range->start),
  				PHYS_PFN(range_len(range)), pgmap);
  	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
  			- pfn_first(pgmap, range_id));
  	return 0;

  err_add_memory:
  	kasan_remove_zero_shadow(__va(range->start), range_len(range));
  err_kasan:
  	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
  err_pfn_remap:
  	pgmap_array_delete(range);
  	return error;
  }
  
  
  /*
   * Not device managed version of devm_memremap_pages(), undone by
   * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
   * device available.
   */
  void *memremap_pages(struct dev_pagemap *pgmap, int nid)
  {
  	struct mhp_params params = {
  		.altmap = pgmap_altmap(pgmap),
  		.pgprot = PAGE_KERNEL,
  	};
  	const int nr_range = pgmap->nr_range;
  	int error, i;

  	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
  		return ERR_PTR(-EINVAL);

  	switch (pgmap->type) {
  	case MEMORY_DEVICE_PRIVATE:
  		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
  			WARN(1, "Device private memory not supported\n");
  			return ERR_PTR(-EINVAL);
  		}
  		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
  			WARN(1, "Missing migrate_to_ram method\n");
  			return ERR_PTR(-EINVAL);
  		}
  		if (!pgmap->ops->page_free) {
  			WARN(1, "Missing page_free method\n");
  			return ERR_PTR(-EINVAL);
  		}
  		if (!pgmap->owner) {
  			WARN(1, "Missing owner\n");
  			return ERR_PTR(-EINVAL);
  		}
  		break;
  	case MEMORY_DEVICE_FS_DAX:
  		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
  		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
  			WARN(1, "File system DAX not supported\n");
  			return ERR_PTR(-EINVAL);
  		}
  		break;
  	case MEMORY_DEVICE_GENERIC:
  		break;
  	case MEMORY_DEVICE_PCI_P2PDMA:
  		params.pgprot = pgprot_noncached(params.pgprot);
  		break;
  	default:
  		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
  		break;
  	}

  	if (!pgmap->ref) {
  		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
  			return ERR_PTR(-EINVAL);

  		init_completion(&pgmap->done);
  		error = percpu_ref_init(&pgmap->internal_ref,
  				dev_pagemap_percpu_release, 0, GFP_KERNEL);
  		if (error)
  			return ERR_PTR(error);
  		pgmap->ref = &pgmap->internal_ref;
  	} else {
  		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
  			WARN(1, "Missing reference count teardown definition\n");
  			return ERR_PTR(-EINVAL);
  		}
  	}

  	devmap_managed_enable_get(pgmap);

  	/*
  	 * Clear the pgmap nr_range as it will be incremented for each
  	 * successfully processed range. This communicates how many
  	 * regions to unwind in the abort case.
  	 */
  	pgmap->nr_range = 0;
  	error = 0;
  	for (i = 0; i < nr_range; i++) {
  		error = pagemap_range(pgmap, &params, i, nid);
  		if (error)
  			break;
  		pgmap->nr_range++;
  	}

  	if (i < nr_range) {
  		memunmap_pages(pgmap);
  		pgmap->nr_range = nr_range;
  		return ERR_PTR(error);
  	}

  	return __va(pgmap->ranges[0].start);
  }
  EXPORT_SYMBOL_GPL(memremap_pages);
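
  /*
   * Example (illustrative sketch, not taken from an in-tree driver): the
   * minimum dev_pagemap setup that passes the MEMORY_DEVICE_PRIVATE checks
   * above.  The ops callbacks, the resource "res" and the "drvdata" owner
   * cookie are hypothetical and assumed to be supplied by the caller.
   *
   *	static const struct dev_pagemap_ops example_devmem_ops = {
   *		.page_free	= example_page_free,
   *		.migrate_to_ram	= example_migrate_to_ram,
   *	};
   *
   *	pgmap->type = MEMORY_DEVICE_PRIVATE;
   *	pgmap->range.start = res->start;
   *	pgmap->range.end = res->end;
   *	pgmap->nr_range = 1;
   *	pgmap->ops = &example_devmem_ops;
   *	pgmap->owner = drvdata;
   *	addr = memremap_pages(pgmap, NUMA_NO_NODE);
   *	if (IS_ERR(addr))
   *		return PTR_ERR(addr);
   */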
  
  /**
   * devm_memremap_pages - remap and provide memmap backing for the given resource
   * @dev: hosting device for @pgmap
   * @pgmap: pointer to a struct dev_pagemap
   *
   * Notes:
   * 1/ At a minimum the range, nr_range, and type members of @pgmap must be
   *    initialized by the caller before passing it to this function
   *
   * 2/ The altmap field may optionally be initialized, in which case
   *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
   *
   * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
   *    'live' on entry and will be killed and reaped at
   *    devm_memremap_pages_release() time, or if this routine fails.
   *
   * 4/ range is expected to be a host memory range that could feasibly be
   *    treated as a "System RAM" range, i.e. not a device mmio range, but
   *    this is not enforced.
   */
  void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  {
  	int error;
  	void *ret;
  
  	ret = memremap_pages(pgmap, dev_to_node(dev));
  	if (IS_ERR(ret))
  		return ret;
  
  	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
  			pgmap);
  	if (error)
  		return ERR_PTR(error);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(devm_memremap_pages);
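
  /*
   * Example (illustrative sketch, not taken from an in-tree driver): a
   * device-managed mapping of a device resource as MEMORY_DEVICE_GENERIC
   * pages, which needs no pagemap ops; teardown happens automatically via
   * devm_memremap_pages_release() when the hosting device is unbound.  The
   * "res" pointer is a hypothetical resource owned by @dev.
   *
   *	pgmap->range.start = res->start;
   *	pgmap->range.end = res->end;
   *	pgmap->nr_range = 1;
   *	pgmap->type = MEMORY_DEVICE_GENERIC;
   *	addr = devm_memremap_pages(dev, pgmap);
   *	if (IS_ERR(addr))
   *		return PTR_ERR(addr);
   */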
  void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
  {
  	devm_release_action(dev, devm_memremap_pages_release, pgmap);
  }
  EXPORT_SYMBOL_GPL(devm_memunmap_pages);
  unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
  {
  	/* number of pfns from base where pfn_to_page() is valid */
  	if (altmap)
  		return altmap->reserve + altmap->free;
  	return 0;
  }
  
  void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
  {
  	altmap->alloc -= nr_pfns;
  }
  /**
   * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
   * @pfn: page frame number to lookup page_map
   * @pgmap: optional known pgmap that already has a reference
   *
   * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
   * is non-NULL but does not cover @pfn the reference to it will be released.
   */
  struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
  		struct dev_pagemap *pgmap)
  {
  	resource_size_t phys = PFN_PHYS(pfn);

  	/*
  	 * In the cached case we're already holding a live reference.
  	 */
  	if (pgmap) {
  		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
  			return pgmap;
  		put_dev_pagemap(pgmap);
  	}

  	/* fall back to slow path lookup */
  	rcu_read_lock();
  	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
  	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
  		pgmap = NULL;
  	rcu_read_unlock();

  	return pgmap;
  }
  EXPORT_SYMBOL_GPL(get_dev_pagemap);
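
  /*
   * Example (illustrative sketch): callers that walk a range of pfns
   * typically pass the previously returned pgmap back in so that only the
   * first lookup takes the xarray slow path, and they drop the final
   * reference once the walk is done.
   *
   *	struct dev_pagemap *pgmap = NULL;
   *	unsigned long pfn;
   *
   *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
   *		pgmap = get_dev_pagemap(pfn, pgmap);
   *		if (!pgmap)
   *			break;
   *		... per-page work on pfn_to_page(pfn) ...
   *	}
   *	if (pgmap)
   *		put_dev_pagemap(pgmap);
   */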

  #ifdef CONFIG_DEV_PAGEMAP_OPS
  void free_devmap_managed_page(struct page *page)
  {
  	/* notify page idle for dax */
  	if (!is_device_private_page(page)) {
  		wake_up_var(&page->_refcount);
  		return;
  	}

  	__ClearPageWaiters(page);
  
  	mem_cgroup_uncharge(page);
  
  	/*
  	 * When a device_private page is freed, the page->mapping field
  	 * may still contain a (stale) mapping value. For example, the
  	 * lower bits of page->mapping may still identify the page as an
  	 * anonymous page. Ultimately, this entire field is just stale
  	 * and wrong, and it will cause errors if not cleared.  One
  	 * example is:
  	 *
  	 *  migrate_vma_pages()
  	 *    migrate_vma_insert_page()
  	 *      page_add_new_anon_rmap()
  	 *        __page_set_anon_rmap()
  	 *          ...checks page->mapping, via PageAnon(page) call,
  	 *            and incorrectly concludes that the page is an
  	 *            anonymous page. Therefore, it incorrectly,
  	 *            silently fails to set up the new anon rmap.
  	 *
  	 * For other types of ZONE_DEVICE pages, migration is either
  	 * handled differently or not done at all, so there is no need
  	 * to clear page->mapping.
  	 */
  	page->mapping = NULL;
  	page->pgmap->ops->page_free(page);
  }
  #endif /* CONFIG_DEV_PAGEMAP_OPS */