Blame view

mm/memremap.c 12.2 KB
5981690dd   Dan Williams   memremap: split d...
1
2
  /* SPDX-License-Identifier: GPL-2.0 */
  /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
7d3dcf26a   Christoph Hellwig   devres: add devm_...
3
  #include <linux/device.h>
92281dee8   Dan Williams   arch: introduce m...
4
  #include <linux/io.h>
0207df4fa   Andrey Ryabinin   kernel/memremap, ...
5
  #include <linux/kasan.h>
41e94a851   Christoph Hellwig   add devm_memremap...
6
  #include <linux/memory_hotplug.h>
bcfa4b721   Matthew Wilcox   memremap: Convert...
7
8
  #include <linux/mm.h>
  #include <linux/pfn_t.h>
5042db43c   Jérôme Glisse   mm/ZONE_DEVICE: n...
9
10
  #include <linux/swap.h>
  #include <linux/swapops.h>
bcfa4b721   Matthew Wilcox   memremap: Convert...
11
  #include <linux/types.h>
e76384884   Dan Williams   mm: introduce MEM...
12
  #include <linux/wait_bit.h>
bcfa4b721   Matthew Wilcox   memremap: Convert...
13
  #include <linux/xarray.h>
92281dee8   Dan Williams   arch: introduce m...
14

bcfa4b721   Matthew Wilcox   memremap: Convert...
15
  static DEFINE_XARRAY(pgmap_array);
9476df7d8   Dan Williams   mm: introduce fin...
16

f6a55e1a3   Christoph Hellwig   memremap: lift th...
17
18
19
20
  #ifdef CONFIG_DEV_PAGEMAP_OPS
  DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
  EXPORT_SYMBOL(devmap_managed_key);
  static atomic_t devmap_managed_enable;
6f42193fd   Christoph Hellwig   memremap: don't u...
21
  static void devmap_managed_enable_put(void)
f6a55e1a3   Christoph Hellwig   memremap: lift th...
22
23
24
25
  {
  	if (atomic_dec_and_test(&devmap_managed_enable))
  		static_branch_disable(&devmap_managed_key);
  }
6f42193fd   Christoph Hellwig   memremap: don't u...
26
  static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
f6a55e1a3   Christoph Hellwig   memremap: lift th...
27
  {
24917f6b1   Christoph Hellwig   memremap: provide...
28
  	if (!pgmap->ops || !pgmap->ops->page_free) {
f6a55e1a3   Christoph Hellwig   memremap: lift th...
29
30
31
32
33
34
35
  		WARN(1, "Missing page_free method
  ");
  		return -EINVAL;
  	}
  
  	if (atomic_inc_return(&devmap_managed_enable) == 1)
  		static_branch_enable(&devmap_managed_key);
6f42193fd   Christoph Hellwig   memremap: don't u...
36
  	return 0;
f6a55e1a3   Christoph Hellwig   memremap: lift th...
37
38
  }
  #else
6f42193fd   Christoph Hellwig   memremap: don't u...
39
  static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
f6a55e1a3   Christoph Hellwig   memremap: lift th...
40
41
42
  {
  	return -EINVAL;
  }
6f42193fd   Christoph Hellwig   memremap: don't u...
43
44
45
  static void devmap_managed_enable_put(void)
  {
  }
f6a55e1a3   Christoph Hellwig   memremap: lift th...
46
  #endif /* CONFIG_DEV_PAGEMAP_OPS */
bcfa4b721   Matthew Wilcox   memremap: Convert...
47
/*
 * Drop @res's pfn range from the pgmap lookup array, then wait out any
 * RCU readers (get_dev_pagemap() looks the array up under rcu_read_lock()).
 */
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}
e7744aa25   Logan Gunthorpe   memremap: drop pr...
53
/*
 * First pfn of @pgmap that has a usable struct page, skipping any pfns
 * reserved for the altmap itself.
 */
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}
e7744aa25   Logan Gunthorpe   memremap: drop pr...
58
/* One past the last pfn covered by @pgmap's resource range. */
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}
949b93250   Dan Williams   memremap: fix sof...
64
65
66
67
68
69
/*
 * Advance to the next pfn, yielding the CPU every 1024 iterations —
 * device pfn ranges can be huge, so avoid soft lockups during walks.
 */
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

/* Iterate every device pfn of @map, from pfn_first() up to pfn_end(). */
#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
5c2c2587b   Dan Williams   mm, dax, pmem: in...
72

24917f6b1   Christoph Hellwig   memremap: provide...
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
/*
 * Start teardown of the pgmap's percpu reference: prefer the driver's
 * ->kill() callback, otherwise kill the ref directly (internal-ref case).
 */
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

/*
 * Finish teardown: either delegate to the driver's ->cleanup(), or — for
 * the internal ref — wait for dev_pagemap_percpu_release() to signal
 * pgmap->done and then release the percpu_ref's resources.
 */
static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}
6869b7b20   Christoph Hellwig   memremap: provide...
96
  void memunmap_pages(struct dev_pagemap *pgmap)
41e94a851   Christoph Hellwig   add devm_memremap...
97
  {
e7744aa25   Logan Gunthorpe   memremap: drop pr...
98
  	struct resource *res = &pgmap->res;
77e080e76   Aneesh Kumar K.V   mm/memunmap: don'...
99
  	struct page *first_page;
713897038   Dan Williams   mm, zone_device: ...
100
  	unsigned long pfn;
2c2a5af6f   Oscar Salvador   mm, memory_hotplu...
101
  	int nid;
713897038   Dan Williams   mm, zone_device: ...
102

24917f6b1   Christoph Hellwig   memremap: provide...
103
  	dev_pagemap_kill(pgmap);
e7744aa25   Logan Gunthorpe   memremap: drop pr...
104
  	for_each_device_pfn(pfn, pgmap)
713897038   Dan Williams   mm, zone_device: ...
105
  		put_page(pfn_to_page(pfn));
24917f6b1   Christoph Hellwig   memremap: provide...
106
  	dev_pagemap_cleanup(pgmap);
9476df7d8   Dan Williams   mm: introduce fin...
107

77e080e76   Aneesh Kumar K.V   mm/memunmap: don'...
108
109
  	/* make sure to access a memmap that was actually initialized */
  	first_page = pfn_to_page(pfn_first(pgmap));
41e94a851   Christoph Hellwig   add devm_memremap...
110
  	/* pages are dead and unused, undo the arch mapping */
77e080e76   Aneesh Kumar K.V   mm/memunmap: don'...
111
  	nid = page_to_nid(first_page);
2c2a5af6f   Oscar Salvador   mm, memory_hotplu...
112

f931ab479   Dan Williams   mm: fix devm_memr...
113
  	mem_hotplug_begin();
69324b8f4   Dan Williams   mm, devm_memremap...
114
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
e84c5b761   David Hildenbrand   mm/memory_hotplug...
115
  		__remove_pages(PHYS_PFN(res->start),
77e080e76   Aneesh Kumar K.V   mm/memunmap: don'...
116
  			       PHYS_PFN(resource_size(res)), NULL);
69324b8f4   Dan Williams   mm, devm_memremap...
117
  	} else {
7cc7867fb   Dan Williams   mm/devm_memremap_...
118
  		arch_remove_memory(nid, res->start, resource_size(res),
514caf23a   Christoph Hellwig   memremap: replace...
119
  				pgmap_altmap(pgmap));
7cc7867fb   Dan Williams   mm/devm_memremap_...
120
  		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
69324b8f4   Dan Williams   mm, devm_memremap...
121
  	}
f931ab479   Dan Williams   mm: fix devm_memr...
122
  	mem_hotplug_done();
b5d24fda9   Dan Williams   mm, devm_memremap...
123

7cc7867fb   Dan Williams   mm/devm_memremap_...
124
  	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
bcfa4b721   Matthew Wilcox   memremap: Convert...
125
  	pgmap_array_delete(res);
fdc029b19   Christoph Hellwig   memremap: remove ...
126
127
  	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages
  ");
6f42193fd   Christoph Hellwig   memremap: don't u...
128
  	devmap_managed_enable_put();
9476df7d8   Dan Williams   mm: introduce fin...
129
  }
6869b7b20   Christoph Hellwig   memremap: provide...
130
131
132
133
134
135
  EXPORT_SYMBOL_GPL(memunmap_pages);
  
  static void devm_memremap_pages_release(void *data)
  {
  	memunmap_pages(data);
  }
9476df7d8   Dan Williams   mm: introduce fin...
136

24917f6b1   Christoph Hellwig   memremap: provide...
137
138
139
140
141
142
143
/*
 * Internal percpu_ref release callback: fires when the last reference on
 * pgmap->internal_ref drops, unblocking dev_pagemap_cleanup()'s wait on
 * pgmap->done.
 */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}
6869b7b20   Christoph Hellwig   memremap: provide...
144
145
146
147
  /*
   * Not device managed version of dev_memremap_pages, undone by
   * memunmap_pages().  Please use dev_memremap_pages if you have a struct
   * device available.
4b94ffdc4   Dan Williams   x86, mm: introduc...
148
   */
6869b7b20   Christoph Hellwig   memremap: provide...
149
  void *memremap_pages(struct dev_pagemap *pgmap, int nid)
41e94a851   Christoph Hellwig   add devm_memremap...
150
  {
949b93250   Dan Williams   memremap: fix sof...
151
  	struct resource *res = &pgmap->res;
966cf44f6   Alexander Duyck   mm: defer ZONE_DE...
152
  	struct dev_pagemap *conflict_pgmap;
940519f0c   Michal Hocko   mm, memory_hotplu...
153
154
155
  	struct mhp_restrictions restrictions = {
  		/*
  		 * We do not want any optional features only our own memmap
7cc7867fb   Dan Williams   mm/devm_memremap_...
156
  		 */
514caf23a   Christoph Hellwig   memremap: replace...
157
  		.altmap = pgmap_altmap(pgmap),
940519f0c   Michal Hocko   mm, memory_hotplu...
158
  	};
9049771f7   Dan Williams   mm: fix cache mod...
159
  	pgprot_t pgprot = PAGE_KERNEL;
6869b7b20   Christoph Hellwig   memremap: provide...
160
  	int error, is_ram;
f6a55e1a3   Christoph Hellwig   memremap: lift th...
161
  	bool need_devmap_managed = true;
5f29a77cd   Dan Williams   mm: fix mixed zon...
162

3ed2dcdf5   Christoph Hellwig   memremap: validat...
163
164
165
166
167
168
169
  	switch (pgmap->type) {
  	case MEMORY_DEVICE_PRIVATE:
  		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
  			WARN(1, "Device private memory not supported
  ");
  			return ERR_PTR(-EINVAL);
  		}
897e6365c   Christoph Hellwig   memremap: add a m...
170
171
172
173
174
  		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
  			WARN(1, "Missing migrate_to_ram method
  ");
  			return ERR_PTR(-EINVAL);
  		}
3ed2dcdf5   Christoph Hellwig   memremap: validat...
175
176
177
178
179
180
181
182
183
184
185
  		break;
  	case MEMORY_DEVICE_FS_DAX:
  		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
  		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
  			WARN(1, "File system DAX not supported
  ");
  			return ERR_PTR(-EINVAL);
  		}
  		break;
  	case MEMORY_DEVICE_DEVDAX:
  	case MEMORY_DEVICE_PCI_P2PDMA:
f6a55e1a3   Christoph Hellwig   memremap: lift th...
186
  		need_devmap_managed = false;
3ed2dcdf5   Christoph Hellwig   memremap: validat...
187
188
189
190
191
192
  		break;
  	default:
  		WARN(1, "Invalid pgmap type %d
  ", pgmap->type);
  		break;
  	}
24917f6b1   Christoph Hellwig   memremap: provide...
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
  	if (!pgmap->ref) {
  		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
  			return ERR_PTR(-EINVAL);
  
  		init_completion(&pgmap->done);
  		error = percpu_ref_init(&pgmap->internal_ref,
  				dev_pagemap_percpu_release, 0, GFP_KERNEL);
  		if (error)
  			return ERR_PTR(error);
  		pgmap->ref = &pgmap->internal_ref;
  	} else {
  		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
  			WARN(1, "Missing reference count teardown definition
  ");
  			return ERR_PTR(-EINVAL);
  		}
50f44ee72   Dan Williams   mm/devm_memremap_...
209
  	}
a95c90f1e   Dan Williams   mm, devm_memremap...
210

f6a55e1a3   Christoph Hellwig   memremap: lift th...
211
  	if (need_devmap_managed) {
6f42193fd   Christoph Hellwig   memremap: don't u...
212
  		error = devmap_managed_enable_get(pgmap);
f6a55e1a3   Christoph Hellwig   memremap: lift th...
213
214
215
  		if (error)
  			return ERR_PTR(error);
  	}
7cc7867fb   Dan Williams   mm/devm_memremap_...
216
  	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
15d36fecd   Dave Jiang   mm: disallow mapp...
217
  	if (conflict_pgmap) {
6869b7b20   Christoph Hellwig   memremap: provide...
218
219
  		WARN(1, "Conflicting mapping in same section
  ");
15d36fecd   Dave Jiang   mm: disallow mapp...
220
  		put_dev_pagemap(conflict_pgmap);
50f44ee72   Dan Williams   mm/devm_memremap_...
221
222
  		error = -ENOMEM;
  		goto err_array;
15d36fecd   Dave Jiang   mm: disallow mapp...
223
  	}
7cc7867fb   Dan Williams   mm/devm_memremap_...
224
  	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
15d36fecd   Dave Jiang   mm: disallow mapp...
225
  	if (conflict_pgmap) {
6869b7b20   Christoph Hellwig   memremap: provide...
226
227
  		WARN(1, "Conflicting mapping in same section
  ");
15d36fecd   Dave Jiang   mm: disallow mapp...
228
  		put_dev_pagemap(conflict_pgmap);
50f44ee72   Dan Williams   mm/devm_memremap_...
229
230
  		error = -ENOMEM;
  		goto err_array;
15d36fecd   Dave Jiang   mm: disallow mapp...
231
  	}
7cc7867fb   Dan Williams   mm/devm_memremap_...
232
  	is_ram = region_intersects(res->start, resource_size(res),
d37a14bb5   Linus Torvalds   Merge branch 'cor...
233
  		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
41e94a851   Christoph Hellwig   add devm_memremap...
234

06489cfbd   Dan Williams   mm, devm_memremap...
235
236
237
238
  	if (is_ram != REGION_DISJOINT) {
  		WARN_ONCE(1, "%s attempted on %s region %pr
  ", __func__,
  				is_ram == REGION_MIXED ? "mixed" : "ram", res);
a95c90f1e   Dan Williams   mm, devm_memremap...
239
240
  		error = -ENXIO;
  		goto err_array;
41e94a851   Christoph Hellwig   add devm_memremap...
241
  	}
bcfa4b721   Matthew Wilcox   memremap: Convert...
242
243
  	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
  				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
9476df7d8   Dan Williams   mm: introduce fin...
244
  	if (error)
bcfa4b721   Matthew Wilcox   memremap: Convert...
245
  		goto err_array;
9476df7d8   Dan Williams   mm: introduce fin...
246

41e94a851   Christoph Hellwig   add devm_memremap...
247
  	if (nid < 0)
7eff93b7c   Dan Williams   devm_memremap_pag...
248
  		nid = numa_mem_id();
41e94a851   Christoph Hellwig   add devm_memremap...
249

7cc7867fb   Dan Williams   mm/devm_memremap_...
250
251
  	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
  			resource_size(res));
9049771f7   Dan Williams   mm: fix cache mod...
252
253
  	if (error)
  		goto err_pfn_remap;
f931ab479   Dan Williams   mm: fix devm_memr...
254
  	mem_hotplug_begin();
69324b8f4   Dan Williams   mm, devm_memremap...
255
256
257
258
259
260
261
262
263
264
265
266
267
  
  	/*
  	 * For device private memory we call add_pages() as we only need to
  	 * allocate and initialize struct page for the device memory. More-
  	 * over the device memory is un-accessible thus we do not want to
  	 * create a linear mapping for the memory like arch_add_memory()
  	 * would do.
  	 *
  	 * For all other device memory types, which are accessible by
  	 * the CPU, we do want the linear mapping and thus use
  	 * arch_add_memory().
  	 */
  	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
7cc7867fb   Dan Williams   mm/devm_memremap_...
268
269
  		error = add_pages(nid, PHYS_PFN(res->start),
  				PHYS_PFN(resource_size(res)), &restrictions);
69324b8f4   Dan Williams   mm, devm_memremap...
270
  	} else {
7cc7867fb   Dan Williams   mm/devm_memremap_...
271
  		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
69324b8f4   Dan Williams   mm, devm_memremap...
272
273
274
275
  		if (error) {
  			mem_hotplug_done();
  			goto err_kasan;
  		}
7cc7867fb   Dan Williams   mm/devm_memremap_...
276
  		error = arch_add_memory(nid, res->start, resource_size(res),
940519f0c   Michal Hocko   mm, memory_hotplu...
277
  					&restrictions);
69324b8f4   Dan Williams   mm, devm_memremap...
278
279
280
281
282
283
  	}
  
  	if (!error) {
  		struct zone *zone;
  
  		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
7cc7867fb   Dan Williams   mm/devm_memremap_...
284
285
  		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
  				PHYS_PFN(resource_size(res)), restrictions.altmap);
0207df4fa   Andrey Ryabinin   kernel/memremap, ...
286
  	}
f931ab479   Dan Williams   mm: fix devm_memr...
287
  	mem_hotplug_done();
9476df7d8   Dan Williams   mm: introduce fin...
288
289
  	if (error)
  		goto err_add_memory;
41e94a851   Christoph Hellwig   add devm_memremap...
290

966cf44f6   Alexander Duyck   mm: defer ZONE_DE...
291
292
293
294
295
  	/*
  	 * Initialization of the pages has been deferred until now in order
  	 * to allow us to do the work while not holding the hotplug lock.
  	 */
  	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
7cc7867fb   Dan Williams   mm/devm_memremap_...
296
297
  				PHYS_PFN(res->start),
  				PHYS_PFN(resource_size(res)), pgmap);
966cf44f6   Alexander Duyck   mm: defer ZONE_DE...
298
  	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
41e94a851   Christoph Hellwig   add devm_memremap...
299
  	return __va(res->start);
9476df7d8   Dan Williams   mm: introduce fin...
300
301
  
   err_add_memory:
7cc7867fb   Dan Williams   mm/devm_memremap_...
302
  	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
0207df4fa   Andrey Ryabinin   kernel/memremap, ...
303
   err_kasan:
7cc7867fb   Dan Williams   mm/devm_memremap_...
304
  	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
9049771f7   Dan Williams   mm: fix cache mod...
305
   err_pfn_remap:
bcfa4b721   Matthew Wilcox   memremap: Convert...
306
307
  	pgmap_array_delete(res);
   err_array:
24917f6b1   Christoph Hellwig   memremap: provide...
308
309
  	dev_pagemap_kill(pgmap);
  	dev_pagemap_cleanup(pgmap);
6f42193fd   Christoph Hellwig   memremap: don't u...
310
  	devmap_managed_enable_put();
9476df7d8   Dan Williams   mm: introduce fin...
311
  	return ERR_PTR(error);
41e94a851   Christoph Hellwig   add devm_memremap...
312
  }
6869b7b20   Christoph Hellwig   memremap: provide...
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
  EXPORT_SYMBOL_GPL(memremap_pages);
  
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which pgmap->ref must be
 *    'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	/* Auto-teardown on device release (or immediately if this fails). */
	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
4b94ffdc4   Dan Williams   x86, mm: introduc...
351

2e3f139e8   Dan Williams   mm/devm_memremap_...
352
353
354
355
356
/*
 * Explicitly run (and remove) the devres teardown action installed by
 * devm_memremap_pages(), instead of waiting for device release.
 */
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);
4b94ffdc4   Dan Williams   x86, mm: introduc...
357
358
359
/*
 * Offset (in pfns) into the range before struct pages become valid:
 * the altmap's reserved plus free pfns, or 0 when there is no altmap.
 */
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}
  
/* Return @nr_pfns previously allocated from @altmap to its pool. */
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
0822acb86   Christoph Hellwig   mm: move get_dev_...
369
370
371
372
373
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 *
 * Return: a live pgmap covering @pfn, or NULL when none exists (or its
 * reference count could no longer be taken).
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	/* NULL out entries whose ref can no longer be taken (dying pgmap). */
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
7b2d55d2c   Jérôme Glisse   mm/ZONE_DEVICE: s...
401

e76384884   Dan Williams   mm: introduce MEM...
402
  #ifdef CONFIG_DEV_PAGEMAP_OPS
e76384884   Dan Williams   mm: introduce MEM...
403
  void __put_devmap_managed_page(struct page *page)
7b2d55d2c   Jérôme Glisse   mm/ZONE_DEVICE: s...
404
405
406
407
408
409
410
411
412
413
414
  {
  	int count = page_ref_dec_return(page);
  
  	/*
  	 * If refcount is 1 then page is freed and refcount is stable as nobody
  	 * holds a reference on the page.
  	 */
  	if (count == 1) {
  		/* Clear Active bit in case of parallel mark_page_accessed */
  		__ClearPageActive(page);
  		__ClearPageWaiters(page);
c733a8287   Jérôme Glisse   mm/memcontrol: su...
415
  		mem_cgroup_uncharge(page);
7b2d55d2c   Jérôme Glisse   mm/ZONE_DEVICE: s...
416

7ab0ad0e7   Ralph Campbell   mm/hmm: fix ZONE_...
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
  		/*
  		 * When a device_private page is freed, the page->mapping field
  		 * may still contain a (stale) mapping value. For example, the
  		 * lower bits of page->mapping may still identify the page as
  		 * an anonymous page. Ultimately, this entire field is just
  		 * stale and wrong, and it will cause errors if not cleared.
  		 * One example is:
  		 *
  		 *  migrate_vma_pages()
  		 *    migrate_vma_insert_page()
  		 *      page_add_new_anon_rmap()
  		 *        __page_set_anon_rmap()
  		 *          ...checks page->mapping, via PageAnon(page) call,
  		 *            and incorrectly concludes that the page is an
  		 *            anonymous page. Therefore, it incorrectly,
  		 *            silently fails to set up the new anon rmap.
  		 *
  		 * For other types of ZONE_DEVICE pages, migration is either
  		 * handled differently or not done at all, so there is no need
  		 * to clear page->mapping.
  		 */
  		if (is_device_private_page(page))
  			page->mapping = NULL;
80a72d0af   Christoph Hellwig   memremap: remove ...
440
  		page->pgmap->ops->page_free(page);
7b2d55d2c   Jérôme Glisse   mm/ZONE_DEVICE: s...
441
442
443
  	} else if (!count)
  		__put_page(page);
  }
31c5bda3a   Dan Williams   mm: fix exports t...
444
  EXPORT_SYMBOL(__put_devmap_managed_page);
e76384884   Dan Williams   mm: introduce MEM...
445
  #endif /* CONFIG_DEV_PAGEMAP_OPS */