Blame view

drivers/nvdimm/namespace_devs.c 67.1 KB
5b497af42   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
3d88002e4   Dan Williams   libnvdimm: suppor...
2
3
  /*
   * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3d88002e4   Dan Williams   libnvdimm: suppor...
4
5
6
   */
  #include <linux/module.h>
  #include <linux/device.h>
6ff3e912d   Dan Williams   libnvdimm, namesp...
7
  #include <linux/sort.h>
3d88002e4   Dan Williams   libnvdimm: suppor...
8
  #include <linux/slab.h>
ae8219f18   Dan Williams   libnvdimm, label:...
9
  #include <linux/list.h>
3d88002e4   Dan Williams   libnvdimm: suppor...
10
  #include <linux/nd.h>
bf9bccc14   Dan Williams   libnvdimm: pmem l...
11
  #include "nd-core.h"
ca6a4657e   Dan Williams   x86, libnvdimm, p...
12
  #include "pmem.h"
6acd7d5ef   Dan Williams   libnvdimm/namespa...
13
  #include "pfn.h"
3d88002e4   Dan Williams   libnvdimm: suppor...
14
15
16
17
18
19
20
21
  #include "nd.h"
  
  static void namespace_io_release(struct device *dev)
  {
  	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
  
  	kfree(nsio);
  }
bf9bccc14   Dan Williams   libnvdimm: pmem l...
22
23
24
  static void namespace_pmem_release(struct device *dev)
  {
  	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
0e3b0d123   Dan Williams   libnvdimm, namesp...
25
  	struct nd_region *nd_region = to_nd_region(dev->parent);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
26

0e3b0d123   Dan Williams   libnvdimm, namesp...
27
28
  	if (nspm->id >= 0)
  		ida_simple_remove(&nd_region->ns_ida, nspm->id);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
29
30
31
32
33
34
35
  	kfree(nspm->alt_name);
  	kfree(nspm->uuid);
  	kfree(nspm);
  }
  
  static void namespace_blk_release(struct device *dev)
  {
1b40e09a1   Dan Williams   libnvdimm: blk la...
36
37
38
39
40
41
42
43
44
  	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
  	struct nd_region *nd_region = to_nd_region(dev->parent);
  
  	if (nsblk->id >= 0)
  		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
  	kfree(nsblk->alt_name);
  	kfree(nsblk->uuid);
  	kfree(nsblk->res);
  	kfree(nsblk);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
45
  }
78c81cc89   Dan Williams   libnvdimm: Move a...
46
47
48
  static bool is_namespace_pmem(const struct device *dev);
  static bool is_namespace_blk(const struct device *dev);
  static bool is_namespace_io(const struct device *dev);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
49

e07ecd76d   Dan Williams   libnvdimm: fix na...
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
  static int is_uuid_busy(struct device *dev, void *data)
  {
  	u8 *uuid1 = data, *uuid2 = NULL;
  
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
  
  		uuid2 = nspm->uuid;
  	} else if (is_namespace_blk(dev)) {
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
  
  		uuid2 = nsblk->uuid;
  	} else if (is_nd_btt(dev)) {
  		struct nd_btt *nd_btt = to_nd_btt(dev);
  
  		uuid2 = nd_btt->uuid;
  	} else if (is_nd_pfn(dev)) {
  		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
  
  		uuid2 = nd_pfn->uuid;
  	}
  
  	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
  		return -EBUSY;
  
  	return 0;
  }
  
  static int is_namespace_uuid_busy(struct device *dev, void *data)
  {
c9e582aa6   Dan Williams   libnvdimm, nfit: ...
80
  	if (is_nd_region(dev))
e07ecd76d   Dan Williams   libnvdimm: fix na...
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
  		return device_for_each_child(dev, data, is_uuid_busy);
  	return 0;
  }
  
  /**
   * nd_is_uuid_unique - verify that no other namespace has @uuid
   * @dev: any device on a nvdimm_bus
   * @uuid: uuid to check
   */
  bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
  {
  	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  
  	if (!nvdimm_bus)
  		return false;
  	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
  	if (device_for_each_child(&nvdimm_bus->dev, uuid,
  				is_namespace_uuid_busy) != 0)
  		return false;
  	return true;
  }
004f1afbe   Dan Williams   libnvdimm, pmem: ...
102
103
104
/*
 * pmem_should_map_pages - decide whether a pmem namespace gets a struct
 * page memmap (ZONE_DEVICE) mapping.
 *
 * Returns true only when every precondition holds; each failed check
 * below forces a raw (non-page-mapped) access mode.
 */
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	/* page mapping requires ZONE_DEVICE support in this kernel build */
	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	/* the hosting region must advertise pagemap capability */
	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	/* pfn and btt devices manage their own mapping mode */
	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	/* refuse when the namespace range partially overlaps System RAM */
	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	/* only map pages when the arch maps pmem write-back cacheable */
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
f979b13c3   Dan Williams   libnvdimm, label:...
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
  unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
  {
  	if (is_namespace_pmem(&ndns->dev)) {
  		struct nd_namespace_pmem *nspm;
  
  		nspm = to_nd_namespace_pmem(&ndns->dev);
  		if (nspm->lbasize == 0 || nspm->lbasize == 512)
  			/* default */;
  		else if (nspm->lbasize == 4096)
  			return 4096;
  		else
  			dev_WARN(&ndns->dev, "unsupported sector size: %ld
  ",
  					nspm->lbasize);
  	}
  
  	/*
  	 * There is no namespace label (is_namespace_io()), or the label
  	 * indicates the default sector size.
  	 */
  	return 512;
  }
  EXPORT_SYMBOL(pmem_sector_size);
5212e11fd   Vishal Verma   nd_btt: atomic se...
149
150
151
152
  const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
  		char *name)
  {
  	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
004f1afbe   Dan Williams   libnvdimm, pmem: ...
153
  	const char *suffix = NULL;
5212e11fd   Vishal Verma   nd_btt: atomic se...
154

0731de0dd   Dan Williams   libnvdimm, pfn: m...
155
156
  	if (ndns->claim && is_nd_btt(ndns->claim))
  		suffix = "s";
5212e11fd   Vishal Verma   nd_btt: atomic se...
157

004f1afbe   Dan Williams   libnvdimm, pmem: ...
158
  	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
012207334   Dan Williams   libnvdimm, namesp...
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
  		int nsidx = 0;
  
  		if (is_namespace_pmem(&ndns->dev)) {
  			struct nd_namespace_pmem *nspm;
  
  			nspm = to_nd_namespace_pmem(&ndns->dev);
  			nsidx = nspm->id;
  		}
  
  		if (nsidx)
  			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
  					suffix ? suffix : "");
  		else
  			sprintf(name, "pmem%d%s", nd_region->id,
  					suffix ? suffix : "");
004f1afbe   Dan Williams   libnvdimm, pmem: ...
174
  	} else if (is_namespace_blk(&ndns->dev)) {
5212e11fd   Vishal Verma   nd_btt: atomic se...
175
176
177
  		struct nd_namespace_blk *nsblk;
  
  		nsblk = to_nd_namespace_blk(&ndns->dev);
004f1afbe   Dan Williams   libnvdimm, pmem: ...
178
179
  		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
  				suffix ? suffix : "");
5212e11fd   Vishal Verma   nd_btt: atomic se...
180
181
182
183
184
185
186
  	} else {
  		return NULL;
  	}
  
  	return name;
  }
  EXPORT_SYMBOL(nvdimm_namespace_disk_name);
6ec689542   Vishal Verma   libnvdimm, btt: w...
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
  const u8 *nd_dev_to_uuid(struct device *dev)
  {
  	static const u8 null_uuid[16];
  
  	if (!dev)
  		return null_uuid;
  
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
  
  		return nspm->uuid;
  	} else if (is_namespace_blk(dev)) {
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
  
  		return nsblk->uuid;
  	} else
  		return null_uuid;
  }
  EXPORT_SYMBOL(nd_dev_to_uuid);
3d88002e4   Dan Williams   libnvdimm: suppor...
206
207
208
209
210
211
212
213
214
/* sysfs 'nstype' (read-only): numeric namespace type of the parent region */
static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
215
216
217
218
219
220
221
222
223
224
225
/*
 * __alt_name_store - set a namespace's alternate (friendly) name
 * @dev: pmem or blk namespace device
 * @buf: user-supplied name, whitespace is trimmed
 * @len: length of @buf
 *
 * Returns @len on success, -ENXIO for other device types, -EBUSY while
 * the namespace is active or claimed, -EINVAL when the trimmed name
 * exceeds NSLABEL_NAME_LEN, or -ENOMEM.  Caller holds the locks taken
 * by alt_name_store().
 */
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	/* locate the alt_name slot for this namespace flavor */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	/* refuse while the namespace is bound to a driver or claimed */
	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	/* swap in the new buffer, releasing any previous name */
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}
1b40e09a1   Dan Williams   libnvdimm: blk la...
256
257
  static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
  {
8c2f7e865   Dan Williams   libnvdimm: infras...
258
  	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
1b40e09a1   Dan Williams   libnvdimm: blk la...
259
260
261
262
263
264
265
266
267
268
269
270
271
272
  	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
  	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  	struct nd_label_id label_id;
  	resource_size_t size = 0;
  	struct resource *res;
  
  	if (!nsblk->uuid)
  		return 0;
  	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
  	for_each_dpa_resource(ndd, res)
  		if (strcmp(res->name, label_id.id) == 0)
  			size += resource_size(res);
  	return size;
  }
047fc8a1f   Ross Zwisler   libnvdimm, nfit, ...
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
/*
 * __nd_namespace_blk_validate - check that labels and resources agree
 * @nsblk: namespace to validate
 *
 * Returns true only when every dpa resource carrying this namespace's
 * label id has been acknowledged by a label update and the cached
 * nsblk->res[] array exactly mirrors the live resource list.  Caller
 * holds the nvdimm_bus lock (see nd_namespace_blk_validate()).
 */
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	/* unconfigured namespace, or dimm label data unavailable */
	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	/* every cached resource pointer must still be on the live list */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}
  
/*
 * nd_namespace_blk_validate - locked wrapper for __nd_namespace_blk_validate()
 *
 * NOTE(review): the helper returns bool, so despite the resource_size_t
 * return type callers only ever see 0 or 1 here — confirm intent.
 */
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
f524bf271   Dan Williams   libnvdimm: write ...
330
331
332
/*
 * nd_namespace_label_update - push the in-memory namespace config to labels
 * @nd_region: region hosting the namespace
 * @dev: pmem or blk namespace device
 *
 * A size of 0 with a uuid set requests deletion of the existing labels;
 * an unset uuid (or, for blk, unset lbasize) means there is nothing
 * valid to write yet, so return 0 without touching the dimms.
 */
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	/* silently skip when busy; the WARN above already flagged it */
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}
bf9bccc14   Dan Williams   libnvdimm: pmem l...
366
367
368
/*
 * sysfs 'alt_name' (write): update the friendly name and sync it to the
 * namespace labels.  Lock order: device lock, then nvdimm_bus lock.
 */
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	/* let in-flight probes settle before mutating namespace state */
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
  
  static ssize_t alt_name_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	char *ns_altname;
  
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
  
  		ns_altname = nspm->alt_name;
  	} else if (is_namespace_blk(dev)) {
1b40e09a1   Dan Williams   libnvdimm: blk la...
395
396
397
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
  
  		ns_altname = nsblk->alt_name;
bf9bccc14   Dan Williams   libnvdimm: pmem l...
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
  	} else
  		return -ENXIO;
  
  	return sprintf(buf, "%s
  ", ns_altname ? ns_altname : "");
  }
  static DEVICE_ATTR_RW(alt_name);
  
/*
 * scan_free - release @n bytes of @label_id allocation from one mapping
 *
 * Works backward from the last (highest-dpa) resource tagged with
 * @label_id: whole resources are deleted until less than a
 * resource-worth remains, then the final resource is shrunk in place.
 * Returns 0 when nothing is left to free, else adjust_resource() status.
 */
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		/* find the last resource carrying this label id */
		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		/* flag the shrink for acknowledgment by a label update */
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}
  
  /**
   * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
   * @nd_region: the set of dimms to reclaim @n bytes from
   * @label_id: unique identifier for the namespace consuming this dpa range
   * @n: number of bytes per-dimm to release
   *
   * Assumes resources are ordered.  Starting from the end try to
   * adjust_resource() the allocation to @n, but if @n is larger than the
   * allocation delete it and find the 'new' last allocation in the label
   * set.
   */
  static int shrink_dpa_allocation(struct nd_region *nd_region,
  		struct nd_label_id *label_id, resource_size_t n)
  {
  	int i;
  
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  		int rc;
  
  		rc = scan_free(nd_region, nd_mapping, label_id, n);
  		if (rc)
  			return rc;
  	}
  
  	return 0;
  }
  
  static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
  		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
  		resource_size_t n)
  {
  	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
  	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  	resource_size_t first_dpa;
  	struct resource *res;
  	int rc = 0;
  
  	/* allocate blk from highest dpa first */
  	if (is_blk)
  		first_dpa = nd_mapping->start + nd_mapping->size - n;
  	else
  		first_dpa = nd_mapping->start;
  
  	/* first resource allocation for this label-id or dimm */
  	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
  	if (!res)
  		rc = -EBUSY;
  
  	nd_dbg_dpa(nd_region, ndd, res, "init %d
  ", rc);
  	return rc ? n : 0;
  }
762d067db   Dan Williams   libnvdimm, namesp...
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
  
/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists).  If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	unsigned long align;

	/* trim @valid inward to the per-dimm share of the region alignment */
	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		/* let aliased pmem allocations trim the blk candidate range */
		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
  
/* position of a validated free range relative to an existing resource */
enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

/*
 * scan_allocate - try to allocate @n bytes for @label_id on one mapping
 *
 * Walks the dimm's dpa resources looking for validated free space
 * before, between, and after existing allocations, growing adjacent
 * same-label resources in place where possible.  Returns the number of
 * bytes that could NOT be allocated (0 on full success).
 */
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	/* remember the last pre-existing resource with this label, if any */
	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			/* flag the grow for acknowledgment by a label update */
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}
1b40e09a1   Dan Williams   libnvdimm: blk la...
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
/*
 * merge_dpa - coalesce physically adjacent same-label blk resources
 *
 * pmem label ids are skipped.  The scan restarts after every merge
 * because freeing @next invalidates the in-progress iteration.
 */
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		/* only merge contiguous neighbors carrying the same label */
		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		/* flag the merge for acknowledgment by a label update */
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}
12e3129e2   Keith Busch   libnvdimm: Use ma...
762
/*
 * __reserve_free_pmem - reserve all free pmem capacity on one dimm
 * @dev: bus child; only memory (pmem) regions are considered
 * @data: the struct nvdimm whose mapping should be reserved
 *
 * Tags every remaining byte of pmem-capable dpa with the
 * "pmem-reserve" label so a following blk allocation pass cannot
 * consume space pmem may need.  Returns -ENXIO when the full amount
 * could not be reserved, 0 otherwise.
 */
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		/* only one mapping per region can belong to this dimm */
		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}
12e3129e2   Keith Busch   libnvdimm: Use ma...
798
  void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
1b40e09a1   Dan Williams   libnvdimm: blk la...
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
  		struct nd_mapping *nd_mapping)
  {
  	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  	struct resource *res, *_res;
  
  	for_each_dpa_resource_safe(ndd, res, _res)
  		if (strcmp(res->name, "pmem-reserve") == 0)
  			nvdimm_free_dpa(ndd, res);
  }
  
  static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
  		struct nd_mapping *nd_mapping)
  {
  	struct nvdimm *nvdimm = nd_mapping->nvdimm;
  	int rc;
  
  	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
  			__reserve_free_pmem);
  	if (rc)
  		release_free_pmem(nvdimm_bus, nd_mapping);
  	return rc;
  }
bf9bccc14   Dan Williams   libnvdimm: pmem l...
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 *
 * Returns 0 on success, -ENXIO on allocation underrun, or the error
 * from reserving pmem / merging resources.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			/* pmem allocations skip the blk-only pass (j starts at 1) */
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		/* coalesce adjacent extents that now share this label id */
		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
0e3b0d123   Dan Williams   libnvdimm, namesp...
883
/*
 * Derive the namespace's system-physical-address (SPA) resource from its
 * per-dimm DPA allocation: find the label-id extent on mapping[0] and scale
 * its offset by the interleave width.  On any inconsistency (no uuid, no
 * label data, or no matching extent) the namespace is given zero size.
 */
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		/* a sized namespace without a uuid is a driver bug */
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;	/* NB: shadows the outer SPA @res */

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		/* uuid set but no matching dpa extent found */
		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}
bd26d0d0c   Dmitry Krivenok   nvdimm: improve d...
923
924
925
926
927
928
929
930
931
  static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
  {
  	if (!uuid) {
  		dev_dbg(dev, "%s: uuid not set
  ", where);
  		return true;
  	}
  	return false;
  }
bf9bccc14   Dan Williams   libnvdimm: pmem l...
932
933
934
935
/*
 * Resize a namespace's DPA allocation to @val total bytes.  Callers must
 * hold the device and nvdimm-bus locks (see size_store()).  Grows or
 * shrinks the per-dimm allocations symmetrically across the interleave
 * set, updates the pmem SPA resource, and may schedule deletion of a
 * now-empty, unclaimed, non-seed namespace.
 */
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	/* no resizing while a driver is attached or the namespace is claimed */
	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	/* size must be a multiple of the region's alignment */
	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	/* convert the totals to per-dimm quantities for [shrink|grow] */
	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}
  
/*
 * sysfs 'size' store: parse the requested size, resize the allocation
 * under the device + bus locks, persist the change to the labels, and
 * treat "size = 0" as namespace deletion (drop the uuid).
 */
static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	/* lock order: device lock, then nvdimm bus lock */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
8c2f7e865   Dan Williams   libnvdimm: infras...
1070
  resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1071
  {
8c2f7e865   Dan Williams   libnvdimm: infras...
1072
  	struct device *dev = &ndns->dev;
1b40e09a1   Dan Williams   libnvdimm: blk la...
1073

bf9bccc14   Dan Williams   libnvdimm: pmem l...
1074
1075
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
8c2f7e865   Dan Williams   libnvdimm: infras...
1076
  		return resource_size(&nspm->nsio.res);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1077
  	} else if (is_namespace_blk(dev)) {
8c2f7e865   Dan Williams   libnvdimm: infras...
1078
  		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1079
1080
  	} else if (is_namespace_io(dev)) {
  		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
8c2f7e865   Dan Williams   libnvdimm: infras...
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
  		return resource_size(&nsio->res);
  	} else
  		WARN_ONCE(1, "unknown namespace type
  ");
  	return 0;
  }
  
  resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
  {
  	resource_size_t size;
1b40e09a1   Dan Williams   libnvdimm: blk la...
1091

8c2f7e865   Dan Williams   libnvdimm: infras...
1092
1093
1094
1095
1096
1097
1098
  	nvdimm_bus_lock(&ndns->dev);
  	size = __nvdimm_namespace_capacity(ndns);
  	nvdimm_bus_unlock(&ndns->dev);
  
  	return size;
  }
  EXPORT_SYMBOL(nvdimm_namespace_capacity);
08e6b3c6e   Dan Williams   libnvdimm: Introd...
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
  bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
  {
  	int i;
  	bool locked = false;
  	struct device *dev = &ndns->dev;
  	struct nd_region *nd_region = to_nd_region(dev->parent);
  
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  		struct nvdimm *nvdimm = nd_mapping->nvdimm;
  
  		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
  			dev_dbg(dev, "%s locked
  ", nvdimm_name(nvdimm));
  			locked = true;
  		}
  	}
  	return locked;
  }
  EXPORT_SYMBOL(nvdimm_namespace_locked);
8c2f7e865   Dan Williams   libnvdimm: infras...
1119
1120
1121
1122
1123
1124
  static ssize_t size_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	return sprintf(buf, "%llu
  ", (unsigned long long)
  			nvdimm_namespace_capacity(to_ndns(dev)));
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1125
  }
b44fe7604   Fabian Frederick   libnvdimm, namesp...
1126
  static DEVICE_ATTR(size, 0444, size_show, size_store);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1127

f95b4bca9   Dan Williams   libnvdimm, namesp...
1128
  static u8 *namespace_to_uuid(struct device *dev)
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1129
  {
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1130
1131
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
f95b4bca9   Dan Williams   libnvdimm, namesp...
1132
  		return nspm->uuid;
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1133
  	} else if (is_namespace_blk(dev)) {
1b40e09a1   Dan Williams   libnvdimm: blk la...
1134
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
f95b4bca9   Dan Williams   libnvdimm, namesp...
1135
  		return nsblk->uuid;
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1136
  	} else
f95b4bca9   Dan Williams   libnvdimm, namesp...
1137
1138
1139
1140
1141
1142
1143
  		return ERR_PTR(-ENXIO);
  }
  
  static ssize_t uuid_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	u8 *uuid = namespace_to_uuid(dev);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1144

f95b4bca9   Dan Williams   libnvdimm, namesp...
1145
1146
  	if (IS_ERR(uuid))
  		return PTR_ERR(uuid);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
  	if (uuid)
  		return sprintf(buf, "%pUb
  ", uuid);
  	return sprintf(buf, "
  ");
  }
  
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can updates all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 *
 * On success ownership of @new_uuid transfers to the namespace (the old
 * uuid, if any, is freed).  Returns -EINVAL for a duplicate uuid and
 * -EBUSY when labels have already been written under the old uuid.
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		/* rename the dpa extents in place to the new label id */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		/* mark on-media labels under the old id for reaping */
		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;

			if (!nd_label)
				continue;
			nd_label_gen_id(&label_id, nd_label->uuid,
					__le32_to_cpu(nd_label->flags));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}
  
/*
 * sysfs 'uuid' store: parse the incoming uuid, install it on the
 * namespace (rejecting rename of an already-labeled namespace), and
 * persist the change to the labels — all under the device + bus locks.
 */
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	/* a claimed namespace (btt/pfn/dax) cannot change uuid */
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);	/* ownership was not transferred on failure */
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
  
  static ssize_t resource_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	struct resource *res;
  
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
  
  		res = &nspm->nsio.res;
  	} else if (is_namespace_io(dev)) {
  		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
  
  		res = &nsio->res;
  	} else
  		return -ENXIO;
  
  	/* no address to convey if the namespace has no allocation */
  	if (resource_size(res) == 0)
  		return -ENXIO;
  	return sprintf(buf, "%#llx
  ", (unsigned long long) res->start);
  }
5cf81ce18   Dan Williams   libnvdimm: Conver...
1290
  static DEVICE_ATTR_ADMIN_RO(resource);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1291

f979b13c3   Dan Williams   libnvdimm, label:...
1292
/*
 * Logical block sizes (bytes) selectable via the 'sector_size' attribute.
 * NOTE(review): the trailing 0 looks like a list terminator consumed by
 * nd_size_select_show()/nd_size_select_store() — confirm against those
 * helpers before relying on it.
 */
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
1b40e09a1   Dan Williams   libnvdimm: blk la...
1296
1297
1298
  static ssize_t sector_size_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
f979b13c3   Dan Williams   libnvdimm, label:...
1299
1300
  	if (is_namespace_blk(dev)) {
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1b40e09a1   Dan Williams   libnvdimm: blk la...
1301

b2c48f9f9   Dan Williams   libnvdimm: rename...
1302
  		return nd_size_select_show(nsblk->lbasize,
f979b13c3   Dan Williams   libnvdimm, label:...
1303
1304
  				blk_lbasize_supported, buf);
  	}
1b40e09a1   Dan Williams   libnvdimm: blk la...
1305

f979b13c3   Dan Williams   libnvdimm, label:...
1306
1307
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
b2c48f9f9   Dan Williams   libnvdimm: rename...
1308
  		return nd_size_select_show(nspm->lbasize,
f979b13c3   Dan Williams   libnvdimm, label:...
1309
1310
1311
  				pmem_lbasize_supported, buf);
  	}
  	return -ENXIO;
1b40e09a1   Dan Williams   libnvdimm: blk la...
1312
1313
1314
1315
1316
  }
  
/*
 * sysfs 'sector_size' store: select a new logical block size from the
 * per-type supported table and persist it to the labels, under the
 * device + bus locks.  Rejected while the namespace is claimed.
 */
static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
0ba1c6348   Dan Williams   libnvdimm: write ...
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
  static ssize_t dpa_extents_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	struct nd_region *nd_region = to_nd_region(dev->parent);
  	struct nd_label_id label_id;
  	int count = 0, i;
  	u8 *uuid = NULL;
  	u32 flags = 0;
  
  	nvdimm_bus_lock(dev);
  	if (is_namespace_pmem(dev)) {
  		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
  
  		uuid = nspm->uuid;
  		flags = 0;
  	} else if (is_namespace_blk(dev)) {
  		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
  
  		uuid = nsblk->uuid;
  		flags = NSLABEL_FLAG_LOCAL;
  	}
  
  	if (!uuid)
  		goto out;
  
  	nd_label_gen_id(&label_id, uuid, flags);
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  		struct resource *res;
  
  		for_each_dpa_resource(ndd, res)
  			if (strcmp(res->name, label_id.id) == 0)
  				count++;
  	}
   out:
  	nvdimm_bus_unlock(dev);
  
  	return sprintf(buf, "%d
  ", count);
  }
  static DEVICE_ATTR_RO(dpa_extents);
14e494542   Vishal Verma   libnvdimm, btt: B...
1394
1395
1396
1397
1398
1399
1400
1401
1402
/*
 * Pick the BTT claim class (v1.1 vs v2.0) compatible with the label
 * index format found across all of the region's dimms; -ENXIO when the
 * dimms disagree about their label versions.
 */
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}
8c2f7e865   Dan Williams   libnvdimm: infras...
1452
1453
1454
1455
1456
  static ssize_t holder_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	struct nd_namespace_common *ndns = to_ndns(dev);
  	ssize_t rc;
87a30e1f0   Dan Williams   driver-core, libn...
1457
  	nd_device_lock(dev);
8c2f7e865   Dan Williams   libnvdimm: infras...
1458
1459
  	rc = sprintf(buf, "%s
  ", ndns->claim ? dev_name(ndns->claim) : "");
87a30e1f0   Dan Williams   driver-core, libn...
1460
  	nd_device_unlock(dev);
8c2f7e865   Dan Williams   libnvdimm: infras...
1461
1462
1463
1464
  
  	return rc;
  }
  static DEVICE_ATTR_RO(holder);
ab84b77af   Ira Weiny   libnvdimm/namsepa...
1465
  static int __holder_class_store(struct device *dev, const char *buf)
b3fde74ea   Dan Williams   libnvdimm, label:...
1466
1467
1468
1469
1470
  {
  	struct nd_namespace_common *ndns = to_ndns(dev);
  
  	if (dev->driver || ndns->claim)
  		return -EBUSY;
ab84b77af   Ira Weiny   libnvdimm/namsepa...
1471
1472
1473
1474
1475
1476
1477
  	if (sysfs_streq(buf, "btt")) {
  		int rc = btt_claim_class(dev);
  
  		if (rc < NVDIMM_CCLASS_NONE)
  			return rc;
  		ndns->claim_class = rc;
  	} else if (sysfs_streq(buf, "pfn"))
b3fde74ea   Dan Williams   libnvdimm, label:...
1478
  		ndns->claim_class = NVDIMM_CCLASS_PFN;
075c3fdd5   Dan Williams   libnvdimm/namespa...
1479
  	else if (sysfs_streq(buf, "dax"))
b3fde74ea   Dan Williams   libnvdimm, label:...
1480
  		ndns->claim_class = NVDIMM_CCLASS_DAX;
075c3fdd5   Dan Williams   libnvdimm/namespa...
1481
  	else if (sysfs_streq(buf, ""))
b3fde74ea   Dan Williams   libnvdimm, label:...
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
  		ndns->claim_class = NVDIMM_CCLASS_NONE;
  	else
  		return -EINVAL;
  
  	return 0;
  }
  
/*
 * sysfs 'holder_class' store: select the claim class under the
 * device + bus locks and persist the change to the labels.
 */
static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
  
  static ssize_t holder_class_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	struct nd_namespace_common *ndns = to_ndns(dev);
  	ssize_t rc;
87a30e1f0   Dan Williams   driver-core, libn...
1514
  	nd_device_lock(dev);
b3fde74ea   Dan Williams   libnvdimm, label:...
1515
1516
1517
  	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
  		rc = sprintf(buf, "
  ");
14e494542   Vishal Verma   libnvdimm, btt: B...
1518
1519
  	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
  			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
b3fde74ea   Dan Williams   libnvdimm, label:...
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
  		rc = sprintf(buf, "btt
  ");
  	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
  		rc = sprintf(buf, "pfn
  ");
  	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
  		rc = sprintf(buf, "dax
  ");
  	else
  		rc = sprintf(buf, "<unknown>
  ");
87a30e1f0   Dan Williams   driver-core, libn...
1531
  	nd_device_unlock(dev);
b3fde74ea   Dan Williams   libnvdimm, label:...
1532
1533
1534
1535
  
  	return rc;
  }
  static DEVICE_ATTR_RW(holder_class);
0731de0dd   Dan Williams   libnvdimm, pfn: m...
1536
1537
1538
1539
1540
1541
1542
  static ssize_t mode_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	struct nd_namespace_common *ndns = to_ndns(dev);
  	struct device *claim;
  	char *mode;
  	ssize_t rc;
87a30e1f0   Dan Williams   driver-core, libn...
1543
  	nd_device_lock(dev);
0731de0dd   Dan Williams   libnvdimm, pfn: m...
1544
  	claim = ndns->claim;
9c4124281   Dan Williams   libnvdimm: fix mo...
1545
  	if (claim && is_nd_btt(claim))
0731de0dd   Dan Williams   libnvdimm, pfn: m...
1546
  		mode = "safe";
9c4124281   Dan Williams   libnvdimm: fix mo...
1547
1548
  	else if (claim && is_nd_pfn(claim))
  		mode = "memory";
cd03412a5   Dan Williams   libnvdimm, dax: i...
1549
1550
  	else if (claim && is_nd_dax(claim))
  		mode = "dax";
9c4124281   Dan Williams   libnvdimm: fix mo...
1551
1552
  	else if (!claim && pmem_should_map_pages(dev))
  		mode = "memory";
0731de0dd   Dan Williams   libnvdimm, pfn: m...
1553
1554
1555
1556
  	else
  		mode = "raw";
  	rc = sprintf(buf, "%s
  ", mode);
87a30e1f0   Dan Williams   driver-core, libn...
1557
  	nd_device_unlock(dev);
0731de0dd   Dan Williams   libnvdimm, pfn: m...
1558
1559
1560
1561
  
  	return rc;
  }
  static DEVICE_ATTR_RO(mode);
8c2f7e865   Dan Williams   libnvdimm: infras...
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
  static ssize_t force_raw_store(struct device *dev,
  		struct device_attribute *attr, const char *buf, size_t len)
  {
  	bool force_raw;
  	int rc = strtobool(buf, &force_raw);
  
  	if (rc)
  		return rc;
  
  	to_ndns(dev)->force_raw = force_raw;
  	return len;
  }
  
  static ssize_t force_raw_show(struct device *dev,
  		struct device_attribute *attr, char *buf)
  {
  	return sprintf(buf, "%d
  ", to_ndns(dev)->force_raw);
  }
  static DEVICE_ATTR_RW(force_raw);
3d88002e4   Dan Williams   libnvdimm: suppor...
1582
1583
/*
 * Superset of sysfs attributes for all namespace flavors; per-device
 * filtering happens in namespace_visible().
 */
static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1596
1597
1598
1599
/*
 * Decide which attributes are visible, and with what mode, for a given
 * namespace device type.
 */
static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	/* blk namespaces are not contiguous, so 'resource' is meaningless */
	if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
		return 0;

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		/* label-backed namespaces allow size to be written */
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	/* io namespaces: expose only the attributes that apply */
	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}
3d88002e4   Dan Williams   libnvdimm: suppor...
1618
1619
/* Attribute group with per-device visibility filtering. */
static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};
  
/* Groups attached to every namespace device (core, namespace, numa). */
static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
78c81cc89   Dan Williams   libnvdimm: Move a...
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
/* Device types distinguishing the three namespace flavors. */
static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
	.groups = nd_namespace_attribute_groups,
};
  
  static bool is_namespace_pmem(const struct device *dev)
  {
  	return dev ? dev->type == &namespace_pmem_device_type : false;
  }
  
  static bool is_namespace_blk(const struct device *dev)
  {
  	return dev ? dev->type == &namespace_blk_device_type : false;
  }
  
  static bool is_namespace_io(const struct device *dev)
  {
  	return dev ? dev->type == &namespace_io_device_type : false;
  }
8c2f7e865   Dan Williams   libnvdimm: infras...
1661
1662
1663
/*
 * Resolve and sanity-check the namespace backing @dev, where @dev is
 * either a namespace itself or a btt/pfn/dax personality claiming one.
 * Returns the namespace on success, or an ERR_PTR:
 *   -ENODEV  no namespace, too small, or uuid/sector-size not set
 *   -EBUSY   raw namespace is still driver-attached
 *   -ENXIO   claim bookkeeping mismatch
 *   -EACCES  namespace is security-locked
 *   -EOPNOTSUPP  page-mapped range misaligned for memremap
 */
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progess probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		nd_device_lock(&ndns->dev);
		nd_device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		/* the namespace must point back at its claiming device */
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Note, alignment validation for fsdax and devdax mode
	 * namespaces happens in nd_pfn_validate() where infoblock
	 * padding parameters can be applied.
	 */
	if (pmem_should_map_pages(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
		struct resource *res = &nsio->res;

		if (!IS_ALIGNED(res->start | (res->end + 1),
					memremap_compat_align())) {
			dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
8f4b01fcd   Aneesh Kumar K.V   libnvdimm/namespa...
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
  int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
  		resource_size_t size)
  {
  	if (is_namespace_blk(&ndns->dev))
  		return 0;
  	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
  }
  EXPORT_SYMBOL_GPL(devm_namespace_enable);
  
  void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
  {
  	if (is_namespace_blk(&ndns->dev))
  		return;
  	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
  }
  EXPORT_SYMBOL_GPL(devm_namespace_disable);
3d88002e4   Dan Williams   libnvdimm: suppor...
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
  static struct device **create_namespace_io(struct nd_region *nd_region)
  {
  	struct nd_namespace_io *nsio;
  	struct device *dev, **devs;
  	struct resource *res;
  
  	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
  	if (!nsio)
  		return NULL;
  
  	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
  	if (!devs) {
  		kfree(nsio);
  		return NULL;
  	}
8c2f7e865   Dan Williams   libnvdimm: infras...
1787
  	dev = &nsio->common.dev;
3d88002e4   Dan Williams   libnvdimm: suppor...
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
  	dev->type = &namespace_io_device_type;
  	dev->parent = &nd_region->dev;
  	res = &nsio->res;
  	res->name = dev_name(&nd_region->dev);
  	res->flags = IORESOURCE_MEM;
  	res->start = nd_region->ndr_start;
  	res->end = res->start + nd_region->ndr_size - 1;
  
  	devs[0] = dev;
  	return devs;
  }
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1799
1800
1801
1802
1803
1804
1805
1806
  static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
  		u64 cookie, u16 pos)
  {
  	struct nd_namespace_label *found = NULL;
  	int i;
  
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
faec6f8a1   Dan Williams   libnvdimm, label:...
1807
1808
  		struct nd_interleave_set *nd_set = nd_region->nd_set;
  		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
ae8219f18   Dan Williams   libnvdimm, label:...
1809
  		struct nd_label_ent *label_ent;
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1810
  		bool found_uuid = false;
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1811

ae8219f18   Dan Williams   libnvdimm, label:...
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
  		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
  			struct nd_namespace_label *nd_label = label_ent->label;
  			u16 position, nlabel;
  			u64 isetcookie;
  
  			if (!nd_label)
  				continue;
  			isetcookie = __le64_to_cpu(nd_label->isetcookie);
  			position = __le16_to_cpu(nd_label->position);
  			nlabel = __le16_to_cpu(nd_label->nlabel);
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1822
1823
1824
1825
1826
1827
  
  			if (isetcookie != cookie)
  				continue;
  
  			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
  				continue;
faec6f8a1   Dan Williams   libnvdimm, label:...
1828
1829
1830
1831
1832
  			if (namespace_label_has(ndd, type_guid)
  					&& !guid_equal(&nd_set->type_guid,
  						&nd_label->type_guid)) {
  				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb
  ",
db5d00c93   Andy Shevchenko   libnvdimm, namesp...
1833
1834
  						&nd_set->type_guid,
  						&nd_label->type_guid);
faec6f8a1   Dan Williams   libnvdimm, label:...
1835
1836
  				continue;
  			}
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1837
  			if (found_uuid) {
426824d63   Dan Williams   libnvdimm: remove...
1838
1839
  				dev_dbg(ndd->dev, "duplicate entry for uuid
  ");
bf9bccc14   Dan Williams   libnvdimm: pmem l...
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
  				return false;
  			}
  			found_uuid = true;
  			if (nlabel != nd_region->ndr_mappings)
  				continue;
  			if (position != pos)
  				continue;
  			found = nd_label;
  			break;
  		}
  		if (found)
  			break;
  	}
  	return found != NULL;
  }
  
/*
 * For each mapping, find the label matching @pmem_id, validate it
 * against the mapping's published DPA range, and move it to the front
 * of the mapping's label list.  Caller holds each nd_mapping->lock.
 */
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;
			nd_label = NULL;
		}

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(nd_label->dpa);
		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev), nd_label->uuid);
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}
  
/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nsindex: namespace index block providing the label-set cookie
 * @nd_label: target pmem namespace label to evaluate
 *
 * Returns the new namespace device, or an ERR_PTR (-EAGAIN when the
 * label belongs to a different interleave-set cookie and should be
 * retried against another label).
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex,
		struct nd_namespace_label *nd_label)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	struct nd_mapping *nd_mapping;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	/* tolerate labels stamped with the pre-4.14 "alternate" cookie */
	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nd_label->uuid);
		if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nd_label->uuid);
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	/* id of -1 keeps namespace_pmem_release() from freeing an ida slot */
	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nd_label->uuid);
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, nd_label->uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
		nspm->lbasize = __le64_to_cpu(label0->lbasize);
		ndd = to_ndd(nd_mapping);
		if (namespace_label_has(ndd, abstraction_guid))
			nspm->nsio.common.claim_class
				= to_nvdimm_cclass(&label0->abstraction_guid);
	}

	/* kmemdup() failures above are detected here */
	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}
	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}
1b40e09a1   Dan Williams   libnvdimm: blk la...
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
/*
 * Grow nsblk's resource pointer array by one and fill the new slot with
 * the dpa resource matching this namespace's label-id at @start.
 * Returns the matched resource, or NULL on allocation failure or if no
 * matching dpa extent exists.
 */
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	/* note: array of pointers, hence sizeof(void *) per element */
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	/* 'res' is reused below as the dpa-resource iterator */
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}
  
  static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
  {
  	struct nd_namespace_blk *nsblk;
  	struct device *dev;
  
  	if (!is_nd_blk(&nd_region->dev))
  		return NULL;
  
  	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
  	if (!nsblk)
  		return NULL;
8c2f7e865   Dan Williams   libnvdimm: infras...
2081
  	dev = &nsblk->common.dev;
1b40e09a1   Dan Williams   libnvdimm: blk la...
2082
2083
2084
2085
2086
2087
2088
2089
  	dev->type = &namespace_blk_device_type;
  	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
  	if (nsblk->id < 0) {
  		kfree(nsblk);
  		return NULL;
  	}
  	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
  	dev->parent = &nd_region->dev;
1b40e09a1   Dan Williams   libnvdimm: blk la...
2090

8c2f7e865   Dan Williams   libnvdimm: infras...
2091
  	return &nsblk->common.dev;
1b40e09a1   Dan Williams   libnvdimm: blk la...
2092
  }
98a29c39d   Dan Williams   libnvdimm, namesp...
2093
2094
2095
2096
2097
  static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
  {
  	struct nd_namespace_pmem *nspm;
  	struct resource *res;
  	struct device *dev;
c9e582aa6   Dan Williams   libnvdimm, nfit: ...
2098
  	if (!is_memory(&nd_region->dev))
98a29c39d   Dan Williams   libnvdimm, namesp...
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
  		return NULL;
  
  	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
  	if (!nspm)
  		return NULL;
  
  	dev = &nspm->nsio.common.dev;
  	dev->type = &namespace_pmem_device_type;
  	dev->parent = &nd_region->dev;
  	res = &nspm->nsio.res;
  	res->name = dev_name(&nd_region->dev);
  	res->flags = IORESOURCE_MEM;
  
  	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
  	if (nspm->id < 0) {
  		kfree(nspm);
  		return NULL;
  	}
  	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
98a29c39d   Dan Williams   libnvdimm, namesp...
2118
2119
2120
2121
2122
2123
  	nd_namespace_pmem_set_resource(nd_region, nspm, 0);
  
  	return dev;
  }
  
/*
 * Install a new namespace seed device so userspace always has an empty
 * namespace available for provisioning.  Caller holds the nvdimm bus
 * lock.  io regions have a fixed, single namespace and get no seed.
 */
void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	if (is_nd_blk(&nd_region->dev))
		nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	else
		nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create %s namespace\n",
				is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
	else
		nd_device_register(nd_region->ns_seed);
}
cd03412a5   Dan Williams   libnvdimm, dax: i...
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
  void nd_region_create_dax_seed(struct nd_region *nd_region)
  {
  	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
  	nd_region->dax_seed = nd_dax_create(nd_region);
  	/*
  	 * Seed creation failures are not fatal, provisioning is simply
  	 * disabled until memory becomes available
  	 */
  	if (!nd_region->dax_seed)
  		dev_err(&nd_region->dev, "failed to create dax namespace
  ");
  }
2dc43331e   Dan Williams   libnvdimm, pfn: f...
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
  void nd_region_create_pfn_seed(struct nd_region *nd_region)
  {
  	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
  	nd_region->pfn_seed = nd_pfn_create(nd_region);
  	/*
  	 * Seed creation failures are not fatal, provisioning is simply
  	 * disabled until memory becomes available
  	 */
  	if (!nd_region->pfn_seed)
  		dev_err(&nd_region->dev, "failed to create pfn namespace
  ");
  }
8c2f7e865   Dan Williams   libnvdimm: infras...
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
  void nd_region_create_btt_seed(struct nd_region *nd_region)
  {
  	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
  	nd_region->btt_seed = nd_btt_create(nd_region);
  	/*
  	 * Seed creation failures are not fatal, provisioning is simply
  	 * disabled until memory becomes available
  	 */
  	if (!nd_region->btt_seed)
  		dev_err(&nd_region->dev, "failed to create btt namespace
  ");
  }
8a5f50d3b   Dan Williams   libnvdimm, namesp...
2181
2182
2183
/*
 * If @nd_label's uuid matches an already-discovered namespace in @devs,
 * attach the label's dpa extent to that (blk) namespace.  Returns the
 * matching index, @count if no namespace matched, or -ENXIO on a
 * conflicting/unresolvable extent.
 */
static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		u8 *uuid = namespace_to_uuid(devs[i]);
		struct resource *res;

		if (IS_ERR_OR_NULL(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		if (is_namespace_blk(devs[i])) {
			res = nsblk_add_resource(nd_region, ndd,
					to_nd_namespace_blk(devs[i]),
					__le64_to_cpu(nd_label->dpa));
			if (!res)
				return -ENXIO;
			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
		} else {
			/* a pmem namespace cannot absorb extra extents */
			dev_err(&nd_region->dev,
					"error: conflicting extents for uuid: %pUb\n",
					nd_label->uuid);
			return -ENXIO;
		}
		break;
	}

	return i;
}
65853a1da   Colin Ian King   libnvdimm, namesp...
2220
/*
 * create_namespace_blk - instantiate a blk namespace from its first label
 * @nd_region: blk region being scanned
 * @nd_label: label carrying uuid/name/lbasize/dpa for the namespace
 * @count: index of this namespace in the scan (debug output only)
 *
 * Validates the label against the interleave-set identity when the label
 * format carries a type_guid/cookie, then allocates the namespace device
 * and seeds it with the label's first dpa resource.
 *
 * Returns the new device, ERR_PTR(-EAGAIN) for labels that do not belong
 * to this region (caller skips them), or ERR_PTR(-ENOMEM)/ERR_PTR(-ENXIO)
 * on allocation/resource failure.
 */
static struct device *create_namespace_blk(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, int count)
{

	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_blk *nsblk;
	char name[NSLABEL_NAME_LEN];
	struct device *dev = NULL;
	struct resource *res;

	/* v1.2 labels embed the region identity; reject mismatches */
	if (namespace_label_has(ndd, type_guid)) {
		if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
					&nd_set->type_guid,
					&nd_label->type_guid);
			return ERR_PTR(-EAGAIN);
		}

		if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
			dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
					nd_set->cookie2,
					__le64_to_cpu(nd_label->isetcookie));
			return ERR_PTR(-EAGAIN);
		}
	}

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return ERR_PTR(-ENOMEM);
	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	dev->parent = &nd_region->dev;
	nsblk->id = -1;	/* ida id assigned at registration time */
	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
			GFP_KERNEL);
	if (namespace_label_has(ndd, abstraction_guid))
		nsblk->common.claim_class
			= to_nvdimm_cclass(&nd_label->abstraction_guid);
	/* uuid allocation checked here; blk_err path frees via release() */
	if (!nsblk->uuid)
		goto blk_err;
	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
	if (name[0]) {
		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
				GFP_KERNEL);
		if (!nsblk->alt_name)
			goto blk_err;
	}
	res = nsblk_add_resource(nd_region, ndd, nsblk,
			__le64_to_cpu(nd_label->dpa));
	if (!res)
		goto blk_err;
	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
	return dev;
 blk_err:
	namespace_blk_release(dev);
	return ERR_PTR(-ENXIO);
}
6ff3e912d   Dan Williams   libnvdimm, namesp...
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
  static int cmp_dpa(const void *a, const void *b)
  {
  	const struct device *dev_a = *(const struct device **) a;
  	const struct device *dev_b = *(const struct device **) b;
  	struct nd_namespace_blk *nsblk_a, *nsblk_b;
  	struct nd_namespace_pmem *nspm_a, *nspm_b;
  
  	if (is_namespace_io(dev_a))
  		return 0;
  
  	if (is_namespace_blk(dev_a)) {
  		nsblk_a = to_nd_namespace_blk(dev_a);
  		nsblk_b = to_nd_namespace_blk(dev_b);
  
  		return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
  				sizeof(resource_size_t));
  	}
  
  	nspm_a = to_nd_namespace_pmem(dev_a);
  	nspm_b = to_nd_namespace_pmem(dev_b);
  
  	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
  			sizeof(resource_size_t));
  }
8a5f50d3b   Dan Williams   libnvdimm, namesp...
2305
2306
/*
 * scan_labels - derive the set of namespaces from mapping[0]'s labels
 * @nd_region: region whose labels were loaded by init_active_labels()
 *
 * Walks the labels of the first mapping, creating a namespace device for
 * every new uuid and merging additional extents into existing devices via
 * add_namespace_resource().  If nothing was found, a single zero-sized
 * "seed" namespace is published for userspace to configure.  For memory
 * (pmem) regions, labels not claimed by a created namespace are freed so
 * each mapping retains exactly @count entries.
 *
 * Caller must hold all mapping locks (see create_namespaces()).
 *
 * Returns a NULL-terminated, dpa-sorted array of devices (kfree'd by the
 * caller) or NULL on error.
 */
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;
		u32 flags;

		if (!nd_label)
			continue;
		flags = __le32_to_cpu(nd_label->flags);
		/* only consider labels of this region's personality */
		if (is_nd_blk(&nd_region->dev)
				== !!(flags & NSLABEL_FLAG_LOCAL))
			/* pass, region matches label type */;
		else
			continue;

		/* skip labels that describe extents outside of the region */
		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
		    __le64_to_cpu(nd_label->dpa) > map_end)
				continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		/* i < count: label merged into an existing namespace */
		if (i < count)
			continue;
		/* grow devs by one slot (+1 for the NULL terminator) */
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		if (is_nd_blk(&nd_region->dev))
			dev = create_namespace_blk(nd_region, nd_label, count);
		else {
			struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
		}

		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;

	}

	dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
			count, is_nd_blk(&nd_region->dev)
			? "blk" : "pmem", count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		if (is_nd_blk(&nd_region->dev)) {
			struct nd_namespace_blk *nsblk;

			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
			if (!nsblk)
				goto err;
			dev = &nsblk->common.dev;
			dev->type = &namespace_blk_device_type;
		} else {
			struct nd_namespace_pmem *nspm;

			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
			if (!nspm)
				goto err;
			dev = &nspm->nsio.common.dev;
			dev->type = &namespace_pmem_device_type;
			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		}
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			/* keep the first @count entries, free the rest */
			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			if (is_nd_blk(&nd_region->dev))
				namespace_blk_release(devs[i]);
			else
				namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}
8a5f50d3b   Dan Williams   libnvdimm, namesp...
2435
  static struct device **create_namespaces(struct nd_region *nd_region)
ae8219f18   Dan Williams   libnvdimm, label:...
2436
  {
59858d3d5   Colin Ian King   libnvdimm, namesp...
2437
  	struct nd_mapping *nd_mapping;
ae8219f18   Dan Williams   libnvdimm, label:...
2438
  	struct device **devs;
8a5f50d3b   Dan Williams   libnvdimm, namesp...
2439
  	int i;
ae8219f18   Dan Williams   libnvdimm, label:...
2440
2441
2442
  
  	if (nd_region->ndr_mappings == 0)
  		return NULL;
8a5f50d3b   Dan Williams   libnvdimm, namesp...
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
  	/* lock down all mappings while we scan labels */
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		nd_mapping = &nd_region->mapping[i];
  		mutex_lock_nested(&nd_mapping->lock, i);
  	}
  
  	devs = scan_labels(nd_region);
  
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		int reverse = nd_region->ndr_mappings - 1 - i;
  
  		nd_mapping = &nd_region->mapping[reverse];
  		mutex_unlock(&nd_mapping->lock);
  	}
ae8219f18   Dan Williams   libnvdimm, label:...
2457
2458
2459
  
  	return devs;
  }
a2d1c7a61   Dan Williams   libnvdimm/region:...
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
  static void deactivate_labels(void *region)
  {
  	struct nd_region *nd_region = region;
  	int i;
  
  	for (i = 0; i < nd_region->ndr_mappings; i++) {
  		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
  		struct nvdimm *nvdimm = nd_mapping->nvdimm;
  
  		mutex_lock(&nd_mapping->lock);
  		nd_mapping_free_labels(nd_mapping);
  		mutex_unlock(&nd_mapping->lock);
  
  		put_ndd(ndd);
  		nd_mapping->ndd = NULL;
  		if (ndd)
  			atomic_dec(&nvdimm->busy);
  	}
  }
bf9bccc14   Dan Williams   libnvdimm: pmem l...
2480
2481
2482
2483
2484
2485
2486
2487
/*
 * init_active_labels - attach each mapping's label area and cache labels
 * @nd_region: region being probed
 *
 * For every mapping: pin the dimm's label data (get_ndd + nvdimm->busy)
 * and copy each active label onto nd_mapping->labels for scan_labels().
 * A dimm without label data is tolerated only if it is neither locked nor
 * flagged for labeling; otherwise the region probe fails.
 *
 * On full success a devm action is installed so deactivate_labels() runs
 * at region teardown; a mid-loop allocation failure unwinds immediately.
 *
 * Returns 0 on success or a negative errno.
 */
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_LABELING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				return 0;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		/* hold the dimm busy while its labels are in use */
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
				/* dimm cannot do blk: strip the local flag */
				u32 flags = __le32_to_cpu(label->flags);

				flags &= ~NSLABEL_FLAG_LOCAL;
				label->flags = __cpu_to_le32(flags);
			}
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		/* label_ent allocation failed mid-mapping */
		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings) {
		deactivate_labels(nd_region);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
			nd_region);
}
3d88002e4   Dan Williams   libnvdimm: suppor...
2549
2550
2551
/*
 * nd_region_register_namespaces - discover and register region namespaces
 * @nd_region: region to populate
 * @err: out parameter, number of namespaces that failed to register
 *
 * Loads active labels, builds the namespace device list for the region
 * type, assigns ids, and registers each device.  Devices past the first
 * registration failure are released (device_initialize + put_device) and
 * counted in *@err.
 *
 * Returns the number of successfully registered namespaces, or a negative
 * errno if label init failed, nothing was discovered, or every namespace
 * failed to register.
 */
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		/* blk/pmem draw ids from the region ida; io uses the index */
		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	/* release any devices that were never registered */
	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);
	if (rc == -ENODEV)
		return rc;

	return i;
}