drivers/md/dm-table.c

  /*
   * Copyright (C) 2001 Sistina Software (UK) Limited.
   * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   *
   * This file is released under the GPL.
   */
  #include "dm-core.h"
  
  #include <linux/module.h>
  #include <linux/vmalloc.h>
  #include <linux/blkdev.h>
  #include <linux/namei.h>
  #include <linux/ctype.h>
  #include <linux/string.h>
  #include <linux/slab.h>
  #include <linux/interrupt.h>
  #include <linux/mutex.h>
  #include <linux/delay.h>
  #include <linux/atomic.h>
  #include <linux/blk-mq.h>
  #include <linux/mount.h>
  #include <linux/dax.h>

  #define DM_MSG_PREFIX "table"
  #define MAX_DEPTH 16
  #define NODE_SIZE L1_CACHE_BYTES
  #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
  #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
  
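/*
 * A table holds its targets in a flat array plus a btree index
 * (built by dm_table_build_index) that maps a sector to the target
 * serving it.
 */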
  struct dm_table {
  	struct mapped_device *md;
  	enum dm_queue_mode type;
  
  	/* btree table */
  	unsigned int depth;
  	unsigned int counts[MAX_DEPTH];	/* in nodes */
  	sector_t *index[MAX_DEPTH];
  
  	unsigned int num_targets;
  	unsigned int num_allocated;
  	sector_t *highs;
  	struct dm_target *targets;
  	struct target_type *immutable_target_type;
  
  	bool integrity_supported:1;
  	bool singleton:1;
  	bool all_blk_mq:1;
  	unsigned integrity_added:1;

  	/*
  	 * Indicates the rw permissions for the new logical
  	 * device.  This should be a combination of FMODE_READ
  	 * and FMODE_WRITE.
  	 */
  	fmode_t mode;
  
  	/* a list of devices used by this table */
  	struct list_head devices;
  	/* events get handed up using this callback */
  	void (*event_fn)(void *);
  	void *event_context;
  
  	struct dm_md_mempools *mempools;
  
  	struct list_head target_callbacks;
  };
  
  /*
 * Similar to ceiling(log_base(n))
   */
  static unsigned int int_log(unsigned int n, unsigned int base)
  {
  	int result = 0;
  
  	while (n > 1) {
  		n = dm_div_up(n, base);
  		result++;
  	}
  
  	return result;
  }
  
  /*
 * Calculate the index of the child node of the n'th node's k'th key.
   */
  static inline unsigned int get_child(unsigned int n, unsigned int k)
  {
  	return (n * CHILDREN_PER_NODE) + k;
  }
  
  /*
   * Return the n'th node of level l from table t.
   */
  static inline sector_t *get_node(struct dm_table *t,
  				 unsigned int l, unsigned int n)
  {
  	return t->index[l] + (n * KEYS_PER_NODE);
  }
  
  /*
   * Return the highest key that you could lookup from the n'th
   * node on level l of the btree.
   */
  static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
  {
  	for (; l < t->depth - 1; l++)
  		n = get_child(n, CHILDREN_PER_NODE - 1);
  
  	if (n >= t->counts[l])
  		return (sector_t) - 1;
  
  	return get_node(t, l, n)[KEYS_PER_NODE - 1];
  }
  
  /*
   * Fills in a level of the btree based on the highs of the level
   * below it.
   */
  static int setup_btree_index(unsigned int l, struct dm_table *t)
  {
  	unsigned int n, k;
  	sector_t *node;
  
  	for (n = 0U; n < t->counts[l]; n++) {
  		node = get_node(t, l, n);
  
  		for (k = 0U; k < KEYS_PER_NODE; k++)
  			node[k] = high(t, l + 1, get_child(n, k));
  	}
  
  	return 0;
  }
  
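/*
 * Overflow-checked array allocation, like calloc() but backed by
 * vzalloc() so large table indexes can be satisfied.
 */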
  void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
  {
  	unsigned long size;
  	void *addr;
  
  	/*
  	 * Check that we're not going to overflow.
  	 */
  	if (nmemb > (ULONG_MAX / elem_size))
  		return NULL;
  
  	size = nmemb * elem_size;
  	addr = vzalloc(size);
  
  	return addr;
  }
  EXPORT_SYMBOL(dm_vcalloc);
  
  /*
 * highs and targets are managed as dynamic arrays during a
   * table load.
   */
  static int alloc_targets(struct dm_table *t, unsigned int num)
  {
  	sector_t *n_highs;
  	struct dm_target *n_targets;
  
  	/*
  	 * Allocate both the target array and offset array at once.
  	 * Append an empty entry to catch sectors beyond the end of
  	 * the device.
  	 */
  	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
  					  sizeof(sector_t));
  	if (!n_highs)
  		return -ENOMEM;
  
  	n_targets = (struct dm_target *) (n_highs + num);
  	memset(n_highs, -1, sizeof(*n_highs) * num);
  	vfree(t->highs);
  
  	t->num_allocated = num;
  	t->highs = n_highs;
  	t->targets = n_targets;
  
  	return 0;
  }
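
/*
 * Allocate a new table and size its target/offset arrays, rounding
 * num_targets up to a whole number of btree leaf nodes.
 */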
  int dm_table_create(struct dm_table **result, fmode_t mode,
  		    unsigned num_targets, struct mapped_device *md)
  {
  	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
  
  	if (!t)
  		return -ENOMEM;
  	INIT_LIST_HEAD(&t->devices);
  	INIT_LIST_HEAD(&t->target_callbacks);
  
  	if (!num_targets)
  		num_targets = KEYS_PER_NODE;
  
  	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
  	if (!num_targets) {
  		kfree(t);
  		return -ENOMEM;
  	}
  	if (alloc_targets(t, num_targets)) {
  		kfree(t);
  		return -ENOMEM;
  	}
  	t->type = DM_TYPE_NONE;
  	t->mode = mode;
  	t->md = md;
  	*result = t;
  	return 0;
  }
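
/*
 * Release every dm_dev still on the table's device list, warning
 * about dm_put_device() calls the targets forgot to make.
 */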
  static void free_devices(struct list_head *devices, struct mapped_device *md)
  {
  	struct list_head *tmp, *next;
  	list_for_each_safe(tmp, next, devices) {
  		struct dm_dev_internal *dd =
  		    list_entry(tmp, struct dm_dev_internal, list);
  		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
  		       dm_device_name(md), dd->dm_dev->name);
  		dm_put_table_device(md, dd->dm_dev);
  		kfree(dd);
  	}
  }
  void dm_table_destroy(struct dm_table *t)
  {
  	unsigned int i;
  	if (!t)
  		return;
  	/* free the indexes */
  	if (t->depth >= 2)
  		vfree(t->index[t->depth - 2]);
  
  	/* free the targets */
  	for (i = 0; i < t->num_targets; i++) {
  		struct dm_target *tgt = t->targets + i;
  
  		if (tgt->type->dtr)
  			tgt->type->dtr(tgt);
  
  		dm_put_target_type(tgt->type);
  	}
  
  	vfree(t->highs);
  
  	/* free the device list */
  	free_devices(&t->devices, t->md);

  	dm_free_md_mempools(t->mempools);
  	kfree(t);
  }
  /*
   * See if we've already got a device in the list.
   */
  static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
  {
  	struct dm_dev_internal *dd;
  
  	list_for_each_entry (dd, l, list)
  		if (dd->dm_dev->bdev->bd_dev == dev)
  			return dd;
  
  	return NULL;
  }
  
  /*
 * If possible, this checks whether an area of a destination device is invalid.
   */
  static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
  				  sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q;
  	struct queue_limits *limits = data;
  	struct block_device *bdev = dev->bdev;
  	sector_t dev_size =
  		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
  	unsigned short logical_block_size_sectors =
  		limits->logical_block_size >> SECTOR_SHIFT;
  	char b[BDEVNAME_SIZE];

  	/*
  	 * Some devices exist without request functions,
  	 * such as loop devices not yet bound to backing files.
  	 * Forbid the use of such devices.
  	 */
  	q = bdev_get_queue(bdev);
  	if (!q || !q->make_request_fn) {
  		DMWARN("%s: %s is not yet initialised: "
  		       "start=%llu, len=%llu, dev_size=%llu",
  		       dm_device_name(ti->table->md), bdevname(bdev, b),
  		       (unsigned long long)start,
  		       (unsigned long long)len,
  		       (unsigned long long)dev_size);
  		return 1;
  	}
  	if (!dev_size)
  		return 0;

  	if ((start >= dev_size) || (start + len > dev_size)) {
  		DMWARN("%s: %s too small for target: "
  		       "start=%llu, len=%llu, dev_size=%llu",
  		       dm_device_name(ti->table->md), bdevname(bdev, b),
  		       (unsigned long long)start,
  		       (unsigned long long)len,
  		       (unsigned long long)dev_size);
  		return 1;
  	}
  	/*
  	 * If the target is mapped to zoned block device(s), check
  	 * that the zones are not partially mapped.
  	 */
  	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
  		unsigned int zone_sectors = bdev_zone_sectors(bdev);
  
  		if (start & (zone_sectors - 1)) {
  			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
  			       dm_device_name(ti->table->md),
  			       (unsigned long long)start,
  			       zone_sectors, bdevname(bdev, b));
  			return 1;
  		}
  
  		/*
  		 * Note: The last zone of a zoned block device may be smaller
  		 * than other zones. So for a target mapping the end of a
  		 * zoned block device with such a zone, len would not be zone
  		 * aligned. We do not allow such last smaller zone to be part
  		 * of the mapping here to ensure that mappings with multiple
  		 * devices do not end up with a smaller zone in the middle of
  		 * the sector range.
  		 */
  		if (len & (zone_sectors - 1)) {
  			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
  			       dm_device_name(ti->table->md),
  			       (unsigned long long)len,
  			       zone_sectors, bdevname(bdev, b));
  			return 1;
  		}
  	}
  	if (logical_block_size_sectors <= 1)
  		return 0;
  
  	if (start & (logical_block_size_sectors - 1)) {
  		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

  	return 0;
  }
  
  /*
   * This upgrades the mode on an already open dm_dev, being
   * careful to leave things as they were if we fail to reopen the
   * device and not to touch the existing bdev field in case
   * it is accessed concurrently inside dm_table_any_congested().
   */
  static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
  			struct mapped_device *md)
  {
  	int r;
  	struct dm_dev *old_dev, *new_dev;

  	old_dev = dd->dm_dev;

  	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
  				dd->dm_dev->mode | new_mode, &new_dev);
  	if (r)
  		return r;

  	dd->dm_dev = new_dev;
  	dm_put_table_device(md, old_dev);

  	return 0;
  }
  
  /*
   * Convert the path to a device
   */
  dev_t dm_get_dev_t(const char *path)
  {
  	dev_t dev;
  	struct block_device *bdev;
  
  	bdev = lookup_bdev(path);
  	if (IS_ERR(bdev))
  		dev = name_to_dev_t(path);
  	else {
  		dev = bdev->bd_dev;
  		bdput(bdev);
  	}
  
  	return dev;
  }
  EXPORT_SYMBOL_GPL(dm_get_dev_t);
  
  /*
   * Add a device to the list, or just increment the usage count if
   * it's already present.
   */
  int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
  		  struct dm_dev **result)
  {
  	int r;
  	dev_t dev;
  	struct dm_dev_internal *dd;
  	struct dm_table *t = ti->table;

  	BUG_ON(!t);

  	dev = dm_get_dev_t(path);
  	if (!dev)
  		return -ENODEV;
  
  	dd = find_device(&t->devices, dev);
  	if (!dd) {
  		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
  		if (!dd)
  			return -ENOMEM;
  		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
  			kfree(dd);
  			return r;
  		}
  		atomic_set(&dd->count, 0);
  		list_add(&dd->list, &t->devices);
  	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
  		r = upgrade_mode(dd, mode, t->md);
  		if (r)
  			return r;
  	}
  	atomic_inc(&dd->count);
  	*result = dd->dm_dev;
  	return 0;
  }
  EXPORT_SYMBOL(dm_get_device);

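/*
 * iterate_devices callback that folds one underlying device's queue
 * limits into the limits being accumulated for the target.
 */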
  static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
  				sector_t start, sector_t len, void *data)
  {
  	struct queue_limits *limits = data;
  	struct block_device *bdev = dev->bdev;
  	struct request_queue *q = bdev_get_queue(bdev);
  	char b[BDEVNAME_SIZE];
  
  	if (unlikely(!q)) {
  		DMWARN("%s: Cannot set limits for nonexistent device %s",
  		       dm_device_name(ti->table->md), bdevname(bdev, b));
  		return 0;
  	}

  	if (bdev_stack_limits(limits, bdev, start) < 0)
  		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
  		       "physical_block_size=%u, logical_block_size=%u, "
  		       "alignment_offset=%u, start=%llu",
  		       dm_device_name(ti->table->md), bdevname(bdev, b),
  		       q->limits.physical_block_size,
  		       q->limits.logical_block_size,
  		       q->limits.alignment_offset,
  		       (unsigned long long) start << SECTOR_SHIFT);

  	limits->zoned = blk_queue_zoned_model(q);
  	return 0;
  }

  /*
   * Decrement a device's use count and remove it if necessary.
   */
  void dm_put_device(struct dm_target *ti, struct dm_dev *d)
  {
  	int found = 0;
  	struct list_head *devices = &ti->table->devices;
  	struct dm_dev_internal *dd;

  	list_for_each_entry(dd, devices, list) {
  		if (dd->dm_dev == d) {
  			found = 1;
  			break;
  		}
  	}
  	if (!found) {
  		DMWARN("%s: device %s not in table devices list",
  		       dm_device_name(ti->table->md), d->name);
  		return;
  	}
  	if (atomic_dec_and_test(&dd->count)) {
  		dm_put_table_device(ti->table->md, d);
  		list_del(&dd->list);
  		kfree(dd);
  	}
  }
  EXPORT_SYMBOL(dm_put_device);
  
  /*
   * Checks to see if the target joins onto the end of the table.
   */
  static int adjoin(struct dm_table *table, struct dm_target *ti)
  {
  	struct dm_target *prev;
  
  	if (!table->num_targets)
  		return !ti->begin;
  
  	prev = &table->targets[table->num_targets - 1];
  	return (ti->begin == (prev->begin + prev->len));
  }
  
  /*
   * Used to dynamically allocate the arg array.
   *
 * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
   * process messages even if some device is suspended. These messages have a
   * small fixed number of arguments.
   *
   * On the other hand, dm-switch needs to process bulk data using messages and
   * excessive use of GFP_NOIO could cause trouble.
   */
  static char **realloc_argv(unsigned *array_size, char **old_argv)
  {
  	char **argv;
  	unsigned new_size;
  	gfp_t gfp;

  	if (*array_size) {
  		new_size = *array_size * 2;
  		gfp = GFP_KERNEL;
  	} else {
  		new_size = 8;
  		gfp = GFP_NOIO;
  	}
  	argv = kmalloc(new_size * sizeof(*argv), gfp);
  	if (argv) {
  		memcpy(argv, old_argv, *array_size * sizeof(*argv));
  		*array_size = new_size;
  	}
  
  	kfree(old_argv);
  	return argv;
  }
  
  /*
   * Destructively splits up the argument list to pass to ctr.
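 *
 * For example, the (writable) string "0 1024 linear /dev/sda 0" is
 * split in place into { "0", "1024", "linear", "/dev/sda", "0" } and
 * *argc is set to 5; a '\' in the input quotes the character after it.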
   */
  int dm_split_args(int *argc, char ***argvp, char *input)
  {
  	char *start, *end = input, *out, **argv = NULL;
  	unsigned array_size = 0;
  
  	*argc = 0;
  
  	if (!input) {
  		*argvp = NULL;
  		return 0;
  	}
  	argv = realloc_argv(&array_size, argv);
  	if (!argv)
  		return -ENOMEM;
  
  	while (1) {
  		/* Skip whitespace */
  		start = skip_spaces(end);
  
  		if (!*start)
  			break;	/* success, we hit the end */
  
  		/* 'out' is used to remove any back-quotes */
  		end = out = start;
  		while (*end) {
  			/* Everything apart from '\0' can be quoted */
  			if (*end == '\\' && *(end + 1)) {
  				*out++ = *(end + 1);
  				end += 2;
  				continue;
  			}
  
  			if (isspace(*end))
  				break;	/* end of token */
  
  			*out++ = *end++;
  		}
  
  		/* have we already filled the array ? */
  		if ((*argc + 1) > array_size) {
  			argv = realloc_argv(&array_size, argv);
  			if (!argv)
  				return -ENOMEM;
  		}
  
  		/* we know this is whitespace */
  		if (*end)
  			end++;
  
  		/* terminate the string and put it in the array */
  		*out = '\0';
  		argv[*argc] = start;
  		(*argc)++;
  	}
  
  	*argvp = argv;
  	return 0;
  }
  /*
 * Impose necessary and sufficient conditions on a device's table such
   * that any incoming bio which respects its logical_block_size can be
   * processed successfully.  If it falls across the boundary between
   * two or more targets, the size of each piece it gets split into must
   * be compatible with the logical_block_size of the target processing it.
   */
  static int validate_hardware_logical_block_alignment(struct dm_table *table,
  						 struct queue_limits *limits)
  {
  	/*
  	 * This function uses arithmetic modulo the logical_block_size
  	 * (in units of 512-byte sectors).
  	 */
  	unsigned short device_logical_block_size_sects =
  		limits->logical_block_size >> SECTOR_SHIFT;
  
  	/*
  	 * Offset of the start of the next table entry, mod logical_block_size.
  	 */
  	unsigned short next_target_start = 0;
  
  	/*
  	 * Given an aligned bio that extends beyond the end of a
  	 * target, how many sectors must the next target handle?
  	 */
  	unsigned short remaining = 0;
  
  	struct dm_target *uninitialized_var(ti);
  	struct queue_limits ti_limits;
  	unsigned i;
  
  	/*
  	 * Check each entry in the table in turn.
  	 */
  	for (i = 0; i < dm_table_get_num_targets(table); i++) {
  		ti = dm_table_get_target(table, i);

  		blk_set_stacking_limits(&ti_limits);
  
  		/* combine all target devices' limits */
  		if (ti->type->iterate_devices)
  			ti->type->iterate_devices(ti, dm_set_device_limits,
  						  &ti_limits);
  		/*
  		 * If the remaining sectors fall entirely within this
  		 * table entry are they compatible with its logical_block_size?
  		 */
  		if (remaining < ti->len &&
  		    remaining & ((ti_limits.logical_block_size >>
  				  SECTOR_SHIFT) - 1))
  			break;	/* Error */
  
  		next_target_start =
  		    (unsigned short) ((next_target_start + ti->len) &
  				      (device_logical_block_size_sects - 1));
  		remaining = next_target_start ?
  		    device_logical_block_size_sects - next_target_start : 0;
  	}
  
  	if (remaining) {
  		DMWARN("%s: table line %u (start sect %llu len %llu) "
  		       "not aligned to h/w logical block size %u",
  		       dm_device_name(table->md), i,
  		       (unsigned long long) ti->begin,
  		       (unsigned long long) ti->len,
  		       limits->logical_block_size);
  		return -EINVAL;
  	}
  
  	return 0;
  }
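
/*
 * Parse one "<start> <len> <type> <args...>" table line: fill in the
 * next dm_target slot, enforce the singleton/immutable/writable
 * constraints, then hand the split arguments to the target's ctr.
 */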
  int dm_table_add_target(struct dm_table *t, const char *type,
  			sector_t start, sector_t len, char *params)
  {
  	int r = -EINVAL, argc;
  	char **argv;
  	struct dm_target *tgt;
  	if (t->singleton) {
  		DMERR("%s: target type %s must appear alone in table",
  		      dm_device_name(t->md), t->targets->type->name);
  		return -EINVAL;
  	}
  	BUG_ON(t->num_targets >= t->num_allocated);
  
  	tgt = t->targets + t->num_targets;
  	memset(tgt, 0, sizeof(*tgt));
  
  	if (!len) {
  		DMERR("%s: zero-length target", dm_device_name(t->md));
  		return -EINVAL;
  	}
  
  	tgt->type = dm_get_target_type(type);
  	if (!tgt->type) {
  		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
  		return -EINVAL;
  	}
  	if (dm_target_needs_singleton(tgt->type)) {
  		if (t->num_targets) {
  			tgt->error = "singleton target type must appear alone in table";
  			goto bad;
  		}
  		t->singleton = true;
  	}
  	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
  		tgt->error = "target type may not be included in a read-only table";
  		goto bad;
  	}
  	if (t->immutable_target_type) {
  		if (t->immutable_target_type != tgt->type) {
  			tgt->error = "immutable target type cannot be mixed with other target types";
  			goto bad;
  		}
  	} else if (dm_target_is_immutable(tgt->type)) {
  		if (t->num_targets) {
  			tgt->error = "immutable target type cannot be mixed with other target types";
  			goto bad;
  		}
  		t->immutable_target_type = tgt->type;
  	}
  	if (dm_target_has_integrity(tgt->type))
  		t->integrity_added = 1;
  	tgt->table = t;
  	tgt->begin = start;
  	tgt->len = len;
  	tgt->error = "Unknown error";
  
  	/*
  	 * Does this target adjoin the previous one ?
  	 */
  	if (!adjoin(t, tgt)) {
  		tgt->error = "Gap in table";
  		goto bad;
  	}
  
  	r = dm_split_args(&argc, &argv, params);
  	if (r) {
  		tgt->error = "couldn't split parameters (insufficient memory)";
  		goto bad;
  	}
  
  	r = tgt->type->ctr(tgt, argc, argv);
  	kfree(argv);
  	if (r)
  		goto bad;
  
  	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
  	if (!tgt->num_discard_bios && tgt->discards_supported)
  		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
  		       dm_device_name(t->md), type);

  	return 0;
  
   bad:
  	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
  	dm_put_target_type(tgt->type);
  	return r;
  }
  /*
   * Target argument parsing helpers.
   */
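
/*
 * A target ctr typically consumes its arguments along these lines
 * (a sketch; the names are illustrative):
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 8, "invalid number of feature args"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_feature_args;
 *
 *	if (dm_read_arg_group(_args, &as, &num_feature_args, &ti->error))
 *		return -EINVAL;
 *
 * dm_shift_arg() and dm_consume_args() then walk what remains.
 */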
  static int validate_next_arg(const struct dm_arg *arg,
  			     struct dm_arg_set *arg_set,
  			     unsigned *value, char **error, unsigned grouped)
  {
  	const char *arg_str = dm_shift_arg(arg_set);
  	char dummy;
  
  	if (!arg_str ||
  	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
  	    (*value < arg->min) ||
  	    (*value > arg->max) ||
  	    (grouped && arg_set->argc < *value)) {
  		*error = arg->error;
  		return -EINVAL;
  	}
  
  	return 0;
  }
  int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
  		unsigned *value, char **error)
  {
  	return validate_next_arg(arg, arg_set, value, error, 0);
  }
  EXPORT_SYMBOL(dm_read_arg);
  int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
  		      unsigned *value, char **error)
  {
  	return validate_next_arg(arg, arg_set, value, error, 1);
  }
  EXPORT_SYMBOL(dm_read_arg_group);
  
  const char *dm_shift_arg(struct dm_arg_set *as)
  {
  	char *r;
  
  	if (as->argc) {
  		as->argc--;
  		r = *as->argv;
  		as->argv++;
  		return r;
  	}
  
  	return NULL;
  }
  EXPORT_SYMBOL(dm_shift_arg);
  
  void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
  {
  	BUG_ON(as->argc < num_args);
  	as->argc -= num_args;
  	as->argv += num_args;
  }
  EXPORT_SYMBOL(dm_consume_args);
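
/*
 * Helpers that classify a dm_queue_mode as bio-based or request-based.
 */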
  static bool __table_type_bio_based(enum dm_queue_mode table_type)
  {
  	return (table_type == DM_TYPE_BIO_BASED ||
  		table_type == DM_TYPE_DAX_BIO_BASED);
  }
  static bool __table_type_request_based(enum dm_queue_mode table_type)
  {
  	return (table_type == DM_TYPE_REQUEST_BASED ||
  		table_type == DM_TYPE_MQ_REQUEST_BASED);
  }
  void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
  {
  	t->type = type;
  }
  EXPORT_SYMBOL_GPL(dm_table_set_type);
  static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
  			       sector_t start, sector_t len, void *data)
  {
  	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
  }
  
  static bool dm_table_supports_dax(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i;
  
  	/* Ensure that all targets support DAX. */
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (!ti->type->direct_access)
  			return false;
  
  		if (!ti->type->iterate_devices ||
  		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
  			return false;
  	}
  
  	return true;
  }
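
/*
 * Decide whether the table is bio-based, DAX bio-based or request-based
 * from its targets and underlying devices, unless a target has already
 * fixed the type via dm_table_set_type().
 */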
  static int dm_table_determine_type(struct dm_table *t)
  {
  	unsigned i;
  	unsigned bio_based = 0, request_based = 0, hybrid = 0;
  	unsigned sq_count = 0, mq_count = 0;
  	struct dm_target *tgt;
  	struct dm_dev_internal *dd;
  	struct list_head *devices = dm_table_get_devices(t);
  	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

  	if (t->type != DM_TYPE_NONE) {
  		/* target already set the table's type */
  		if (t->type == DM_TYPE_BIO_BASED)
  			return 0;
  		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
  		goto verify_rq_based;
  	}
  	for (i = 0; i < t->num_targets; i++) {
  		tgt = t->targets + i;
  		if (dm_target_hybrid(tgt))
  			hybrid = 1;
  		else if (dm_target_request_based(tgt))
  			request_based = 1;
  		else
  			bio_based = 1;
  
  		if (bio_based && request_based) {
  			DMWARN("Inconsistent table: different target types"
  			       " can't be mixed up");
  			return -EINVAL;
  		}
  	}
  	if (hybrid && !bio_based && !request_based) {
  		/*
  		 * The targets can work either way.
  		 * Determine the type from the live device.
  		 * Default to bio-based if device is new.
  		 */
  		if (__table_type_request_based(live_md_type))
  			request_based = 1;
  		else
  			bio_based = 1;
  	}
  	if (bio_based) {
  		/* We must use this table as bio-based */
  		t->type = DM_TYPE_BIO_BASED;
  		if (dm_table_supports_dax(t) ||
  		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
  			t->type = DM_TYPE_DAX_BIO_BASED;
  		return 0;
  	}
  
  	BUG_ON(!request_based); /* No targets in this table */
  	/*
  	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
  	 * having a compatible target use dm_table_set_type.
  	 */
  	t->type = DM_TYPE_REQUEST_BASED;
  
  verify_rq_based:
  	/*
  	 * Request-based dm supports only tables that have a single target now.
  	 * To support multiple targets, request splitting support is needed,
  	 * and that needs lots of changes in the block-layer.
  	 * (e.g. request completion process for partial completion.)
  	 */
  	if (t->num_targets > 1) {
  		DMWARN("Request-based dm doesn't support multiple targets yet");
  		return -EINVAL;
  	}
  	if (list_empty(devices)) {
  		int srcu_idx;
  		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
  
  		/* inherit live table's type and all_blk_mq */
  		if (live_table) {
  			t->type = live_table->type;
  			t->all_blk_mq = live_table->all_blk_mq;
  		}
  		dm_put_live_table(t->md, srcu_idx);
  		return 0;
  	}
  	/* Non-request-stackable devices can't be used for request-based dm */
  	list_for_each_entry(dd, devices, list) {
  		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
  
  		if (!blk_queue_stackable(q)) {
  			DMERR("table load rejected: including"
  			      " non-request-stackable devices");
  			return -EINVAL;
  		}
  
  		if (q->mq_ops)
  			mq_count++;
  		else
  			sq_count++;
  	}
  	if (sq_count && mq_count) {
  		DMERR("table load rejected: not all devices are blk-mq request-stackable");
  		return -EINVAL;
  	}
  	t->all_blk_mq = mq_count > 0;

  	if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
  		DMERR("table load rejected: all devices are not blk-mq request-stackable");
  		return -EINVAL;
  	}
  	return 0;
  }
  enum dm_queue_mode dm_table_get_type(struct dm_table *t)
  {
  	return t->type;
  }
  struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
  {
  	return t->immutable_target_type;
  }
  struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
  {
  	/* Immutable target is implicitly a singleton */
  	if (t->num_targets > 1 ||
  	    !dm_target_is_immutable(t->targets[0].type))
  		return NULL;
  
  	return t->targets;
  }
  struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i;

  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  		if (dm_target_is_wildcard(ti->type))
  			return ti;
  	}
  
  	return NULL;
  }
  bool dm_table_bio_based(struct dm_table *t)
  {
  	return __table_type_bio_based(dm_table_get_type(t));
  }
  bool dm_table_request_based(struct dm_table *t)
  {
  	return __table_type_request_based(dm_table_get_type(t));
  }
  bool dm_table_all_blk_mq_devices(struct dm_table *t)
  {
  	return t->all_blk_mq;
  }
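
/*
 * Size the table's mempools; for bio-based tables the per-bio data must
 * cover the largest per_io_data_size any target asks for.
 */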
  static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
  {
  	enum dm_queue_mode type = dm_table_get_type(t);
  	unsigned per_io_data_size = 0;
  	struct dm_target *tgt;
  	unsigned i;

  	if (unlikely(type == DM_TYPE_NONE)) {
  		DMWARN("no table type is set, can't allocate mempools");
  		return -EINVAL;
  	}
  	if (__table_type_bio_based(type))
  		for (i = 0; i < t->num_targets; i++) {
  			tgt = t->targets + i;
  			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
  		}
  	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
  	if (!t->mempools)
  		return -ENOMEM;
  
  	return 0;
  }
  
  void dm_table_free_md_mempools(struct dm_table *t)
  {
  	dm_free_md_mempools(t->mempools);
  	t->mempools = NULL;
  }
  
  struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
  {
  	return t->mempools;
  }
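
/*
 * Allocate the internal levels of the btree in one dm_vcalloc() and
 * fill them bottom-up from the leaf level, which already holds the
 * targets' highs.
 */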
  static int setup_indexes(struct dm_table *t)
  {
  	int i;
  	unsigned int total = 0;
  	sector_t *indexes;
  
  	/* allocate the space for *all* the indexes */
  	for (i = t->depth - 2; i >= 0; i--) {
  		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
  		total += t->counts[i];
  	}
  
  	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
  	if (!indexes)
  		return -ENOMEM;
  
  	/* set up internal nodes, bottom-up */
  	for (i = t->depth - 2; i >= 0; i--) {
  		t->index[i] = indexes;
  		indexes += (KEYS_PER_NODE * t->counts[i]);
  		setup_btree_index(i, t);
  	}
  
  	return 0;
  }
  
  /*
   * Builds the btree to index the map.
   */
  static int dm_table_build_index(struct dm_table *t)
  {
  	int r = 0;
  	unsigned int leaf_nodes;
  	/* how many indexes will the btree have ? */
  	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
  	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
  
  	/* leaf layer has already been set up */
  	t->counts[t->depth - 1] = leaf_nodes;
  	t->index[t->depth - 1] = t->highs;
  
  	if (t->depth >= 2)
  		r = setup_indexes(t);
  
  	return r;
  }
  static bool integrity_profile_exists(struct gendisk *disk)
  {
  	return !!blk_get_integrity(disk);
  }
  /*
   * Get a disk whose integrity profile reflects the table's profile.
   * Returns NULL if integrity support was inconsistent or unavailable.
   */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
  {
  	struct list_head *devices = dm_table_get_devices(t);
  	struct dm_dev_internal *dd = NULL;
  	struct gendisk *prev_disk = NULL, *template_disk = NULL;
  	unsigned i;
  
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		struct dm_target *ti = dm_table_get_target(t, i);
  		if (!dm_target_passes_integrity(ti->type))
  			goto no_integrity;
  	}
  
  	list_for_each_entry(dd, devices, list) {
  		template_disk = dd->dm_dev->bdev->bd_disk;
  		if (!integrity_profile_exists(template_disk))
  			goto no_integrity;
  		else if (prev_disk &&
  			 blk_integrity_compare(prev_disk, template_disk) < 0)
  			goto no_integrity;
  		prev_disk = template_disk;
  	}
  
  	return template_disk;
  
  no_integrity:
  	if (prev_disk)
  		DMWARN("%s: integrity not set: %s and %s profile mismatch",
  		       dm_device_name(t->md),
  		       prev_disk->disk_name,
  		       template_disk->disk_name);
  	return NULL;
  }
  
  /*
   * Register the mapped device for blk_integrity support if the
   * underlying devices have an integrity profile.  But all devices may
   * not have matching profiles (checking all devices isn't reliable
   * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
   * profile).  Consequently, stacked DM devices force a 2 stage integrity
   * profile validation: First pass during table load, final pass during
   * resume.
   */
  static int dm_table_register_integrity(struct dm_table *t)
  {
  	struct mapped_device *md = t->md;
  	struct gendisk *template_disk = NULL;

  	/* If target handles integrity itself do not register it here. */
  	if (t->integrity_added)
  		return 0;
  	template_disk = dm_table_get_integrity_disk(t);
  	if (!template_disk)
  		return 0;

  	if (!integrity_profile_exists(dm_disk(md))) {
  		t->integrity_supported = true;
  		/*
  		 * Register integrity profile during table load; we can do
  		 * this because the final profile must match during resume.
  		 */
  		blk_integrity_register(dm_disk(md),
  				       blk_get_integrity(template_disk));
  		return 0;
  	}
  
  	/*
  	 * If DM device already has an initialized integrity
  	 * profile the new profile should not conflict.
  	 */
  	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
  		DMWARN("%s: conflict with existing integrity profile: "
  		       "%s profile mismatch",
  		       dm_device_name(t->md),
  		       template_disk->disk_name);
  		return 1;
  	}
  	/* Preserve existing integrity profile */
  	t->integrity_supported = true;
  	return 0;
  }
  
  /*
   * Prepares the table for use by building the indices,
   * setting the type, and allocating mempools.
   */
  int dm_table_complete(struct dm_table *t)
  {
  	int r;
  	r = dm_table_determine_type(t);
  	if (r) {
  		DMERR("unable to determine table type");
  		return r;
  	}
  
  	r = dm_table_build_index(t);
  	if (r) {
  		DMERR("unable to build btrees");
  		return r;
  	}
  	r = dm_table_register_integrity(t);
  	if (r) {
  		DMERR("could not register integrity profile.");
  		return r;
  	}
  	r = dm_table_alloc_md_mempools(t, t->md);
  	if (r)
  		DMERR("unable to allocate mempools");
  
  	return r;
  }
  static DEFINE_MUTEX(_event_lock);
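
/*
 * _event_lock serialises updates of event_fn/event_context against
 * dm_table_event() delivery.
 */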
  void dm_table_event_callback(struct dm_table *t,
  			     void (*fn)(void *), void *context)
  {
  	mutex_lock(&_event_lock);
  	t->event_fn = fn;
  	t->event_context = context;
  	mutex_unlock(&_event_lock);
  }
  
  void dm_table_event(struct dm_table *t)
  {
  	/*
  	 * You can no longer call dm_table_event() from interrupt
  	 * context; use a bottom half instead.
  	 */
  	BUG_ON(in_interrupt());
  	mutex_lock(&_event_lock);
  	if (t->event_fn)
  		t->event_fn(t->event_context);
  	mutex_unlock(&_event_lock);
  }
  EXPORT_SYMBOL(dm_table_event);
  
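  /*
   * Table size in 512-byte sectors: highs[] records the last valid
   * sector of each target, so the size is one past the last target's high.
   */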
  sector_t dm_table_get_size(struct dm_table *t)
  {
  	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
  }
  EXPORT_SYMBOL(dm_table_get_size);
  
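  /* Return the target at @index, or NULL if @index is out of range. */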
  struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
  {
  	if (index >= t->num_targets)
  		return NULL;
  
  	return t->targets + index;
  }
  
  /*
   * Search the btree for the correct target.
   *
   * Caller should check returned pointer with dm_target_is_valid()
   * to trap I/O beyond end of device.
   */
  struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
  {
  	unsigned int l, n = 0, k = 0;
  	sector_t *node;
  
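  	/*
  	 * Descend one btree level per iteration: enter child n, scan its
  	 * keys for the first one >= sector, and let (n, k) select the
  	 * child to follow; after the last level they index the target array.
  	 */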
  	for (l = 0; l < t->depth; l++) {
  		n = get_child(n, k);
  		node = get_node(t, l, n);
  
  		for (k = 0; k < KEYS_PER_NODE; k++)
  			if (node[k] >= sector)
  				break;
  	}
  
  	return &t->targets[(KEYS_PER_NODE * n) + k];
  }

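  /* iterate_devices callout: count each device a target reports. */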
  static int count_device(struct dm_target *ti, struct dm_dev *dev,
  			sector_t start, sector_t len, void *data)
  {
  	unsigned *num_devices = data;
  
  	(*num_devices)++;
  
  	return 0;
  }
  
  /*
   * Check whether a table has no data devices attached using each
   * target's iterate_devices method.
   * Returns false if the result is unknown because a target doesn't
   * support iterate_devices.
   */
  bool dm_table_has_no_data_devices(struct dm_table *table)
  {
  	struct dm_target *ti;
  	unsigned i, num_devices;

  	for (i = 0; i < dm_table_get_num_targets(table); i++) {
  		ti = dm_table_get_target(table, i);
  
  		if (!ti->type->iterate_devices)
  			return false;

  		num_devices = 0;
  		ti->type->iterate_devices(ti, count_device, &num_devices);
  		if (num_devices)
  			return false;
  	}
  
  	return true;
  }

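  /*
   * iterate_devices callout: true iff the device's queue reports the
   * zoned model being checked for (passed via @data).
   */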
  static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
  				 sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  	enum blk_zoned_model *zoned_model = data;
  
  	return q && blk_queue_zoned_model(q) == *zoned_model;
  }
  
  static bool dm_table_supports_zoned_model(struct dm_table *t,
  					  enum blk_zoned_model zoned_model)
  {
  	struct dm_target *ti;
  	unsigned i;
  
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (zoned_model == BLK_ZONED_HM &&
  		    !dm_target_supports_zoned_hm(ti->type))
  			return false;
  
  		if (!ti->type->iterate_devices ||
  		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
  			return false;
  	}
  
  	return true;
  }
  
  static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
  				       sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  	unsigned int *zone_sectors = data;
  
  	return q && blk_queue_zone_sectors(q) == *zone_sectors;
  }
  
  static bool dm_table_matches_zone_sectors(struct dm_table *t,
  					  unsigned int zone_sectors)
  {
  	struct dm_target *ti;
  	unsigned i;
  
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (!ti->type->iterate_devices ||
  		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
  			return false;
  	}
  
  	return true;
  }
  
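  /*
   * A zoned table is only valid if every data device shares the same
   * zoned model and a common power-of-2 zone size.
   */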
  static int validate_hardware_zoned_model(struct dm_table *table,
  					 enum blk_zoned_model zoned_model,
  					 unsigned int zone_sectors)
  {
  	if (zoned_model == BLK_ZONED_NONE)
  		return 0;
  
  	if (!dm_table_supports_zoned_model(table, zoned_model)) {
  		DMERR("%s: zoned model is not consistent across all devices",
  		      dm_device_name(table->md));
  		return -EINVAL;
  	}
  
  	/* Check zone size validity and compatibility */
  	if (!zone_sectors || !is_power_of_2(zone_sectors))
  		return -EINVAL;
  
  	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
  		DMERR("%s: zone sectors is not consistent across all devices",
  		      dm_device_name(table->md));
  		return -EINVAL;
  	}
  
  	return 0;
  }

  /*
   * Establish the new table's queue_limits and validate them.
   */
  int dm_calculate_queue_limits(struct dm_table *table,
  			      struct queue_limits *limits)
  {
  	struct dm_target *ti;
  	struct queue_limits ti_limits;
  	unsigned i;
  	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
  	unsigned int zone_sectors = 0;

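  	/* Start from permissive stacking defaults; stacking only narrows them. */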
  	blk_set_stacking_limits(limits);

  	for (i = 0; i < dm_table_get_num_targets(table); i++) {
  		blk_set_stacking_limits(&ti_limits);

  		ti = dm_table_get_target(table, i);
  
  		if (!ti->type->iterate_devices)
  			goto combine_limits;
  
  		/*
  		 * Combine queue limits of all the devices this target uses.
  		 */
  		ti->type->iterate_devices(ti, dm_set_device_limits,
  					  &ti_limits);

  		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
  			/*
  			 * After stacking all limits, validate all devices
  			 * in table support this zoned model and zone sectors.
  			 */
  			zoned_model = ti_limits.zoned;
  			zone_sectors = ti_limits.chunk_sectors;
  		}

  		/* Set I/O hints portion of queue limits */
  		if (ti->type->io_hints)
  			ti->type->io_hints(ti, &ti_limits);

  		/*
  		 * Check each device area is consistent with the target's
  		 * overall queue limits.
  		 */
  		if (ti->type->iterate_devices(ti, device_area_is_invalid,
  					      &ti_limits))
  			return -EINVAL;
  
  combine_limits:
  		/*
  		 * Merge this target's queue limits into the overall limits
  		 * for the table.
  		 */
  		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
  			DMWARN("%s: adding target device "
  			       "(start sect %llu len %llu) "
  			       "caused an alignment inconsistency",
  			       dm_device_name(table->md),
  			       (unsigned long long) ti->begin,
  			       (unsigned long long) ti->len);
  
  		/*
  		 * FIXME: this should likely be moved to blk_stack_limits(), would
  		 * also eliminate limits->zoned stacking hack in dm_set_device_limits()
  		 */
  		if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
  			/*
  			 * By default, the stacked limits zoned model is set to
  			 * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
  			 * this model using the first target model reported
  			 * that is not BLK_ZONED_NONE. This will be either the
  			 * first target device zoned model or the model reported
  			 * by the target .io_hints.
  			 */
  			limits->zoned = ti_limits.zoned;
  		}
  	}

  	/*
  	 * Verify that the zoned model and zone sectors, as determined before
  	 * any .io_hints override, are the same across all devices in the table.
  	 * - this is especially relevant if .io_hints is emulating a drive-managed
  	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
  	 * BUT...
  	 */
  	if (limits->zoned != BLK_ZONED_NONE) {
  		/*
  		 * ...IF the above limits stacking determined a zoned model
  		 * validate that all of the table's devices conform to it.
  		 */
  		zoned_model = limits->zoned;
  		zone_sectors = limits->chunk_sectors;
  	}
  	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
  		return -EINVAL;

  	return validate_hardware_logical_block_alignment(table, limits);
  }
  
  /*
   * Verify that all devices have an integrity profile that matches the
   * DM device's registered integrity profile.  If the profiles don't
   * match then unregister the DM device's integrity profile.
   */
  static void dm_table_verify_integrity(struct dm_table *t)
  {
  	struct gendisk *template_disk = NULL;

  	if (t->integrity_added)
  		return;

  	if (t->integrity_supported) {
  		/*
  		 * Verify that the original integrity profile
  		 * matches all the devices in this table.
  		 */
  		template_disk = dm_table_get_integrity_disk(t);
  		if (template_disk &&
  		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
  			return;
  	}

  	if (integrity_profile_exists(dm_disk(t->md))) {
  		DMWARN("%s: unable to establish an integrity profile",
  		       dm_device_name(t->md));
  		blk_integrity_unregister(dm_disk(t->md));
  	}
  }

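  /*
   * iterate_devices callout: @data carries the queue flag mask to test,
   * e.g. (1UL << QUEUE_FLAG_WC) or (1UL << QUEUE_FLAG_FUA).
   */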
  static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
  				sector_t start, sector_t len, void *data)
  {
  	unsigned long flush = (unsigned long) data;
  	struct request_queue *q = bdev_get_queue(dev->bdev);

  	return q && (q->queue_flags & flush);
  }

  static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
  {
  	struct dm_target *ti;
  	unsigned i;
  
  	/*
  	 * Require at least one underlying device to support flushes.
  	 * t->devices includes internal dm devices such as mirror logs
  	 * so we need to use iterate_devices here, which targets
  	 * supporting flushes must provide.
  	 */
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);

  		if (!ti->num_flush_bios)
  			continue;

  		if (ti->flush_supported)
  			return true;

  		if (ti->type->iterate_devices &&
  		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
  			return true;
  	}

  	return false;
  }

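  /* iterate_devices callout: true iff the device's DAX write cache is enabled. */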
  static int device_dax_write_cache_enabled(struct dm_target *ti,
  					  struct dm_dev *dev, sector_t start,
  					  sector_t len, void *data)
  {
  	struct dax_device *dax_dev = dev->dax_dev;
  
  	if (!dax_dev)
  		return false;
  
  	if (dax_write_cache_enabled(dax_dev))
  		return true;
  	return false;
  }
  
  static int dm_table_supports_dax_write_cache(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i;
  
  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (ti->type->iterate_devices &&
  		    ti->type->iterate_devices(ti,
  				device_dax_write_cache_enabled, NULL))
  			return true;
  	}
  
  	return false;
  }

  static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
  			    sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  
  	return q && blk_queue_nonrot(q);
  }

  static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
  			     sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  
  	return q && !blk_queue_add_random(q);
  }

  static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
  				   sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  
  	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
  }

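  /*
   * True iff @func holds for every data device of every target; a target
   * without an iterate_devices method makes the result false.
   */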
  static bool dm_table_all_devices_attribute(struct dm_table *t,
  					   iterate_devices_callout_fn func)
  {
  	struct dm_target *ti;
  	unsigned i;

  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (!ti->type->iterate_devices ||
  		    !ti->type->iterate_devices(ti, func, NULL))
  			return false;
  	}

  	return true;
  }

  static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
  					 sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  
  	return q && !q->limits.max_write_same_sectors;
  }
  
  static bool dm_table_supports_write_same(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i;

  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);

  		if (!ti->num_write_same_bios)
  			return false;
  
  		if (!ti->type->iterate_devices ||
  		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
  			return false;
  	}
  
  	return true;
  }

  static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
  					   sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);
  
  	return q && !q->limits.max_write_zeroes_sectors;
  }
  
  static bool dm_table_supports_write_zeroes(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i = 0;
  
  	while (i < dm_table_get_num_targets(t)) {
  		ti = dm_table_get_target(t, i++);
  
  		if (!ti->num_write_zeroes_bios)
  			return false;
  
  		if (!ti->type->iterate_devices ||
  		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
  			return false;
  	}
  
  	return true;
  }

  static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
  				      sector_t start, sector_t len, void *data)
  {
  	struct request_queue *q = bdev_get_queue(dev->bdev);

  	return q && !blk_queue_discard(q);
  }
  
  static bool dm_table_supports_discards(struct dm_table *t)
  {
  	struct dm_target *ti;
  	unsigned i;

  	for (i = 0; i < dm_table_get_num_targets(t); i++) {
  		ti = dm_table_get_target(t, i);
  
  		if (!ti->num_discard_bios)
  			return false;

  		/*
  		 * Either the target provides discard support (as implied by setting
  		 * 'discards_supported') or it relies on _all_ data devices having
  		 * discard support.
  		 */
  		if (!ti->discards_supported &&
  		    (!ti->type->iterate_devices ||
  		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
  			return false;
  	}

  	return true;
  }

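  /*
   * Apply the validated queue_limits to the mapped device's request_queue
   * and derive the matching queue flags from the table's capabilities.
   */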
  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
  			       struct queue_limits *limits)
  {
  	bool wc = false, fua = false;

  	/*
  	 * Copy table's limits to the DM device's request_queue
  	 */
  	q->limits = *limits;

  	if (!dm_table_supports_discards(t))
  		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
  	else
  		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

  	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
  		wc = true;
  		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
  			fua = true;
  	}
  	blk_queue_write_cache(q, wc, fua);

  	if (dm_table_supports_dax(t))
  		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
  	else
  		queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);

  	if (dm_table_supports_dax_write_cache(t))
  		dax_write_cache(t->md->dax_dev, true);

  	/* Mark the queue non-rotational only if all underlying devices are. */
  	if (dm_table_all_devices_attribute(t, device_is_nonrot))
  		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  	else
  		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

  	if (!dm_table_supports_write_same(t))
  		q->limits.max_write_same_sectors = 0;
  	if (!dm_table_supports_write_zeroes(t))
  		q->limits.max_write_zeroes_sectors = 0;

  	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
  		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
  	else
  		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

  	dm_table_verify_integrity(t);
  
  	/*
  	 * Determine whether or not this queue's I/O timings contribute
  	 * to the entropy pool. Only request-based targets use this.
  	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
  	 * have it set.
  	 */
  	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
  		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
  
  	/*
  	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
  	 * visible to other CPUs because, once the flag is set, incoming bios
  	 * are processed by request-based dm, which refers to the queue
  	 * settings.
  	 * Until the flag is set, bios are passed to bio-based dm and queued
  	 * to md->deferred, where queue settings are not needed yet.
  	 * Those bios are passed to request-based dm at resume time.
  	 */
  	smp_mb();
  	if (dm_table_request_based(t))
  		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
  }
  
  unsigned int dm_table_get_num_targets(struct dm_table *t)
  {
  	return t->num_targets;
  }
  
  struct list_head *dm_table_get_devices(struct dm_table *t)
  {
  	return &t->devices;
  }

  fmode_t dm_table_get_mode(struct dm_table *t)
  {
  	return t->mode;
  }
  EXPORT_SYMBOL(dm_table_get_mode);

  enum suspend_mode {
  	PRESUSPEND,
  	PRESUSPEND_UNDO,
  	POSTSUSPEND,
  };
  
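  /* Run the chosen suspend hook, if present, on every target in the table. */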
  static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
  {
  	int i = t->num_targets;
  	struct dm_target *ti = t->targets;

  	lockdep_assert_held(&t->md->suspend_lock);

  	while (i--) {
  		switch (mode) {
  		case PRESUSPEND:
  			if (ti->type->presuspend)
  				ti->type->presuspend(ti);
  			break;
  		case PRESUSPEND_UNDO:
  			if (ti->type->presuspend_undo)
  				ti->type->presuspend_undo(ti);
  			break;
  		case POSTSUSPEND:
  			if (ti->type->postsuspend)
  				ti->type->postsuspend(ti);
  			break;
  		}

  		ti++;
  	}
  }
  
  void dm_table_presuspend_targets(struct dm_table *t)
  {
  	if (!t)
  		return;

  	suspend_targets(t, PRESUSPEND);
  }
  
  void dm_table_presuspend_undo_targets(struct dm_table *t)
  {
  	if (!t)
  		return;
  
  	suspend_targets(t, PRESUSPEND_UNDO);
  }
  
  void dm_table_postsuspend_targets(struct dm_table *t)
  {
  	if (!t)
  		return;

  	suspend_targets(t, POSTSUSPEND);
  }

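  /*
   * Two passes: every target's preresume hook must succeed before any
   * resume hook is run; a preresume failure aborts the resume.
   */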
  int dm_table_resume_targets(struct dm_table *t)
  {
  	int i, r = 0;

  	lockdep_assert_held(&t->md->suspend_lock);

  	for (i = 0; i < t->num_targets; i++) {
  		struct dm_target *ti = t->targets + i;
  
  		if (!ti->type->preresume)
  			continue;
  
  		r = ti->type->preresume(ti);
  		if (r) {
  			DMERR("%s: %s: preresume failed, error = %d",
  			      dm_device_name(t->md), ti->type->name, r);
  			return r;
  		}
  	}
  
  	for (i = 0; i < t->num_targets; i++) {
  		struct dm_target *ti = t->targets + i;
  
  		if (ti->type->resume)
  			ti->type->resume(ti);
  	}
  
  	return 0;
  }

  void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
  {
  	list_add(&cb->list, &t->target_callbacks);
  }
  EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

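  /*
   * OR together the congestion state of every underlying device, plus
   * whatever the registered target callbacks report.
   */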
  int dm_table_any_congested(struct dm_table *t, int bdi_bits)
  {
  	struct dm_dev_internal *dd;
  	struct list_head *devices = dm_table_get_devices(t);
  	struct dm_target_callbacks *cb;
  	int r = 0;

  	list_for_each_entry(dd, devices, list) {
  		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
  		char b[BDEVNAME_SIZE];
  
  		if (likely(q))
  			r |= bdi_congested(q->backing_dev_info, bdi_bits);
  		else
  			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
  				     dm_device_name(t->md),
  				     bdevname(dd->dm_dev->bdev, b));
  	}

  	list_for_each_entry(cb, &t->target_callbacks, list)
  		if (cb->congested_fn)
  			r |= cb->congested_fn(cb, bdi_bits);

  	return r;
  }

  struct mapped_device *dm_table_get_md(struct dm_table *t)
  {
  	return t->md;
  }
  EXPORT_SYMBOL(dm_table_get_md);

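  /*
   * Kick a request-based table's queue: blk-mq queues via
   * blk_mq_run_hw_queues(), legacy queues via blk_run_queue_async()
   * under queue_lock.
   */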
  void dm_table_run_md_queue_async(struct dm_table *t)
  {
  	struct mapped_device *md;
  	struct request_queue *queue;
  	unsigned long flags;
  
  	if (!dm_table_request_based(t))
  		return;
  
  	md = dm_table_get_md(t);
  	queue = dm_get_md_queue(md);
  	if (queue) {
  		if (queue->mq_ops)
  			blk_mq_run_hw_queues(queue, true);
  		else {
  			spin_lock_irqsave(queue->queue_lock, flags);
  			blk_run_queue_async(queue);
  			spin_unlock_irqrestore(queue->queue_lock, flags);
  		}
  	}
  }
  EXPORT_SYMBOL(dm_table_run_md_queue_async);