Blame view

block/blk-zoned.c 12.3 KB
3dcf60bcb   Christoph Hellwig   block: add SPDX t...
1
  // SPDX-License-Identifier: GPL-2.0
6a0cb1bc1   Hannes Reinecke   block: Implement ...
2
3
4
5
6
7
8
9
10
11
12
13
14
15
  /*
   * Zoned block device handling
   *
   * Copyright (c) 2015, Hannes Reinecke
   * Copyright (c) 2015, SUSE Linux GmbH
   *
   * Copyright (c) 2016, Damien Le Moal
   * Copyright (c) 2016, Western Digital
   */
  
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/rbtree.h>
  #include <linux/blkdev.h>
bf5054569   Damien Le Moal   block: Introduce ...
16
  #include <linux/blk-mq.h>
26202928f   Damien Le Moal   block: Limit zone...
17
18
  #include <linux/mm.h>
  #include <linux/vmalloc.h>
bd976e527   Damien Le Moal   block: Kill gfp_t...
19
  #include <linux/sched/mm.h>
6a0cb1bc1   Hannes Reinecke   block: Implement ...
20

a2d6b3a2d   Damien Le Moal   block: Improve zo...
21
  #include "blk.h"
6a0cb1bc1   Hannes Reinecke   block: Implement ...
22
23
24
  static inline sector_t blk_zone_start(struct request_queue *q,
  				      sector_t sector)
  {
f99e86485   Damien Le Moal   block: Rename blk...
25
  	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
6a0cb1bc1   Hannes Reinecke   block: Implement ...
26
27
28
29
30
  
  	return sector & ~zone_mask;
  }
  
  /*
6cc77e9cb   Christoph Hellwig   block: introduce ...
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
   * Return true if a request is a write requests that needs zone write locking.
   */
  bool blk_req_needs_zone_write_lock(struct request *rq)
  {
  	if (!rq->q->seq_zones_wlock)
  		return false;
  
  	if (blk_rq_is_passthrough(rq))
  		return false;
  
  	switch (req_op(rq)) {
  	case REQ_OP_WRITE_ZEROES:
  	case REQ_OP_WRITE_SAME:
  	case REQ_OP_WRITE:
  		return blk_rq_zone_is_seq(rq);
  	default:
  		return false;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
  
/*
 * Mark the zone targeted by @rq as write locked and flag the request as
 * holding that lock.
 *
 * The per-zone bit is acquired with an atomic test-and-set: if the bit
 * was already set, another request owns the zone write lock, so warn and
 * return without flagging this request.
 */
void __blk_req_zone_write_lock(struct request *rq)
{
	/* Atomically acquire the per-zone bit; bail out (and warn) if held. */
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	/* The request must not already be marked as holding a zone lock. */
	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
  
/*
 * Release the zone write lock held by @rq.
 *
 * The request flag is cleared unconditionally. The per-zone bit is
 * cleared with an atomic test-and-clear, warning if the bit was not set
 * (an unlock without a matching lock).
 */
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	/* The bitmap may be absent; only clear the bit when it exists. */
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
a91e13802   Damien Le Moal   block: Introduce ...
71
72
  /**
   * blkdev_nr_zones - Get number of zones
9b38bb4b1   Christoph Hellwig   block: simplify b...
73
   * @disk:	Target gendisk
a91e13802   Damien Le Moal   block: Introduce ...
74
   *
9b38bb4b1   Christoph Hellwig   block: simplify b...
75
76
   * Return the total number of zones of a zoned block device.  For a block
   * device without zone capabilities, the number of zones is always 0.
a91e13802   Damien Le Moal   block: Introduce ...
77
   */
9b38bb4b1   Christoph Hellwig   block: simplify b...
78
  unsigned int blkdev_nr_zones(struct gendisk *disk)
a91e13802   Damien Le Moal   block: Introduce ...
79
  {
9b38bb4b1   Christoph Hellwig   block: simplify b...
80
  	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
a91e13802   Damien Le Moal   block: Introduce ...
81

9b38bb4b1   Christoph Hellwig   block: simplify b...
82
  	if (!blk_queue_is_zoned(disk->queue))
a91e13802   Damien Le Moal   block: Introduce ...
83
  		return 0;
9b38bb4b1   Christoph Hellwig   block: simplify b...
84
  	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
a91e13802   Damien Le Moal   block: Introduce ...
85
86
  }
  EXPORT_SYMBOL_GPL(blkdev_nr_zones);
6a0cb1bc1   Hannes Reinecke   block: Implement ...
87
88
89
90
  /**
   * blkdev_report_zones - Get zones information
   * @bdev:	Target block device
   * @sector:	Sector from which to report zones
d41003513   Christoph Hellwig   block: rework zon...
91
92
93
   * @nr_zones:	Maximum number of zones to report
   * @cb:		Callback function called for each reported zone
   * @data:	Private data for the callback
6a0cb1bc1   Hannes Reinecke   block: Implement ...
94
95
   *
   * Description:
d41003513   Christoph Hellwig   block: rework zon...
96
97
98
99
100
101
102
103
104
   *    Get zone information starting from the zone containing @sector for at most
   *    @nr_zones, and call @cb for each zone reported by the device.
   *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
   *    constant can be passed to @nr_zones.
   *    Returns the number of zones reported by the device, or a negative errno
   *    value in case of failure.
   *
   *    Note: The caller must use memalloc_noXX_save/restore() calls to control
   *    memory allocations done within this function.
6a0cb1bc1   Hannes Reinecke   block: Implement ...
105
   */
e76239a37   Christoph Hellwig   block: add a repo...
106
  int blkdev_report_zones(struct block_device *bdev, sector_t sector,
d41003513   Christoph Hellwig   block: rework zon...
107
  			unsigned int nr_zones, report_zones_cb cb, void *data)
6a0cb1bc1   Hannes Reinecke   block: Implement ...
108
  {
ceeb373aa   Damien Le Moal   block: Simplify r...
109
  	struct gendisk *disk = bdev->bd_disk;
5eac3eb30   Damien Le Moal   block: Remove par...
110
  	sector_t capacity = get_capacity(disk);
6a0cb1bc1   Hannes Reinecke   block: Implement ...
111

d41003513   Christoph Hellwig   block: rework zon...
112
113
  	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
  	    WARN_ON_ONCE(!disk->fops->report_zones))
e76239a37   Christoph Hellwig   block: add a repo...
114
  		return -EOPNOTSUPP;
6a0cb1bc1   Hannes Reinecke   block: Implement ...
115

d41003513   Christoph Hellwig   block: rework zon...
116
  	if (!nr_zones || sector >= capacity)
6a0cb1bc1   Hannes Reinecke   block: Implement ...
117
  		return 0;
6a0cb1bc1   Hannes Reinecke   block: Implement ...
118

d41003513   Christoph Hellwig   block: rework zon...
119
  	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
6a0cb1bc1   Hannes Reinecke   block: Implement ...
120
121
  }
  EXPORT_SYMBOL_GPL(blkdev_report_zones);
6e33dbf28   Chaitanya Kulkarni   blk-zoned: implem...
122
  static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
c7a1d926d   Damien Le Moal   block: Simplify R...
123
  						sector_t sector,
6e33dbf28   Chaitanya Kulkarni   blk-zoned: implem...
124
125
126
127
  						sector_t nr_sectors)
  {
  	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
  		return false;
6e33dbf28   Chaitanya Kulkarni   blk-zoned: implem...
128
  	/*
5eac3eb30   Damien Le Moal   block: Remove par...
129
130
  	 * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
  	 * of the applicable zone range is the entire disk.
6e33dbf28   Chaitanya Kulkarni   blk-zoned: implem...
131
  	 */
5eac3eb30   Damien Le Moal   block: Remove par...
132
  	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
6e33dbf28   Chaitanya Kulkarni   blk-zoned: implem...
133
  }
6a0cb1bc1   Hannes Reinecke   block: Implement ...
134
/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (!nr_sectors || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		/* One bio per zone, chained to the previous one. */
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that reset all
		 * zones, this is useful for applications like mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	/*
	 * Submit and wait on the last bio; earlier bios were chained to it
	 * by blk_next_bio(), so this completes the whole range.
	 */
	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
3ed05a987   Shaun Tancheff   blk-zoned: implem...
201

d41003513   Christoph Hellwig   block: rework zon...
202
203
204
205
206
207
208
209
210
211
212
213
214
  struct zone_report_args {
  	struct blk_zone __user *zones;
  };
  
  static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
  				    void *data)
  {
  	struct zone_report_args *args = data;
  
  	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
  		return -EFAULT;
  	return 0;
  }
56c4bddb9   Bart Van Assche   block: Suppress k...
215
  /*
3ed05a987   Shaun Tancheff   blk-zoned: implem...
216
217
218
219
220
221
222
   * BLKREPORTZONE ioctl processing.
   * Called from blkdev_ioctl.
   */
  int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
  			      unsigned int cmd, unsigned long arg)
  {
  	void __user *argp = (void __user *)arg;
d41003513   Christoph Hellwig   block: rework zon...
223
  	struct zone_report_args args;
3ed05a987   Shaun Tancheff   blk-zoned: implem...
224
225
  	struct request_queue *q;
  	struct blk_zone_report rep;
3ed05a987   Shaun Tancheff   blk-zoned: implem...
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
  	int ret;
  
  	if (!argp)
  		return -EINVAL;
  
  	q = bdev_get_queue(bdev);
  	if (!q)
  		return -ENXIO;
  
  	if (!blk_queue_is_zoned(q))
  		return -ENOTTY;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EACCES;
  
  	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
  		return -EFAULT;
  
  	if (!rep.nr_zones)
  		return -EINVAL;
d41003513   Christoph Hellwig   block: rework zon...
246
247
248
249
250
  	args.zones = argp + sizeof(struct blk_zone_report);
  	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
  				  blkdev_copy_zone_to_user, &args);
  	if (ret < 0)
  		return ret;
3ed05a987   Shaun Tancheff   blk-zoned: implem...
251

d41003513   Christoph Hellwig   block: rework zon...
252
253
254
255
  	rep.nr_zones = ret;
  	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
  		return -EFAULT;
  	return 0;
3ed05a987   Shaun Tancheff   blk-zoned: implem...
256
  }
56c4bddb9   Bart Van Assche   block: Suppress k...
257
  /*
e876df1fe   Ajay Joshi   block: add zone o...
258
   * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
3ed05a987   Shaun Tancheff   blk-zoned: implem...
259
260
   * Called from blkdev_ioctl.
   */
e876df1fe   Ajay Joshi   block: add zone o...
261
262
  int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
  			   unsigned int cmd, unsigned long arg)
3ed05a987   Shaun Tancheff   blk-zoned: implem...
263
264
265
266
  {
  	void __user *argp = (void __user *)arg;
  	struct request_queue *q;
  	struct blk_zone_range zrange;
e876df1fe   Ajay Joshi   block: add zone o...
267
  	enum req_opf op;
3ed05a987   Shaun Tancheff   blk-zoned: implem...
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
  
  	if (!argp)
  		return -EINVAL;
  
  	q = bdev_get_queue(bdev);
  	if (!q)
  		return -ENXIO;
  
  	if (!blk_queue_is_zoned(q))
  		return -ENOTTY;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EACCES;
  
  	if (!(mode & FMODE_WRITE))
  		return -EBADF;
  
  	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
  		return -EFAULT;
e876df1fe   Ajay Joshi   block: add zone o...
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
  	switch (cmd) {
  	case BLKRESETZONE:
  		op = REQ_OP_ZONE_RESET;
  		break;
  	case BLKOPENZONE:
  		op = REQ_OP_ZONE_OPEN;
  		break;
  	case BLKCLOSEZONE:
  		op = REQ_OP_ZONE_CLOSE;
  		break;
  	case BLKFINISHZONE:
  		op = REQ_OP_ZONE_FINISH;
  		break;
  	default:
  		return -ENOTTY;
  	}
  
  	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
  				GFP_KERNEL);
3ed05a987   Shaun Tancheff   blk-zoned: implem...
306
  }
bf5054569   Damien Le Moal   block: Introduce ...
307
308
309
310
311
312
313
  
  static inline unsigned long *blk_alloc_zone_bitmap(int node,
  						   unsigned int nr_zones)
  {
  	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
  			    GFP_NOIO, node);
  }
bf5054569   Damien Le Moal   block: Introduce ...
314
315
  void blk_queue_free_zone_bitmaps(struct request_queue *q)
  {
f216fdd77   Christoph Hellwig   block: replace se...
316
317
  	kfree(q->conv_zones_bitmap);
  	q->conv_zones_bitmap = NULL;
bf5054569   Damien Le Moal   block: Introduce ...
318
319
320
  	kfree(q->seq_zones_wlock);
  	q->seq_zones_wlock = NULL;
  }
d41003513   Christoph Hellwig   block: rework zon...
321
322
/*
 * State accumulated by blk_revalidate_zone_cb() while walking the zone
 * report of a disk during blk_revalidate_disk_zones().
 */
struct blk_revalidate_zone_args {
	struct gendisk	*disk;		/* disk being revalidated */
	unsigned long	*conv_zones_bitmap;	/* bit set per conventional zone */
	unsigned long	*seq_zones_wlock;	/* write-lock bits for seq zones */
	unsigned int	nr_zones;	/* total zone count (set on first zone) */
	sector_t	zone_sectors;	/* zone size (set from the first zone) */
	sector_t	sector;		/* expected start of the next zone */
};
d9dd73087   Damien Le Moal   block: Enhance bl...
329
330
331
  /*
   * Helper function to check the validity of zones of a zoned block device.
   */
d41003513   Christoph Hellwig   block: rework zon...
332
333
  static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
  				  void *data)
d9dd73087   Damien Le Moal   block: Enhance bl...
334
  {
d41003513   Christoph Hellwig   block: rework zon...
335
336
  	struct blk_revalidate_zone_args *args = data;
  	struct gendisk *disk = args->disk;
d9dd73087   Damien Le Moal   block: Enhance bl...
337
  	struct request_queue *q = disk->queue;
d9dd73087   Damien Le Moal   block: Enhance bl...
338
339
340
341
342
343
  	sector_t capacity = get_capacity(disk);
  
  	/*
  	 * All zones must have the same size, with the exception on an eventual
  	 * smaller last zone.
  	 */
6c6b35491   Christoph Hellwig   block: set the zo...
344
345
346
347
348
349
350
  	if (zone->start == 0) {
  		if (zone->len == 0 || !is_power_of_2(zone->len)) {
  			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)
  ",
  				disk->disk_name, zone->len);
  			return -ENODEV;
  		}
d9dd73087   Damien Le Moal   block: Enhance bl...
351

6c6b35491   Christoph Hellwig   block: set the zo...
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
  		args->zone_sectors = zone->len;
  		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
  	} else if (zone->start + args->zone_sectors < capacity) {
  		if (zone->len != args->zone_sectors) {
  			pr_warn("%s: Invalid zoned device with non constant zone size
  ",
  				disk->disk_name);
  			return -ENODEV;
  		}
  	} else {
  		if (zone->len > args->zone_sectors) {
  			pr_warn("%s: Invalid zoned device with larger last zone size
  ",
  				disk->disk_name);
  			return -ENODEV;
  		}
d9dd73087   Damien Le Moal   block: Enhance bl...
368
369
370
  	}
  
  	/* Check for holes in the zone report */
d41003513   Christoph Hellwig   block: rework zon...
371
  	if (zone->start != args->sector) {
d9dd73087   Damien Le Moal   block: Enhance bl...
372
373
  		pr_warn("%s: Zone gap at sectors %llu..%llu
  ",
d41003513   Christoph Hellwig   block: rework zon...
374
375
  			disk->disk_name, args->sector, zone->start);
  		return -ENODEV;
d9dd73087   Damien Le Moal   block: Enhance bl...
376
377
378
379
380
  	}
  
  	/* Check zone type */
  	switch (zone->type) {
  	case BLK_ZONE_TYPE_CONVENTIONAL:
e94f58194   Christoph Hellwig   block: allocate t...
381
382
383
384
385
386
387
388
  		if (!args->conv_zones_bitmap) {
  			args->conv_zones_bitmap =
  				blk_alloc_zone_bitmap(q->node, args->nr_zones);
  			if (!args->conv_zones_bitmap)
  				return -ENOMEM;
  		}
  		set_bit(idx, args->conv_zones_bitmap);
  		break;
d9dd73087   Damien Le Moal   block: Enhance bl...
389
390
  	case BLK_ZONE_TYPE_SEQWRITE_REQ:
  	case BLK_ZONE_TYPE_SEQWRITE_PREF:
e94f58194   Christoph Hellwig   block: allocate t...
391
392
393
394
395
396
  		if (!args->seq_zones_wlock) {
  			args->seq_zones_wlock =
  				blk_alloc_zone_bitmap(q->node, args->nr_zones);
  			if (!args->seq_zones_wlock)
  				return -ENOMEM;
  		}
d9dd73087   Damien Le Moal   block: Enhance bl...
397
398
399
400
401
  		break;
  	default:
  		pr_warn("%s: Invalid zone type 0x%x at sectors %llu
  ",
  			disk->disk_name, (int)zone->type, zone->start);
d41003513   Christoph Hellwig   block: rework zon...
402
  		return -ENODEV;
d9dd73087   Damien Le Moal   block: Enhance bl...
403
  	}
d41003513   Christoph Hellwig   block: rework zon...
404
405
406
  	args->sector += zone->len;
  	return 0;
  }
bf5054569   Damien Le Moal   block: Introduce ...
407
408
409
410
411
412
  /**
   * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
   * @disk:	Target disk
   *
   * Helper function for low-level device drivers to (re) allocate and initialize
   * a disk request queue zone bitmaps. This functions should normally be called
ae58954d8   Christoph Hellwig   block: don't hand...
413
414
415
   * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
   * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
   * is correct.
bf5054569   Damien Le Moal   block: Introduce ...
416
417
418
419
   */
  int blk_revalidate_disk_zones(struct gendisk *disk)
  {
  	struct request_queue *q = disk->queue;
e94f58194   Christoph Hellwig   block: allocate t...
420
421
  	struct blk_revalidate_zone_args args = {
  		.disk		= disk,
e94f58194   Christoph Hellwig   block: allocate t...
422
  	};
6c6b35491   Christoph Hellwig   block: set the zo...
423
424
  	unsigned int noio_flag;
  	int ret;
bf5054569   Damien Le Moal   block: Introduce ...
425

c98c3d09f   Christoph Hellwig   block: cleanup th...
426
427
  	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
  		return -EIO;
ae58954d8   Christoph Hellwig   block: don't hand...
428
429
  	if (WARN_ON_ONCE(!queue_is_mq(q)))
  		return -EIO;
bf5054569   Damien Le Moal   block: Introduce ...
430

e94f58194   Christoph Hellwig   block: allocate t...
431
  	/*
6c6b35491   Christoph Hellwig   block: set the zo...
432
433
  	 * Ensure that all memory allocations in this context are done as if
  	 * GFP_NOIO was specified.
e94f58194   Christoph Hellwig   block: allocate t...
434
  	 */
6c6b35491   Christoph Hellwig   block: set the zo...
435
436
437
438
  	noio_flag = memalloc_noio_save();
  	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
  				       blk_revalidate_zone_cb, &args);
  	memalloc_noio_restore(noio_flag);
bf5054569   Damien Le Moal   block: Introduce ...
439

bf5054569   Damien Le Moal   block: Introduce ...
440
  	/*
6c6b35491   Christoph Hellwig   block: set the zo...
441
442
443
  	 * Install the new bitmaps and update nr_zones only once the queue is
  	 * stopped and all I/Os are completed (i.e. a scheduler is not
  	 * referencing the bitmaps).
bf5054569   Damien Le Moal   block: Introduce ...
444
445
  	 */
  	blk_mq_freeze_queue(q);
d41003513   Christoph Hellwig   block: rework zon...
446
  	if (ret >= 0) {
6c6b35491   Christoph Hellwig   block: set the zo...
447
  		blk_queue_chunk_sectors(q, args.zone_sectors);
e94f58194   Christoph Hellwig   block: allocate t...
448
  		q->nr_zones = args.nr_zones;
d41003513   Christoph Hellwig   block: rework zon...
449
  		swap(q->seq_zones_wlock, args.seq_zones_wlock);
f216fdd77   Christoph Hellwig   block: replace se...
450
  		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
d41003513   Christoph Hellwig   block: rework zon...
451
452
  		ret = 0;
  	} else {
bf5054569   Damien Le Moal   block: Introduce ...
453
454
  		pr_warn("%s: failed to revalidate zones
  ", disk->disk_name);
bf5054569   Damien Le Moal   block: Introduce ...
455
  		blk_queue_free_zone_bitmaps(q);
bf5054569   Damien Le Moal   block: Introduce ...
456
  	}
d41003513   Christoph Hellwig   block: rework zon...
457
  	blk_mq_unfreeze_queue(q);
bf5054569   Damien Le Moal   block: Introduce ...
458

d41003513   Christoph Hellwig   block: rework zon...
459
  	kfree(args.seq_zones_wlock);
f216fdd77   Christoph Hellwig   block: replace se...
460
  	kfree(args.conv_zones_bitmap);
bf5054569   Damien Le Moal   block: Introduce ...
461
462
463
  	return ret;
  }
  EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);