block/blk-lib.c

  // SPDX-License-Identifier: GPL-2.0
  /*
 * Functions related to generic helper functions
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
  struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
  {
  	struct bio *new = bio_alloc(gfp, nr_pages);
  
  	if (bio) {
  		bio_chain(bio, new);
  		submit_bio(bio);
  	}

  	return new;
  }
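
/*
 * Usage sketch (hypothetical caller, not part of this file): blk_next_bio()
 * lets a caller grow a bio chain while holding only the tail bio.  Each call
 * submits the previous bio and returns a fresh one chained to it, so waiting
 * on the tail waits for the whole chain.  Names below are illustrative only.
 */
static struct bio *example_grow_chain(struct block_device *bdev,
		sector_t sector, unsigned int n, gfp_t gfp)
{
	struct bio *bio = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		bio = blk_next_bio(bio, 0, gfp);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
	}
	/* The caller typically ends with submit_bio_wait(bio); bio_put(bio); */
	return bio;
}
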
  int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, int flags,
  		struct bio **biop)
  {
  	struct request_queue *q = bdev_get_queue(bdev);
  	struct bio *bio = *biop;
  	unsigned int op;
  	sector_t bs_mask, part_offset = 0;
  
  	if (!q)
  		return -ENXIO;

  	if (bdev_read_only(bdev))
  		return -EPERM;
  	if (flags & BLKDEV_DISCARD_SECURE) {
  		if (!blk_queue_secure_erase(q))
  			return -EOPNOTSUPP;
  		op = REQ_OP_SECURE_ERASE;
  	} else {
  		if (!blk_queue_discard(q))
  			return -EOPNOTSUPP;
  		op = REQ_OP_DISCARD;
  	}

	/* In case the discard granularity isn't set by a buggy device driver */
  	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
  		char dev_name[BDEVNAME_SIZE];
  
  		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
  		return -EOPNOTSUPP;
  	}
  	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
  	if ((sector | nr_sects) & bs_mask)
  		return -EINVAL;
  	if (!nr_sects)
  		return -EINVAL;

  	/* In case the discard request is in a partition */
  	if (bdev_is_partition(bdev))
  		part_offset = bdev->bd_part->start_sect;
  	while (nr_sects) {
  		sector_t granularity_aligned_lba, req_sects;
  		sector_t sector_mapped = sector + part_offset;
  
  		granularity_aligned_lba = round_up(sector_mapped,
  				q->limits.discard_granularity >> SECTOR_SHIFT);
  
  		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio will
		 *   start at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
  		 */
  		if (granularity_aligned_lba == sector_mapped)
  			req_sects = min_t(sector_t, nr_sects,
  					  bio_aligned_discard_max_sectors(q));
  		else
  			req_sects = min_t(sector_t, nr_sects,
  					  granularity_aligned_lba - sector_mapped);

  		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
  		bio = blk_next_bio(bio, 0, gfp_mask);
  		bio->bi_iter.bi_sector = sector;
  		bio_set_dev(bio, bdev);
  		bio_set_op_attrs(bio, op, 0);

  		bio->bi_iter.bi_size = req_sects << 9;
  		sector += req_sects;
  		nr_sects -= req_sects;

  		/*
  		 * We can loop for a long time in here, if someone does
  		 * full device discards (like mkfs). Be nice and allow
  		 * us to schedule out to avoid softlocking if preempt
  		 * is disabled.
  		 */
  		cond_resched();
  	}
  
  	*biop = bio;
  	return 0;
  }
  EXPORT_SYMBOL(__blkdev_issue_discard);
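
/*
 * Usage sketch (hypothetical, not part of this file): the *biop anchor is
 * what makes this variant asynchronous; a caller can accumulate the discard
 * bios behind an existing chain and wait once at the end.  This mirrors what
 * blkdev_issue_discard() below does, minus the plug.
 */
static int example_discard_and_wait(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				     0, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	return ret;
}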
  
  /**
   * blkdev_issue_discard - queue a discard
   * @bdev:	blockdev to issue discard for
   * @sector:	start sector
   * @nr_sects:	number of sectors to discard
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @flags:	BLKDEV_DISCARD_* flags to control behaviour
   *
   * Description:
   *    Issue a discard request for the sectors in question.
   */
  int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
  {
  	struct bio *bio = NULL;
  	struct blk_plug plug;
  	int ret;
  	blk_start_plug(&plug);
  	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
  			&bio);
  	if (!ret && bio) {
  		ret = submit_bio_wait(bio);
  		if (ret == -EOPNOTSUPP)
  			ret = 0;
  		bio_put(bio);
  	}
  	blk_finish_plug(&plug);

  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_discard);
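
/*
 * Usage sketch (hypothetical): discard every sector of a device, as an
 * ioctl-style caller might.  Reading the capacity via bd_inode is an
 * assumption about how the caller obtains the device size.
 */
static int example_discard_whole_device(struct block_device *bdev)
{
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}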

  /**
 * __blkdev_issue_write_same - generate a number of bios with the same page
   * @bdev:	target blockdev
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @page:	page containing data to write
   * @biop:	pointer to anchor bio
   *
   * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
   */
  static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
  		struct bio **biop)
  {
  	struct request_queue *q = bdev_get_queue(bdev);
  	unsigned int max_write_same_sectors;
  	struct bio *bio = *biop;
  	sector_t bs_mask;
  
  	if (!q)
  		return -ENXIO;
  	if (bdev_read_only(bdev))
  		return -EPERM;
  	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
  	if ((sector | nr_sects) & bs_mask)
  		return -EINVAL;
  	if (!bdev_write_same(bdev))
  		return -EOPNOTSUPP;
  	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
  	max_write_same_sectors = bio_allowed_max_sectors(q);

  	while (nr_sects) {
  		bio = blk_next_bio(bio, 1, gfp_mask);
  		bio->bi_iter.bi_sector = sector;
  		bio_set_dev(bio, bdev);
  		bio->bi_vcnt = 1;
  		bio->bi_io_vec->bv_page = page;
  		bio->bi_io_vec->bv_offset = 0;
  		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
  		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
  
  		if (nr_sects > max_write_same_sectors) {
  			bio->bi_iter.bi_size = max_write_same_sectors << 9;
  			nr_sects -= max_write_same_sectors;
  			sector += max_write_same_sectors;
  		} else {
  			bio->bi_iter.bi_size = nr_sects << 9;
  			nr_sects = 0;
  		}
  		cond_resched();
  	}
  	*biop = bio;
  	return 0;
  }
  
  /**
   * blkdev_issue_write_same - queue a write same operation
   * @bdev:	target blockdev
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @page:	page containing data
   *
   * Description:
   *    Issue a write same request for the sectors in question.
   */
  int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
  				sector_t nr_sects, gfp_t gfp_mask,
  				struct page *page)
  {
  	struct bio *bio = NULL;
  	struct blk_plug plug;
  	int ret;
  
  	blk_start_plug(&plug);
  	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
  			&bio);
  	if (ret == 0 && bio) {
  		ret = submit_bio_wait(bio);
  		bio_put(bio);
  	}
  	blk_finish_plug(&plug);
  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_write_same);
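
/*
 * Usage sketch (hypothetical): fill a range with a repeating one-block
 * pattern.  The page must hold at least one logical block of data and must
 * stay allocated until the call returns.
 */
static int example_write_pattern(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, u8 byte)
{
	struct page *page = alloc_page(GFP_KERNEL);
	int ret;

	if (!page)
		return -ENOMEM;
	memset(page_address(page), byte, PAGE_SIZE);
	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				      page);
	__free_page(page);
	return ret;
}
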
  static int __blkdev_issue_write_zeroes(struct block_device *bdev,
  		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
  		struct bio **biop, unsigned flags)
  {
  	struct bio *bio = *biop;
  	unsigned int max_write_zeroes_sectors;
  	struct request_queue *q = bdev_get_queue(bdev);
  
  	if (!q)
  		return -ENXIO;
  	if (bdev_read_only(bdev))
  		return -EPERM;
  	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
  	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
  
  	if (max_write_zeroes_sectors == 0)
  		return -EOPNOTSUPP;
  
  	while (nr_sects) {
  		bio = blk_next_bio(bio, 0, gfp_mask);
  		bio->bi_iter.bi_sector = sector;
  		bio_set_dev(bio, bdev);
  		bio->bi_opf = REQ_OP_WRITE_ZEROES;
  		if (flags & BLKDEV_ZERO_NOUNMAP)
  			bio->bi_opf |= REQ_NOUNMAP;
  
  		if (nr_sects > max_write_zeroes_sectors) {
  			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
  			nr_sects -= max_write_zeroes_sectors;
  			sector += max_write_zeroes_sectors;
  		} else {
  			bio->bi_iter.bi_size = nr_sects << 9;
  			nr_sects = 0;
  		}
  		cond_resched();
  	}
  
  	*biop = bio;
  	return 0;
  }
  /*
   * Convert a number of 512B sectors to a number of pages.
   * The result is limited to a number of pages that can fit into a BIO.
   * Also make sure that the result is always at least 1 (page) for the cases
   * where nr_sects is lower than the number of sectors in a page.
   */
  static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
  {
  	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

  	return min(pages, (sector_t)BIO_MAX_PAGES);
  }
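
/*
 * Worked example for the helper above, assuming the common values
 * PAGE_SIZE == 4096 (8 sectors per page) and BIO_MAX_PAGES == 256:
 *   nr_sects = 1     -> DIV_ROUND_UP(1, 8)  = 1 page
 *   nr_sects = 17    -> DIV_ROUND_UP(17, 8) = 3 pages
 *   nr_sects = 1<<40 -> capped at 256 pages, i.e. one bio covers at most
 *                       256 * 8 = 2048 sectors (1 MiB), and
 *                       __blkdev_issue_zero_pages() below loops.
 */
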
  static int __blkdev_issue_zero_pages(struct block_device *bdev,
  		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
  		struct bio **biop)
  {
  	struct request_queue *q = bdev_get_queue(bdev);
  	struct bio *bio = *biop;
  	int bi_size = 0;
  	unsigned int sz;
  
  	if (!q)
  		return -ENXIO;
  	if (bdev_read_only(bdev))
  		return -EPERM;
  	while (nr_sects != 0) {
  		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
  				   gfp_mask);
  		bio->bi_iter.bi_sector = sector;
  		bio_set_dev(bio, bdev);
  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
  
  		while (nr_sects != 0) {
  			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
  			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
  			nr_sects -= bi_size >> 9;
  			sector += bi_size >> 9;
  			if (bi_size < sz)
  				break;
  		}
  		cond_resched();
  	}
  
  	*biop = bio;
  	return 0;
  }
  /**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @biop:	pointer to anchor bio
   * @flags:	controls detailed behavior
   *
   * Description:
   *  Zero-fill a block range, either using hardware offload or by explicitly
   *  writing zeroes to the device.
   *
   *  If a device is using logical block provisioning, the underlying space will
   *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
   *
   *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
   *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
   */
  int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
  		unsigned flags)
  {
  	int ret;
  	sector_t bs_mask;
  
  	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
  	if ((sector | nr_sects) & bs_mask)
  		return -EINVAL;

  	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
  			biop, flags);
  	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
  		return ret;

  	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
  					 biop);
  }
  EXPORT_SYMBOL(__blkdev_issue_zeroout);
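
/*
 * Usage sketch (hypothetical): asynchronous zeroing with an offload-only
 * policy.  With BLKDEV_ZERO_NOFALLBACK the call fails with -EOPNOTSUPP
 * rather than falling back to writing zero pages explicitly.
 */
static int example_zeroout_offload_only(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, struct bio **biop)
{
	return __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				      biop, BLKDEV_ZERO_NOFALLBACK);
}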
  
  /**
   * blkdev_issue_zeroout - zero-fill a block range
   * @bdev:	blockdev to write
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @flags:	controls detailed behavior
   *
   * Description:
   *  Zero-fill a block range, either using hardware offload or by explicitly
   *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
   *  valid values for %flags.
   */
  int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
  {
  	int ret = 0;
  	sector_t bs_mask;
  	struct bio *bio;
  	struct blk_plug plug;
  	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

  	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
  	if ((sector | nr_sects) & bs_mask)
  		return -EINVAL;
  
  retry:
  	bio = NULL;
  	blk_start_plug(&plug);
  	if (try_write_zeroes) {
  		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
  						  gfp_mask, &bio, flags);
  	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
  		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
  						gfp_mask, &bio);
  	} else {
  		/* No zeroing offload support */
  		ret = -EOPNOTSUPP;
  	}
  	if (ret == 0 && bio) {
  		ret = submit_bio_wait(bio);
  		bio_put(bio);
  	}
  	blk_finish_plug(&plug);
  	if (ret && try_write_zeroes) {
  		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
  			try_write_zeroes = false;
  			goto retry;
  		}
  		if (!bdev_write_zeroes_sectors(bdev)) {
  			/*
  			 * Zeroing offload support was indicated, but the
  			 * device reported ILLEGAL REQUEST (for some devices
  			 * there is no non-destructive way to verify whether
  			 * WRITE ZEROES is actually supported).
  			 */
  			ret = -EOPNOTSUPP;
  		}
  	}

  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_zeroout);
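
/*
 * Usage sketch (hypothetical): zero the first 1 MiB (2048 sectors) of a
 * device while keeping the space provisioned, e.g. before writing metadata
 * that must not land on unmapped extents.
 */
static int example_zero_header(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 1 << (20 - 9), GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}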