block/blk-lib.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
  }
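
/*
 * Usage sketch (hypothetical caller, not part of this file): blk_next_bio()
 * grows a chain one bio at a time.  Each call chains the previous bio to the
 * freshly allocated one and submits it, so the caller only ever holds the
 * newest bio and finally submits (or anchors) just that one:
 *
 *	struct bio *bio = NULL;
 *
 *	while (have_more_work) {		// illustrative condition
 *		bio = blk_next_bio(bio, 0, GFP_KERNEL);
 *		// fill in bi_sector, device, op and size here
 *	}
 *	if (bio)
 *		submit_bio(bio);	// or submit_bio_wait() for sync callers
 */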

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects = min_t(sector_t, nr_sects,
				bio_allowed_max_sectors(q));

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
  EXPORT_SYMBOL(__blkdev_issue_discard);
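
/*
 * Usage sketch (hypothetical caller): the *biop anchor lets several calls
 * append to one bio chain, so discontiguous ranges can be batched under one
 * submission and waited on once.  ranges[] and nr_ranges are illustrative:
 *
 *	struct bio *bio = NULL;
 *	int i, ret = 0;
 *
 *	for (i = 0; i < nr_ranges && !ret; i++)
 *		ret = __blkdev_issue_discard(bdev, ranges[i].sector,
 *					     ranges[i].nr_sects, GFP_KERNEL,
 *					     0, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);	// waits for the whole chain
 *		bio_put(bio);
 *	}
 */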
  
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_discard);
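
/*
 * Usage sketch (hypothetical): a filesystem discarding a freed extent.  The
 * wrapper above squashes an -EOPNOTSUPP bio completion to 0, but a queue
 * without discard support still returns -EOPNOTSUPP from
 * __blkdev_issue_discard(), so callers commonly tolerate that error or test
 * blk_queue_discard() up front:
 *
 *	int err = blkdev_issue_discard(bdev, start_sect, len_sects,
 *				       GFP_NOFS, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		pr_warn("discard failed: %d\n", err);
 */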

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of REQ_OP_WRITE_SAME bios with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

  	*biop = bio;
  	return 0;
  }
  
  /**
   * blkdev_issue_write_same - queue a write same operation
   * @bdev:	target blockdev
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @page:	page containing data
   *
   * Description:
   *    Issue a write same request for the sectors in question.
   */
  int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
  				sector_t nr_sects, gfp_t gfp_mask,
  				struct page *page)
  {
  	struct bio *bio = NULL;
  	struct blk_plug plug;
  	int ret;
  
  	blk_start_plug(&plug);
  	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
  			&bio);
  	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_write_same);
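
/*
 * Usage sketch (hypothetical): replicate one logical block's worth of data
 * from a caller-provided page across a range.  pattern_page is illustrative;
 * the device must advertise WRITE SAME support (bdev_write_same()), else
 * -EOPNOTSUPP is returned:
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					  GFP_KERNEL, pattern_page);
 */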

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;
  
  		if (nr_sects > max_write_zeroes_sectors) {
  			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
  			nr_sects -= max_write_zeroes_sectors;
  			sector += max_write_zeroes_sectors;
  		} else {
  			bio->bi_iter.bi_size = nr_sects << 9;
  			nr_sects = 0;
  		}
  		cond_resched();
  	}
  
  	*biop = bio;
  	return 0;
  }

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
  }
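
/*
 * Worked example, assuming 4K pages: PAGE_SIZE / 512 == 8, so nr_sects == 7
 * rounds up to one page, while nr_sects == 1 << 20 (512 MiB) gives 131072
 * pages, which min() then clamps to BIO_MAX_PAGES.
 */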

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
  		bio->bi_iter.bi_sector = sector;
  		bio_set_dev(bio, bdev);
  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
  
  		while (nr_sects != 0) {
  			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
  			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
  			nr_sects -= bi_size >> 9;
  			sector += bi_size >> 9;
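			/*
			 * bio_add_page() returns the number of bytes it
			 * accepted, so anything short of sz means this bio
			 * is full: break out and let the outer loop chain
			 * a fresh bio for the remainder.
			 */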
  			if (bi_size < sz)
  				break;
  		}
  		cond_resched();
  	}
  
  	*biop = bio;
  	return 0;
  }

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
  EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
  EXPORT_SYMBOL(blkdev_issue_zeroout);
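
/*
 * Usage sketch (hypothetical): zero a range while keeping the blocks
 * provisioned, and report failure rather than falling back to writing zero
 * pages when the device has no zeroing offload.  fall_back_to_manual() is
 * illustrative:
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOUNMAP |
 *				       BLKDEV_ZERO_NOFALLBACK);
 *	if (err == -EOPNOTSUPP)
 *		err = fall_back_to_manual();	// caller-provided strategy
 */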