Blame view

block/blk-lib.c 7.29 KB
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
1
2
3
4
5
6
7
8
9
10
  /*
   * Functions related to generic helpers functions
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
5dba3089e   Lukas Czerner   blkdev: Submit di...
11
12
13
14
15
16
17
/*
 * Tracks a batch of in-flight bios sharing one completion.
 * @done starts at 1 (a bias held by the submitter) so the completion
 * cannot fire before all bios have been submitted.
 */
struct bio_batch {
	atomic_t		done;	/* 1 (submitter bias) + number of in-flight bios */
	unsigned long		flags;	/* BIO_UPTODATE cleared on any real error */
	struct completion	*wait;	/* completed when the last bio finishes */
};

/*
 * Per-bio end_io callback for batched submission.
 *
 * Records failure in the shared batch flags (ignoring -EOPNOTSUPP, which
 * is treated as non-fatal here), wakes the waiter when this is the last
 * outstanding bio, and drops the bio reference.
 */
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* -EOPNOTSUPP is deliberately not treated as an error for the batch */
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	/* Last one out (including the submitter's bias) signals completion */
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
  
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 *    Blocks until all submitted discard bios have completed.
 *    Returns 0 on success, -ENXIO/-EOPNOTSUPP/-ENOMEM/-EIO on failure.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	/* done starts at 1: submitter bias, dropped after the loop below */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		/* sector_div() modifies its first argument and returns the remainder */
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			/* Round end_sect down to the previous aligned boundary */
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
136

3f14d792f   Dmitry Monakhov   blkdev: add blkde...
137
  /**
4363ac7c1   Martin K. Petersen   block: Implement ...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
   * blkdev_issue_write_same - queue a write same operation
   * @bdev:	target blockdev
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @page:	page containing data to write
   *
   * Description:
   *    Issue a write same request for the sectors in question.
   */
  int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
  			    sector_t nr_sects, gfp_t gfp_mask,
  			    struct page *page)
  {
  	DECLARE_COMPLETION_ONSTACK(wait);
  	struct request_queue *q = bdev_get_queue(bdev);
  	unsigned int max_write_same_sectors;
  	struct bio_batch bb;
  	struct bio *bio;
  	int ret = 0;
  
  	if (!q)
  		return -ENXIO;
  
  	max_write_same_sectors = q->limits.max_write_same_sectors;
  
  	if (max_write_same_sectors == 0)
  		return -EOPNOTSUPP;
  
  	atomic_set(&bb.done, 1);
  	bb.flags = 1 << BIO_UPTODATE;
  	bb.wait = &wait;
  
  	while (nr_sects) {
  		bio = bio_alloc(gfp_mask, 1);
  		if (!bio) {
  			ret = -ENOMEM;
  			break;
  		}
4f024f379   Kent Overstreet   block: Abstract o...
177
  		bio->bi_iter.bi_sector = sector;
4363ac7c1   Martin K. Petersen   block: Implement ...
178
179
180
181
182
183
184
185
186
  		bio->bi_end_io = bio_batch_end_io;
  		bio->bi_bdev = bdev;
  		bio->bi_private = &bb;
  		bio->bi_vcnt = 1;
  		bio->bi_io_vec->bv_page = page;
  		bio->bi_io_vec->bv_offset = 0;
  		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
  
  		if (nr_sects > max_write_same_sectors) {
4f024f379   Kent Overstreet   block: Abstract o...
187
  			bio->bi_iter.bi_size = max_write_same_sectors << 9;
4363ac7c1   Martin K. Petersen   block: Implement ...
188
189
190
  			nr_sects -= max_write_same_sectors;
  			sector += max_write_same_sectors;
  		} else {
4f024f379   Kent Overstreet   block: Abstract o...
191
  			bio->bi_iter.bi_size = nr_sects << 9;
4363ac7c1   Martin K. Petersen   block: Implement ...
192
193
194
195
196
197
198
199
200
  			nr_sects = 0;
  		}
  
  		atomic_inc(&bb.done);
  		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
  	}
  
  	/* Wait for bios in-flight */
  	if (!atomic_dec_and_test(&bb.done))
5577022f4   Vladimir Davydov   block: account io...
201
  		wait_for_completion_io(&wait);
4363ac7c1   Martin K. Petersen   block: Implement ...
202
203
204
205
206
207
208
209
210
  
  	if (!test_bit(BIO_UPTODATE, &bb.flags))
  		ret = -ENOTSUPP;
  
  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_write_same);
  
/**
 * blkdev_issue_zeroout - generate number of zero filed write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue number of bios with zerofiled pages.
 *  Each bio carries up to BIO_MAX_PAGES references to ZERO_PAGE(0).
 *  Returns 0 on success, -ENOMEM on allocation failure, -EIO if any
 *  bio in the batch completed with an error.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* done starts at 1: submitter bias, dropped after submission loop */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		/* Fill the bio with zero pages until it is full or we are done */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			/* ret temporarily holds the byte count added, not an errno */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			/* Short add means the bio is full; submit and start another */
			if (ret < (sz << 9))
				break;
		}
		/* Reset ret: a partially filled bio is not an error */
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	return ret;
}
579e8f3c7   Martin K. Petersen   block: Make blkde...
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
  
  /**
   * blkdev_issue_zeroout - zero-fill a block range
   * @bdev:	blockdev to write
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   *
   * Description:
   *  Generate and issue number of bios with zerofiled pages.
   */
  
  int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  			 sector_t nr_sects, gfp_t gfp_mask)
  {
  	if (bdev_write_same(bdev)) {
  		unsigned char bdn[BDEVNAME_SIZE];
  
  		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
  					     ZERO_PAGE(0)))
  			return 0;
  
  		bdevname(bdev, bdn);
  		pr_err("%s: WRITE SAME failed. Manually zeroing.
  ", bdn);
  	}
  
  	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
  }
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
297
  EXPORT_SYMBOL(blkdev_issue_zeroout);