Blame view

block/blk-lib.c 7.08 KB
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
1
2
3
4
5
6
7
8
9
10
  /*
   * Functions related to generic helpers functions
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
5dba3089e   Lukas Czerner   blkdev: Submit di...
11
12
13
14
15
16
17
/*
 * State shared by a batch of bios submitted together: "done" counts
 * outstanding bios plus one reference held by the submitter, "flags"
 * carries BIO_UPTODATE (cleared by bio_batch_end_io on any hard error),
 * and "wait" is completed when the last reference is dropped.
 */
struct bio_batch {
	atomic_t		done;	/* in-flight bios + 1 for the submitter */
	unsigned long		flags;	/* BIO_UPTODATE cleared on error */
	struct completion	*wait;	/* signalled by the final completion */
};
  
  static void bio_batch_end_io(struct bio *bio, int err)
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
18
  {
5dba3089e   Lukas Czerner   blkdev: Submit di...
19
  	struct bio_batch *bb = bio->bi_private;
8af1954d1   Lukas Czerner   blkdev: Do not re...
20
  	if (err && (err != -EOPNOTSUPP))
5dba3089e   Lukas Czerner   blkdev: Submit di...
21
  		clear_bit(BIO_UPTODATE, &bb->flags);
5dba3089e   Lukas Czerner   blkdev: Submit di...
22
23
  	if (atomic_dec_and_test(&bb->done))
  		complete(bb->wait);
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
  	bio_put(bio);
  }
  
  /**
   * blkdev_issue_discard - queue a discard
   * @bdev:	blockdev to issue discard for
   * @sector:	start sector
   * @nr_sects:	number of sectors to discard
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @flags:	BLKDEV_IFL_* flags to control behaviour
   *
   * Description:
   *    Issue a discard request for the sectors in question.
   */
  int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
  {
  	DECLARE_COMPLETION_ONSTACK(wait);
  	struct request_queue *q = bdev_get_queue(bdev);
8c5553678   Christoph Hellwig   block: remove the...
43
  	int type = REQ_WRITE | REQ_DISCARD;
8dd2cb7e8   Shaohua Li   block: discard gr...
44
45
  	sector_t max_discard_sectors;
  	sector_t granularity, alignment;
5dba3089e   Lukas Czerner   blkdev: Submit di...
46
  	struct bio_batch bb;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
47
  	struct bio *bio;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
48
  	int ret = 0;
0cfbcafca   Shaohua Li   block: add plug f...
49
  	struct blk_plug plug;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
50
51
52
53
54
55
  
  	if (!q)
  		return -ENXIO;
  
  	if (!blk_queue_discard(q))
  		return -EOPNOTSUPP;
f6ff53d36   Paolo Bonzini   block: reorganize...
56
57
  	/* Zero-sector (unknown) and one-sector granularities are the same.  */
  	granularity = max(q->limits.discard_granularity >> 9, 1U);
8dd2cb7e8   Shaohua Li   block: discard gr...
58
59
  	alignment = bdev_discard_alignment(bdev) >> 9;
  	alignment = sector_div(alignment, granularity);
f6ff53d36   Paolo Bonzini   block: reorganize...
60

10d1f9e2c   Jens Axboe   block: fix proble...
61
62
  	/*
  	 * Ensure that max_discard_sectors is of the proper
c6e666345   Paolo Bonzini   block: split disc...
63
  	 * granularity, so that requests stay aligned after a split.
10d1f9e2c   Jens Axboe   block: fix proble...
64
65
  	 */
  	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
8dd2cb7e8   Shaohua Li   block: discard gr...
66
67
  	sector_div(max_discard_sectors, granularity);
  	max_discard_sectors *= granularity;
4c64500ea   Jens Axboe   block: fix patch ...
68
  	if (unlikely(!max_discard_sectors)) {
0f7996039   Mike Snitzer   block: eliminate ...
69
70
  		/* Avoid infinite loop below. Being cautious never hurts. */
  		return -EOPNOTSUPP;
10d1f9e2c   Jens Axboe   block: fix proble...
71
  	}
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
72

dd3932edd   Christoph Hellwig   block: remove BLK...
73
  	if (flags & BLKDEV_DISCARD_SECURE) {
8d57a98cc   Adrian Hunter   block: add secure...
74
75
  		if (!blk_queue_secdiscard(q))
  			return -EOPNOTSUPP;
8c5553678   Christoph Hellwig   block: remove the...
76
  		type |= REQ_SECURE;
8d57a98cc   Adrian Hunter   block: add secure...
77
  	}
5dba3089e   Lukas Czerner   blkdev: Submit di...
78
79
80
  	atomic_set(&bb.done, 1);
  	bb.flags = 1 << BIO_UPTODATE;
  	bb.wait = &wait;
0cfbcafca   Shaohua Li   block: add plug f...
81
  	blk_start_plug(&plug);
5dba3089e   Lukas Czerner   blkdev: Submit di...
82
  	while (nr_sects) {
c6e666345   Paolo Bonzini   block: split disc...
83
  		unsigned int req_sects;
8dd2cb7e8   Shaohua Li   block: discard gr...
84
  		sector_t end_sect, tmp;
c6e666345   Paolo Bonzini   block: split disc...
85

f31e7e402   Dmitry Monakhov   blkdev: move blkd...
86
  		bio = bio_alloc(gfp_mask, 1);
66ac02801   Christoph Hellwig   block: don't allo...
87
88
89
90
  		if (!bio) {
  			ret = -ENOMEM;
  			break;
  		}
c6e666345   Paolo Bonzini   block: split disc...
91
92
93
94
95
96
97
  		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
  
  		/*
  		 * If splitting a request, and the next starting sector would be
  		 * misaligned, stop the discard at the previous aligned sector.
  		 */
  		end_sect = sector + req_sects;
8dd2cb7e8   Shaohua Li   block: discard gr...
98
99
100
101
102
103
  		tmp = end_sect;
  		if (req_sects < nr_sects &&
  		    sector_div(tmp, granularity) != alignment) {
  			end_sect = end_sect - alignment;
  			sector_div(end_sect, granularity);
  			end_sect = end_sect * granularity + alignment;
c6e666345   Paolo Bonzini   block: split disc...
104
105
  			req_sects = end_sect - sector;
  		}
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
106
  		bio->bi_sector = sector;
5dba3089e   Lukas Czerner   blkdev: Submit di...
107
  		bio->bi_end_io = bio_batch_end_io;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
108
  		bio->bi_bdev = bdev;
5dba3089e   Lukas Czerner   blkdev: Submit di...
109
  		bio->bi_private = &bb;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
110

c6e666345   Paolo Bonzini   block: split disc...
111
112
113
  		bio->bi_size = req_sects << 9;
  		nr_sects -= req_sects;
  		sector = end_sect;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
114

5dba3089e   Lukas Czerner   blkdev: Submit di...
115
  		atomic_inc(&bb.done);
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
116
  		submit_bio(type, bio);
5dba3089e   Lukas Czerner   blkdev: Submit di...
117
  	}
0cfbcafca   Shaohua Li   block: add plug f...
118
  	blk_finish_plug(&plug);
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
119

5dba3089e   Lukas Czerner   blkdev: Submit di...
120
121
  	/* Wait for bios in-flight */
  	if (!atomic_dec_and_test(&bb.done))
5577022f4   Vladimir Davydov   block: account io...
122
  		wait_for_completion_io(&wait);
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
123

8af1954d1   Lukas Czerner   blkdev: Do not re...
124
  	if (!test_bit(BIO_UPTODATE, &bb.flags))
5dba3089e   Lukas Czerner   blkdev: Submit di...
125
  		ret = -EIO;
66ac02801   Christoph Hellwig   block: don't allo...
126

f31e7e402   Dmitry Monakhov   blkdev: move blkd...
127
  	return ret;
f31e7e402   Dmitry Monakhov   blkdev: move blkd...
128
129
  }
  EXPORT_SYMBOL(blkdev_issue_discard);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
130

3f14d792f   Dmitry Monakhov   blkdev: add blkde...
131
  /**
4363ac7c1   Martin K. Petersen   block: Implement ...
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
   * blkdev_issue_write_same - queue a write same operation
   * @bdev:	target blockdev
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @page:	page containing data to write
   *
   * Description:
   *    Issue a write same request for the sectors in question.
   */
  int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
  			    sector_t nr_sects, gfp_t gfp_mask,
  			    struct page *page)
  {
  	DECLARE_COMPLETION_ONSTACK(wait);
  	struct request_queue *q = bdev_get_queue(bdev);
  	unsigned int max_write_same_sectors;
  	struct bio_batch bb;
  	struct bio *bio;
  	int ret = 0;
  
  	if (!q)
  		return -ENXIO;
  
  	max_write_same_sectors = q->limits.max_write_same_sectors;
  
  	if (max_write_same_sectors == 0)
  		return -EOPNOTSUPP;
  
  	atomic_set(&bb.done, 1);
  	bb.flags = 1 << BIO_UPTODATE;
  	bb.wait = &wait;
  
  	while (nr_sects) {
  		bio = bio_alloc(gfp_mask, 1);
  		if (!bio) {
  			ret = -ENOMEM;
  			break;
  		}
  
  		bio->bi_sector = sector;
  		bio->bi_end_io = bio_batch_end_io;
  		bio->bi_bdev = bdev;
  		bio->bi_private = &bb;
  		bio->bi_vcnt = 1;
  		bio->bi_io_vec->bv_page = page;
  		bio->bi_io_vec->bv_offset = 0;
  		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
  
  		if (nr_sects > max_write_same_sectors) {
  			bio->bi_size = max_write_same_sectors << 9;
  			nr_sects -= max_write_same_sectors;
  			sector += max_write_same_sectors;
  		} else {
  			bio->bi_size = nr_sects << 9;
  			nr_sects = 0;
  		}
  
  		atomic_inc(&bb.done);
  		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
  	}
  
  	/* Wait for bios in-flight */
  	if (!atomic_dec_and_test(&bb.done))
5577022f4   Vladimir Davydov   block: account io...
196
  		wait_for_completion_io(&wait);
4363ac7c1   Martin K. Petersen   block: Implement ...
197
198
199
200
201
202
203
204
205
  
  	if (!test_bit(BIO_UPTODATE, &bb.flags))
  		ret = -ENOTSUPP;
  
  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_write_same);
  
  /**
291d24f6d   Ben Hutchings   block: fix kernel...
206
   * blkdev_issue_zeroout - generate number of zero filed write bios
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
207
208
209
210
   * @bdev:	blockdev to issue
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
211
212
213
   *
   * Description:
   *  Generate and issue number of bios with zerofiled pages.
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
214
   */
579e8f3c7   Martin K. Petersen   block: Make blkde...
215
  int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
dd3932edd   Christoph Hellwig   block: remove BLK...
216
  			sector_t nr_sects, gfp_t gfp_mask)
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
217
  {
18edc8eaa   Dmitry Monakhov   blkdev: fix blkde...
218
  	int ret;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
219
220
  	struct bio *bio;
  	struct bio_batch bb;
0aeea1896   Lukas Czerner   block: fix mis-sy...
221
  	unsigned int sz;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
222
  	DECLARE_COMPLETION_ONSTACK(wait);
0aeea1896   Lukas Czerner   block: fix mis-sy...
223
  	atomic_set(&bb.done, 1);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
224
225
  	bb.flags = 1 << BIO_UPTODATE;
  	bb.wait = &wait;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
226

18edc8eaa   Dmitry Monakhov   blkdev: fix blkde...
227
  	ret = 0;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
228
229
230
  	while (nr_sects != 0) {
  		bio = bio_alloc(gfp_mask,
  				min(nr_sects, (sector_t)BIO_MAX_PAGES));
18edc8eaa   Dmitry Monakhov   blkdev: fix blkde...
231
232
  		if (!bio) {
  			ret = -ENOMEM;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
233
  			break;
18edc8eaa   Dmitry Monakhov   blkdev: fix blkde...
234
  		}
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
235
236
237
238
  
  		bio->bi_sector = sector;
  		bio->bi_bdev   = bdev;
  		bio->bi_end_io = bio_batch_end_io;
dd3932edd   Christoph Hellwig   block: remove BLK...
239
  		bio->bi_private = &bb;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
240

0341aafb7   Jens Axboe   block: fix bad us...
241
242
  		while (nr_sects != 0) {
  			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
243
244
245
246
247
248
  			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
  			nr_sects -= ret >> 9;
  			sector += ret >> 9;
  			if (ret < (sz << 9))
  				break;
  		}
18edc8eaa   Dmitry Monakhov   blkdev: fix blkde...
249
  		ret = 0;
0aeea1896   Lukas Czerner   block: fix mis-sy...
250
  		atomic_inc(&bb.done);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
251
252
  		submit_bio(WRITE, bio);
  	}
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
253

dd3932edd   Christoph Hellwig   block: remove BLK...
254
  	/* Wait for bios in-flight */
0aeea1896   Lukas Czerner   block: fix mis-sy...
255
  	if (!atomic_dec_and_test(&bb.done))
5577022f4   Vladimir Davydov   block: account io...
256
  		wait_for_completion_io(&wait);
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
257
258
259
260
  
  	if (!test_bit(BIO_UPTODATE, &bb.flags))
  		/* One of bios in the batch was completed with error.*/
  		ret = -EIO;
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
261
262
  	return ret;
  }
579e8f3c7   Martin K. Petersen   block: Make blkde...
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
  
  /**
   * blkdev_issue_zeroout - zero-fill a block range
   * @bdev:	blockdev to write
   * @sector:	start sector
   * @nr_sects:	number of sectors to write
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   *
   * Description:
   *  Generate and issue number of bios with zerofiled pages.
   */
  
  int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  			 sector_t nr_sects, gfp_t gfp_mask)
  {
  	if (bdev_write_same(bdev)) {
  		unsigned char bdn[BDEVNAME_SIZE];
  
  		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
  					     ZERO_PAGE(0)))
  			return 0;
  
  		bdevname(bdev, bdn);
  		pr_err("%s: WRITE SAME failed. Manually zeroing.
  ", bdn);
  	}
  
  	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
  }
3f14d792f   Dmitry Monakhov   blkdev: add blkde...
292
  EXPORT_SYMBOL(blkdev_issue_zeroout);