Blame view
block/blk-lib.c
10.8 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
f31e7e402 blkdev: move blkd... |
2 3 4 5 6 7 8 9 10 11 |
/* * Functions related to generic helpers functions */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include "blk.h" |
4e49ea4a3 block/fs/drivers:... |
12 |
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, |
9082e87bf block: remove str... |
13 |
gfp_t gfp) |
f31e7e402 blkdev: move blkd... |
14 |
{ |
9082e87bf block: remove str... |
15 16 17 18 |
struct bio *new = bio_alloc(gfp, nr_pages); if (bio) { bio_chain(bio, new); |
4e49ea4a3 block/fs/drivers:... |
19 |
submit_bio(bio); |
9082e87bf block: remove str... |
20 |
} |
5dba3089e blkdev: Submit di... |
21 |
|
9082e87bf block: remove str... |
22 |
return new; |
f31e7e402 blkdev: move blkd... |
23 |
} |
38f252553 block: add __blkd... |
24 |
/**
 * __blkdev_issue_discard - queue a discard with an anchor bio
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	pointer to anchor bio; on return points at the last bio issued,
 *		which the caller must submit and put
 *
 * Description:
 *    Generate and issue a chain of discard bios for the sectors in question,
 *    without waiting for completion.
 */
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* Both start and count must be logical-block aligned. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		/* Treat "not supported" as success: nothing to discard. */
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
3f14d792f blkdev: add blkde... |
130 |
|
3f14d792f blkdev: add blkde... |
131 |
/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				     sector_t nr_sects, gfp_t gfp_mask,
				     struct page *page, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Both start and count must be logical-block aligned. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects,
					 max_write_same_sectors);

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		/* Single repeated payload: one bvec covering one logical block. */
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		bio->bi_iter.bi_size = len << 9;
		nr_sects -= len;
		sector += len;
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
a6f0788ec block: add suppor... |
218 219 |
/*
 * Queue REQ_OP_WRITE_ZEROES bios covering the range, chained behind *biop.
 * Returns -EOPNOTSUPP when the device advertises no write-zeroes capability.
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects,
					 max_write_zeroes_sectors);

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << 9;
		nr_sects -= len;
		sector += len;
		cond_resched();
	}

	*biop = bio;
	return 0;
}
615d22a51 block: Fix __blkd... |
258 259 260 261 262 263 264 265 |
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t bio_pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	/* Cap at what a single bio can carry. */
	return min(bio_pages, (sector_t)BIO_MAX_PAGES);
}
425a4dba7 block: factor out... |
270 271 272 273 274 275 276 277 278 279 280 |
/*
 * Zero a range by issuing plain WRITE bios whose pages all point at
 * ZERO_PAGE(0).  Fallback path for devices without a zeroing offload.
 */
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int added = 0;
	unsigned int len;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Fill this bio with zero pages until it (or the range) is full. */
		while (nr_sects != 0) {
			len = min((sector_t) PAGE_SIZE, nr_sects << 9);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			nr_sects -= added >> 9;
			sector += added >> 9;
			if (added < len)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
a6f0788ec block: add suppor... |
304 |
/** |
e73c23ff7 block: add async ... |
305 |
* __blkdev_issue_zeroout - generate number of zero filed write bios |
3f14d792f blkdev: add blkde... |
306 307 308 309 |
* @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) |
e73c23ff7 block: add async ... |
310 |
* @biop: pointer to anchor bio |
ee472d835 block: add a flag... |
311 |
* @flags: controls detailed behavior |
3f14d792f blkdev: add blkde... |
312 313 |
* * Description: |
ee472d835 block: add a flag... |
314 315 316 317 318 |
* Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. |
cb365b967 block: add a new ... |
319 320 321 |
* * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. |
3f14d792f blkdev: add blkde... |
322 |
*/ |
e73c23ff7 block: add async ... |
323 324 |
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, |
ee472d835 block: add a flag... |
325 |
unsigned flags) |
3f14d792f blkdev: add blkde... |
326 |
{ |
18edc8eaa blkdev: fix blkde... |
327 |
int ret; |
28b2be203 block: require wr... |
328 329 330 331 332 |
sector_t bs_mask; bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; if ((sector | nr_sects) & bs_mask) return -EINVAL; |
3f14d792f blkdev: add blkde... |
333 |
|
a6f0788ec block: add suppor... |
334 |
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, |
d928be9f8 block: add a REQ_... |
335 |
biop, flags); |
cb365b967 block: add a new ... |
336 |
if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK)) |
425a4dba7 block: factor out... |
337 |
return ret; |
3f14d792f blkdev: add blkde... |
338 |
|
425a4dba7 block: factor out... |
339 340 |
return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, biop); |
3f14d792f blkdev: add blkde... |
341 |
} |
e73c23ff7 block: add async ... |
342 |
EXPORT_SYMBOL(__blkdev_issue_zeroout); |
579e8f3c7 block: Make blkde... |
343 344 345 346 347 348 349 |
/** * blkdev_issue_zeroout - zero-fill a block range * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) |
ee472d835 block: add a flag... |
350 |
* @flags: controls detailed behavior |
579e8f3c7 block: Make blkde... |
351 352 |
* * Description: |
ee472d835 block: add a flag... |
353 354 355 |
* Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. See __blkdev_issue_zeroout() for the * valid values for %flags. |
579e8f3c7 block: Make blkde... |
356 |
*/ |
579e8f3c7 block: Make blkde... |
357 |
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
ee472d835 block: add a flag... |
358 |
sector_t nr_sects, gfp_t gfp_mask, unsigned flags) |
579e8f3c7 block: Make blkde... |
359 |
{ |
d5ce4c31d block: cope with ... |
360 361 362 |
int ret = 0; sector_t bs_mask; struct bio *bio; |
e73c23ff7 block: add async ... |
363 |
struct blk_plug plug; |
d5ce4c31d block: cope with ... |
364 |
bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev); |
d93ba7a5a block: Add discar... |
365 |
|
d5ce4c31d block: cope with ... |
366 367 368 369 370 371 |
bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; if ((sector | nr_sects) & bs_mask) return -EINVAL; retry: bio = NULL; |
e73c23ff7 block: add async ... |
372 |
blk_start_plug(&plug); |
d5ce4c31d block: cope with ... |
373 374 375 376 377 378 379 380 381 382 |
if (try_write_zeroes) { ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, &bio, flags); } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, &bio); } else { /* No zeroing offload support */ ret = -EOPNOTSUPP; } |
e73c23ff7 block: add async ... |
383 384 385 386 387 |
if (ret == 0 && bio) { ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); |
d5ce4c31d block: cope with ... |
388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 |
if (ret && try_write_zeroes) { if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { try_write_zeroes = false; goto retry; } if (!bdev_write_zeroes_sectors(bdev)) { /* * Zeroing offload support was indicated, but the * device reported ILLEGAL REQUEST (for some devices * there is no non-destructive way to verify whether * WRITE ZEROES is actually supported). */ ret = -EOPNOTSUPP; } } |
579e8f3c7 block: Make blkde... |
403 |
|
e73c23ff7 block: add async ... |
404 |
return ret; |
579e8f3c7 block: Make blkde... |
405 |
} |
3f14d792f blkdev: add blkde... |
406 |
EXPORT_SYMBOL(blkdev_issue_zeroout); |