Blame view
block/blk-lib.c
3.94 KB
f31e7e402
|
1 2 3 4 5 6 7 8 9 10 |
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
/*
 * bio_batch - tracks a batch of bios submitted as one logical request.
 *
 * @done starts at 1 (a reference held by the submitter) and is incremented
 * once per submitted bio; bio_batch_end_io() drops one reference per
 * completing bio and the submitter drops its own before sleeping, so
 * @wait is completed exactly once, after the last bio in the batch ends.
 */
struct bio_batch {
	atomic_t		done;	/* outstanding refs: 1 (submitter) + one per bio */
	unsigned long		flags;	/* BIO_UPTODATE bit; cleared on any real error */
	struct completion	*wait;	/* submitter sleeps here until the batch drains */
};

/*
 * Per-bio completion callback: record errors in the shared batch flags,
 * drop this bio's reference on the batch, and wake the submitter if it
 * was the last one.
 */
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* -EOPNOTSUPP is deliberately not treated as a batch failure. */
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.  Splits the
 *    range into bios of at most max_discard_sectors each, submits them
 *    all, then blocks until every bio has completed.
 *
 * Return: 0 on success, -ENXIO if the device has no queue, -EOPNOTSUPP
 *    if discard (or secure discard, when requested) is unsupported,
 *    -ENOMEM if a bio allocation fails, or -EIO if any submitted bio
 *    completed with an error.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity.  Capped at UINT_MAX >> 9 so the byte count
	 * (sectors << 9) below cannot overflow bi_size.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	} else if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		/*
		 * NOTE(review): this mask only aligns correctly when
		 * disc_sects is a power of two, and if it zeroes
		 * max_discard_sectors the loop below would not make
		 * progress — presumably granularity is guaranteed to be a
		 * small power-of-two here; confirm against the queue-limits
		 * setters.
		 */
		max_discard_sectors &= ~(disc_sects - 1);
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	/* One initial reference held by this thread; see struct bio_batch. */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			/* Stop submitting; already-issued bios are awaited below. */
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		/* Carve off at most max_discard_sectors per bio. */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/* Take the batch reference before the bio can complete. */
		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight: drop our own reference; sleep unless
	 * everything has already completed. */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
/**
 * blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.  Each
 *    bio is packed with as many ZERO_PAGE(0) references as it will
 *    accept, all bios are submitted, and the function then blocks until
 *    every bio has completed.
 *
 * Return: 0 on success, -ENOMEM if a bio allocation fails, or -EIO if
 *    any submitted bio completed with an error.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* One initial reference held by this thread; see struct bio_batch. */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			/* Stop submitting; already-issued bios are awaited below. */
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		/*
		 * Fill the bio with references to the shared zero page, one
		 * PAGE_SIZE chunk at a time, until either the range is covered
		 * or bio_add_page() takes less than requested (bio is full).
		 */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			/* ret is the number of bytes actually added. */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		/* ret held a byte count above; reset to "no error". */
		ret = 0;
		/* Take the batch reference before the bio can complete. */
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);