Commit 18edc8eaa68070771bdb2098260e44efe74de722

Authored by Dmitry Monakhov
Committed by Jens Axboe
1 parent 3383977fad

blkdev: fix blkdev_issue_zeroout return value

- If the function is called without the barrier option, the return value is incorrect

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Showing 1 changed file with 6 additions and 2 deletions Inline Diff

1 /* 1 /*
2 * Functions related to generic helpers functions 2 * Functions related to generic helpers functions
3 */ 3 */
4 #include <linux/kernel.h> 4 #include <linux/kernel.h>
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/bio.h> 6 #include <linux/bio.h>
7 #include <linux/blkdev.h> 7 #include <linux/blkdev.h>
8 #include <linux/scatterlist.h> 8 #include <linux/scatterlist.h>
9 9
10 #include "blk.h" 10 #include "blk.h"
11 11
/*
 * Completion callback for discard bios issued by blkdev_issue_discard().
 * Records the error state in the bio's own flag bits so the submitter can
 * inspect them after the (optional) wait, then drops the bio reference.
 */
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		/* Preserve -EOPNOTSUPP distinctly; the submitter reports it. */
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	/* bi_private holds the submitter's completion only in the wait case. */
	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}
25 25
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 *    With BLKDEV_IFL_WAIT each chunk is submitted synchronously; without
 *    it the bios complete asynchronously via blkdev_discard_end_io().
 *    Returns 0 on success, -ENXIO/-EOPNOTSUPP/-ENOMEM/-EIO on failure.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		/* Round down to a multiple of the discard granularity.
		 * NOTE(review): assumes disc_sects is a power of two —
		 * TODO confirm against the queue-limits setup. */
		max_discard_sectors &= ~(disc_sects - 1);
	}

	/* Split the range into max_discard_sectors-sized chunks; stop on
	 * the first error. */
	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		/* Only hand the end_io a completion when we intend to wait. */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/* Extra reference so the bio flags can be read after
		 * completion (end_io does its own bio_put). */
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
103 103
/*
 * Bookkeeping for a batch of in-flight bios that share one completion.
 * Passed to each bio via bi_private (only when the caller waits).
 */
struct bio_batch
{
	atomic_t done;		/* number of bios completed so far */
	unsigned long flags;	/* BIO_UPTODATE / BIO_EOPNOTSUPP result bits */
	struct completion *wait;	/* completed once per finished bio */
	bio_end_io_t *end_io;	/* optional per-bio callback, may be NULL */
};
111 111
112 static void bio_batch_end_io(struct bio *bio, int err) 112 static void bio_batch_end_io(struct bio *bio, int err)
113 { 113 {
114 struct bio_batch *bb = bio->bi_private; 114 struct bio_batch *bb = bio->bi_private;
115 115
116 if (err) { 116 if (err) {
117 if (err == -EOPNOTSUPP) 117 if (err == -EOPNOTSUPP)
118 set_bit(BIO_EOPNOTSUPP, &bb->flags); 118 set_bit(BIO_EOPNOTSUPP, &bb->flags);
119 else 119 else
120 clear_bit(BIO_UPTODATE, &bb->flags); 120 clear_bit(BIO_UPTODATE, &bb->flags);
121 } 121 }
122 if (bb) { 122 if (bb) {
123 if (bb->end_io) 123 if (bb->end_io)
124 bb->end_io(bio, err); 124 bb->end_io(bio, err);
125 atomic_inc(&bb->done); 125 atomic_inc(&bb->done);
126 complete(bb->wait); 126 complete(bb->wait);
127 } 127 }
128 bio_put(bio); 128 bio_put(bio);
129 } 129 }
130 130
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes to
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    Send a barrier at the beginning and at the end if requested; this
 *    guarantees correct request ordering.  An empty barrier allows us to
 *    avoid a post-queue flush.
 *    Returns 0 on success or a negative errno.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	/* Start optimistic; end_io clears BIO_UPTODATE on failure. */
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	/* Reset ret on (re-)entry so a stale positive value from
	 * bio_add_page() below can never leak out as the return value. */
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		/* Batch bookkeeping is only wired up when we will wait. */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		/* Fill the bio with zero pages, one PAGE_SIZE chunk at a
		 * time, until the range is covered or the bio is full. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			/* bio_add_page() returns the byte count added. */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		/* Clear the bio_add_page() byte count — this bio was built
		 * successfully, so it must not be mistaken for an error. */
		ret = 0;
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);


	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight */
		while ( issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* Some sectors remain (e.g. a bio_alloc failure was retried past,
	 * or a bio filled up early) — go submit the rest. */
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
222 226