block/blk-flush.c

/*
 * Functions to sequence FLUSH and FUA writes.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
	QUEUE_FSEQ_DONE		= (1 << 4),
};
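
/*
 * A flush sequence steps through PREFLUSH -> DATA -> POSTFLUSH, with
 * unneeded steps skipped.  q->flush_seq records each completed (or
 * skipped) step as a bit; the lowest clear bit is the next step to run.
 */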
static struct request *queue_next_fseq(struct request_queue *q);

unsigned blk_flush_cur_seq(struct request_queue *q)
{
	if (!q->flush_seq)
		return 0;
	return 1 << ffz(q->flush_seq);
}
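
/*
 * Mark the steps in @seq as complete, recording @error if it is the
 * first one seen.  Returns the next request to dispatch: the request
 * for the following step, a pending flush moved onto the queue head
 * once the sequence finishes, or NULL.
 */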
static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
{
	struct request *next_rq = NULL;

	if (error && !q->flush_err)
		q->flush_err = error;

	BUG_ON(q->flush_seq & seq);
	q->flush_seq |= seq;

	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_fseq(q);
	} else {
		/* complete this flush request */
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_flush_rq = NULL;
		q->flush_seq = 0;

		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
	}
	return next_rq;
}

static void blk_flush_complete_seq_end_io(struct request_queue *q,
					  unsigned seq, int error)
{
	bool was_empty = elv_queue_empty(q);
	struct request *next_rq;

	next_rq = blk_flush_complete_seq(q, seq, error);

	/*
	 * Moving a request silently to empty queue_head may stall the
	 * queue.  Kick the queue in those cases.
	 */
	if (was_empty && next_rq)
		__blk_run_queue(q);
}
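
/*
 * end_io handlers for the three sequence steps.  Each reports its step
 * as finished and lets the sequencer queue whatever comes next.
 */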
static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}

static void flush_data_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}

static void init_flush_request(struct request *rq, struct gendisk *disk)
{
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = WRITE_FLUSH;
	rq->rq_disk = disk;
}
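
/*
 * Build and queue the request for the current sequence step.  The
 * preallocated q->flush_rq is reinitialized and reused for every step,
 * and inserted at the front of the queue so it dispatches first.
 */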
static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *orig_rq = q->orig_flush_rq;
	struct request *rq = &q->flush_rq;

	blk_rq_init(q, rq);

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		init_flush_request(rq, orig_rq->rq_disk);
		rq->end_io = pre_flush_end_io;
		break;
	case QUEUE_FSEQ_DATA:
		init_request_from_bio(rq, orig_rq->bio);
		/*
		 * orig_rq->rq_disk may be different from
		 * bio->bi_bdev->bd_disk if orig_rq got here through
		 * remapping drivers.  Make sure rq->rq_disk points
		 * to the same one as orig_rq.
		 */
		rq->rq_disk = orig_rq->rq_disk;
		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
		rq->end_io = flush_data_end_io;
		break;
	case QUEUE_FSEQ_POSTFLUSH:
		init_flush_request(rq, orig_rq->rq_disk);
		rq->end_io = post_flush_end_io;
		break;
	default:
		BUG();
	}

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	return rq;
}
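
/*
 * Decide how to execute a REQ_FLUSH/REQ_FUA request against the
 * queue's capabilities: pass it through untouched, defer it behind an
 * in-progress sequence, or start a new sequence.  A post-flush step is
 * used to emulate FUA when the device does not support it.  Returns
 * the request to issue now, or NULL if it was deferred or consumed by
 * the sequencer.
 */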
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	unsigned int fflags = q->flush_flags; /* may change, cache it */
	bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
	bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
	bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
	unsigned skip = 0;

	/*
	 * Special case.  If there's data but flush is not necessary,
	 * the request can be issued directly.
	 *
	 * Flush w/o data should be able to be issued directly too but
	 * currently some drivers assume that rq->bio contains
	 * non-zero data if it isn't NULL and empty FLUSH requests
	 * getting here usually have bios without data.
	 */
	if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
		rq->cmd_flags &= ~REQ_FLUSH;
		if (!has_fua)
			rq->cmd_flags &= ~REQ_FUA;
		return rq;
	}

	/*
	 * Sequenced flushes can't be processed in parallel.  If
	 * another one is already in progress, queue for later
	 * processing.
	 */
	if (q->flush_seq) {
		list_move_tail(&rq->queuelist, &q->pending_flushes);
		return NULL;
	}

	/*
	 * Start a new flush sequence
	 */
	q->flush_err = 0;
	q->flush_seq |= QUEUE_FSEQ_STARTED;

	/* adjust FLUSH/FUA of the original request and stash it away */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!has_fua)
		rq->cmd_flags &= ~REQ_FUA;
	blk_dequeue_request(rq);
	q->orig_flush_rq = rq;

	/* skip unneeded sequences and return the first one */
	if (!do_preflush)
		skip |= QUEUE_FSEQ_PREFLUSH;
	if (!blk_rq_sectors(rq))
		skip |= QUEUE_FSEQ_DATA;
	if (!do_postflush)
		skip |= QUEUE_FSEQ_POSTFLUSH;
	return blk_flush_complete_seq(q, skip, 0);
}
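
/* Completion callback for the empty bio issued by blkdev_issue_flush(). */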
static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	sector where a flush error occurred, if any (optional)
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  Callers can supply room for storing the error offset in
 *    case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush); |
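
/*
 * Usage sketch (a hypothetical caller, for illustration only -- not
 * part of this file): code holding a block_device reference can drain
 * the device's write cache and pick up the error offset like this:
 *
 *	sector_t error_sector;
 *	int ret;
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
 *	if (ret == -EIO)
 *		pr_err("cache flush failed near sector %llu\n",
 *		       (unsigned long long)error_sector);
 */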