block/blk-flush.c
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
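
/*
 * Summary of the decomposition described above, per device capability
 * (wb = volatile writeback cache):
 *
 *	no wb cache:		REQ_PREFLUSH and REQ_FUA are ignored
 *	wb cache + FUA:		REQ_PREFLUSH -> PREFLUSH step,
 *				REQ_FUA passed down with the DATA step
 *	wb cache, no FUA:	REQ_PREFLUSH -> PREFLUSH step,
 *				REQ_FUA -> POSTFLUSH step
 */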
|
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

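
/*
 * Figure out which REQ_FSEQ_* steps @rq requires: DATA when the request
 * transfers sectors, plus PREFLUSH/POSTFLUSH depending on REQ_PREFLUSH,
 * REQ_FUA and whether the queue advertises a writeback cache
 * (QUEUE_FLAG_WC) and native FUA support (QUEUE_FLAG_FUA).
 */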
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

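
/*
 * Return the next incomplete step of @rq's flush sequence, i.e. the lowest
 * REQ_FSEQ_* bit that hasn't been recorded in rq->flush.seq yet.
 */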
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

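
/*
 * Put @rq back on the dispatch path.  On blk-mq it goes onto the requeue
 * list (which kicks itself); on the legacy path it is added to
 * q->queue_head.  @add_front selects head vs. tail insertion.  Returns
 * %true if the caller still needs to run the legacy queue, %false for
 * blk-mq.
 */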
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq, cmd_flags);
	return kicked | queued;
}

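
/*
 * Completion handler for the flush request itself.  On blk-mq it first
 * releases the tag borrowed from the request the flush was cloned from,
 * then flips fq->flush_running_idx and advances every request that was
 * waiting on this flush to its next sequence step.
 */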
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		if (!q->elevator) {
			blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
			flush_rq->tag = -1;
		} else {
			blk_mq_put_driver_tag_hctx(hctx, flush_rq);
			flush_rq->internal_tag = -1;
		}
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to empty queue_head may stall the
	 *    queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress.  Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    !(q->mq_ops && q->elevator) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of none scheduler, borrow tag from the first request
	 * since they can't be in flight at the same time.  And acquire
	 * the tag's ownership for flush req.
	 *
	 * In case of IO scheduler, flush rq need to borrow scheduler tag
	 * just for cheating put/get driver tag.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;

		if (!q->elevator) {
			fq->orig_rq = first_rq;
			flush_rq->tag = first_rq->tag;
			hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
		} else {
			flush_rq->internal_tag = first_rq->internal_tag;
		}
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

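
/*
 * Completion handler for the DATA step on the legacy (non-mq) path.
 * Records completion of the DATA step and, if that placed requests on
 * queue_head, kicks the queue.
 */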
static void flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	lockdep_assert_held(q->queue_lock);

	/*
	 * Updating q->in_flight[] here for making this tag usable
	 * early.  Because in blk_queue_start_tag(),
	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
	 * reserve tags for sync I/O.
	 *
	 * More importantly this way can avoid the following I/O
	 * deadlock:
	 *
	 * - suppose there are 40 fua requests coming to flush queue
	 *   and queue depth is 31
	 * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
	 *   tag for async I/O any more
	 * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
	 *   and flush_data_end_io() is called
	 * - the other rqs still can't go ahead if not updating
	 *   q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
	 *   are held in flush data queue and make no progress of
	 *   handling post flush rq
	 * - only after the post flush rq is handled, all these rqs
	 *   can be completed
	 */
	elv_completed_request(q, rq);

	/* for avoiding double accounting */
	rq->rq_flags &= ~RQF_STARTED;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

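
/*
 * blk-mq counterpart of flush_data_end_io(): drops the driver tag when an
 * I/O scheduler is in use, records completion of the DATA step under
 * fq->mq_flush_lock and restarts the hardware queue.
 */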
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag_hctx(hctx, rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_request(rq, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_request_bypass_insert(rq, false);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
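
/*
 * Example usage (hypothetical caller, e.g. an fsync-style path that must
 * make previously completed writes durable):
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *
 * passing NULL for @error_sector when the error offset isn't needed.
 */
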
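/*
 * Allocate the flush machinery state for a queue: the blk_flush_queue
 * bookkeeping structure plus the preallocated flush request, sized for
 * @cmd_size bytes of driver payload rounded up to a cache line.
 */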
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops)
		spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}