  /*
   * Functions related to barrier IO handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/gfp.h>
  
  #include "blk.h"

/*
 * Cache flushing for ordered writes handling
 */
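/*
 * Return the QUEUE_ORDSEQ_* bit of the current (i.e. lowest
 * unfinished) stage of the ordered sequence, or 0 if no ordered
 * sequence is in progress.
 */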
  unsigned blk_ordered_cur_seq(struct request_queue *q)
  {
  	if (!q->ordseq)
  		return 0;
  	return 1 << ffz(q->ordseq);
  }
  
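/*
 * Map @rq to its stage in the ordered sequence.  The flush and
 * barrier requests embedded in the queue are recognized by address;
 * other fs requests fall into QUEUE_ORDSEQ_DRAIN or QUEUE_ORDSEQ_DONE
 * depending on whether they were issued before or after the barrier,
 * which is what REQ_ORDERED_COLOR tracks.
 */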
  unsigned blk_ordered_req_seq(struct request *rq)
  {
  	struct request_queue *q = rq->q;
  
  	BUG_ON(q->ordseq == 0);
  
  	if (rq == &q->pre_flush_rq)
  		return QUEUE_ORDSEQ_PREFLUSH;
  	if (rq == &q->bar_rq)
  		return QUEUE_ORDSEQ_BAR;
  	if (rq == &q->post_flush_rq)
  		return QUEUE_ORDSEQ_POSTFLUSH;
  
  	/*
  	 * !fs requests don't need to follow barrier ordering.  Always
  	 * put them at the front.  This fixes the following deadlock.
  	 *
  	 * http://thread.gmane.org/gmane.linux.kernel/537473
  	 */
  	if (rq->cmd_type != REQ_TYPE_FS)
  		return QUEUE_ORDSEQ_DRAIN;
  
  	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
  	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
  		return QUEUE_ORDSEQ_DRAIN;
  	else
  		return QUEUE_ORDSEQ_DONE;
  }
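
/*
 * Mark the stages in @seq as complete, recording @error if it is the
 * first failure.  Once every stage has finished, complete the
 * original barrier request and return true.
 */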
  bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
  {
  	struct request *rq;
  
  	if (error && !q->orderr)
  		q->orderr = error;
  
  	BUG_ON(q->ordseq & seq);
  	q->ordseq |= seq;
  
  	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
  		return false;
  
  	/*
  	 * Okay, sequence complete.
  	 */
  	q->ordseq = 0;
  	rq = q->orig_bar_rq;
  	__blk_end_request_all(rq, q->orderr);
  	return true;
  }
  
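/*
 * Completion callbacks for the pre-flush, barrier and post-flush
 * requests.  Each notifies the elevator and marks its stage of the
 * ordered sequence as complete.
 */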
  static void pre_flush_end_io(struct request *rq, int error)
  {
  	elv_completed_request(rq->q, rq);
  	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
  }
  
  static void bar_end_io(struct request *rq, int error)
  {
  	elv_completed_request(rq->q, rq);
  	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
  }
  
  static void post_flush_end_io(struct request *rq, int error)
  {
  	elv_completed_request(rq->q, rq);
  	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
  }
  
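/*
 * Initialize the pre- or post-flush request embedded in the queue
 * (selected by @which) and insert it at the front of the dispatch
 * queue.
 */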
  static void queue_flush(struct request_queue *q, unsigned which)
  {
  	struct request *rq;
  	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
  		rq = &q->pre_flush_rq;
  		end_io = pre_flush_end_io;
  	} else {
  		rq = &q->post_flush_rq;
  		end_io = post_flush_end_io;
  	}
  	blk_rq_init(q, rq);
  	rq->cmd_type = REQ_TYPE_FS;
  	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
  	rq->rq_disk = q->orig_bar_rq->rq_disk;
  	rq->end_io = end_io;
  
  	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
  }
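
/*
 * Start an ordered sequence for the barrier request *@rqp: stash the
 * original request in q->orig_bar_rq and queue the pre-flush, barrier
 * and post-flush stages that q->ordered calls for.  Returns false if
 * the whole sequence completed immediately (the request is gone),
 * true otherwise.
 */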
  static inline bool start_ordered(struct request_queue *q, struct request **rqp)
  {
  	struct request *rq = *rqp;
  	unsigned skip = 0;

	q->orderr = 0;
  	q->ordered = q->next_ordered;
  	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
  	if (!blk_rq_sectors(rq))
  		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
  				QUEUE_ORDERED_DO_POSTFLUSH);

	/* stash away the original request */
  	blk_dequeue_request(rq);
  	q->orig_bar_rq = rq;
  	rq = NULL;
  
	/*
	 * Queue the ordered sequence.  As we stack the requests at the
	 * head of the dispatch queue, we need to queue them in reverse
	 * order.  Note that we rely on the fact that no fs request uses
	 * ELEVATOR_INSERT_FRONT, and thus no fs request can get in
	 * between the requests of the ordered sequence.
	 */
  	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
  		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
  		rq = &q->post_flush_rq;
  	} else
  		skip |= QUEUE_ORDSEQ_POSTFLUSH;

  	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
  		rq = &q->bar_rq;
  
  		/* initialize proxy request and queue it */
  		blk_rq_init(q, rq);
  		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
  			rq->cmd_flags |= REQ_WRITE;
  		if (q->ordered & QUEUE_ORDERED_DO_FUA)
  			rq->cmd_flags |= REQ_FUA;
  		init_request_from_bio(rq, q->orig_bar_rq->bio);
  		rq->end_io = bar_end_io;
  
  		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
  	} else
  		skip |= QUEUE_ORDSEQ_BAR;

  	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
  		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
  		rq = &q->pre_flush_rq;
  	} else
  		skip |= QUEUE_ORDSEQ_PREFLUSH;

  	if (queue_in_flight(q))
  		rq = NULL;
  	else
  		skip |= QUEUE_ORDSEQ_DRAIN;

  	*rqp = rq;
  
  	/*
  	 * Complete skipped sequences.  If whole sequence is complete,
  	 * return false to tell elevator that this request is gone.
  	 */
  	return !blk_ordered_complete_seq(q, skip, 0);
  }
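
/*
 * Main barrier hook, called as requests are dispatched.  Starts an
 * ordered sequence when a barrier request is seen, terminates
 * barriers with -EOPNOTSUPP when ordering is unsupported, and holds
 * requests back (by setting *@rqp to NULL) until their turn in the
 * sequence.  Returns false if the request is gone.
 */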
  bool blk_do_ordered(struct request_queue *q, struct request **rqp)
  {
  	struct request *rq = *rqp;
  	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
  				(rq->cmd_flags & REQ_HARDBARRIER);
  
  	if (!q->ordseq) {
  		if (!is_barrier)
  			return true;

  		if (q->next_ordered != QUEUE_ORDERED_NONE)
  			return start_ordered(q, rqp);
  		else {
  			/*
  			 * Queue ordering not supported.  Terminate
  			 * with prejudice.
  			 */
  			blk_dequeue_request(rq);
  			__blk_end_request_all(rq, -EOPNOTSUPP);
  			*rqp = NULL;
  			return false;
  		}
  	}
  
  	/*
  	 * Ordered sequence in progress
  	 */
  
  	/* Special requests are not subject to ordering rules. */
  	if (rq->cmd_type != REQ_TYPE_FS &&
  	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
  		return true;

  	/* Ordered by draining.  Wait for turn. */
  	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
  	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
  		*rqp = NULL;

  	return true;
  }
  
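/*
 * Completion handler for the empty barrier bio issued by
 * blkdev_issue_flush().  Records -EOPNOTSUPP and I/O errors in the
 * bio flags and completes the waiter, if there is one.
 */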
  static void bio_end_empty_barrier(struct bio *bio, int err)
  {
  	if (err) {
  		if (err == -EOPNOTSUPP)
  			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
  		clear_bit(BIO_UPTODATE, &bio->bi_flags);
  	}
  	if (bio->bi_private)
  		complete(bio->bi_private);
  	bio_put(bio);
  }
  
  /**
   * blkdev_issue_flush - queue a flush
   * @bdev:	blockdev to issue flush for
   * @gfp_mask:	memory allocation flags (for bio_alloc)
   * @error_sector:	error sector
   * @flags:	BLKDEV_IFL_* flags to control behaviour
   *
   * Description:
   *    Issue a flush for the block device in question. Caller can supply
   *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If BLKDEV_IFL_WAIT is not set in @flags, the flush is
 *    only submitted; the call returns without waiting for completion.
   */
  int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
  		sector_t *error_sector, unsigned long flags)
  {
  	DECLARE_COMPLETION_ONSTACK(wait);
  	struct request_queue *q;
  	struct bio *bio;
  	int ret = 0;
  
  	if (bdev->bd_disk == NULL)
  		return -ENXIO;
  
  	q = bdev_get_queue(bdev);
  	if (!q)
  		return -ENXIO;
  	/*
  	 * some block devices may not have their queue correctly set up here
  	 * (e.g. loop device without a backing file) and so issuing a flush
  	 * here will panic. Ensure there is a request function before issuing
  	 * the barrier.
  	 */
  	if (!q->make_request_fn)
  		return -ENXIO;
  	bio = bio_alloc(gfp_mask, 0);
  	bio->bi_end_io = bio_end_empty_barrier;
  	bio->bi_bdev = bdev;
  	if (test_bit(BLKDEV_WAIT, &flags))
  		bio->bi_private = &wait;

  	bio_get(bio);
  	submit_bio(WRITE_BARRIER, bio);
  	if (test_bit(BLKDEV_WAIT, &flags)) {
  		wait_for_completion(&wait);
  		/*
  		 * The driver must store the error location in ->bi_sector, if
  		 * it supports it. For non-stacked drivers, this should be
  		 * copied from blk_rq_pos(rq).
  		 */
  		if (error_sector)
  			*error_sector = bio->bi_sector;
  	}

  	if (bio_flagged(bio, BIO_EOPNOTSUPP))
  		ret = -EOPNOTSUPP;
  	else if (!bio_flagged(bio, BIO_UPTODATE))
  		ret = -EIO;
  
  	bio_put(bio);
  	return ret;
  }
  EXPORT_SYMBOL(blkdev_issue_flush);
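
/*
 * A minimal usage sketch for blkdev_issue_flush() (hypothetical
 * caller; assumes the BLKDEV_IFL_WAIT flag from <linux/blkdev.h>):
 * synchronously flush a device's write cache and pick up the error
 * sector, should the driver report one.
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
 *				 BLKDEV_IFL_WAIT);
 *	if (err == -EOPNOTSUPP)
 *		handle_no_flush_support();
 *	else if (err)
 *		handle_flush_error(error_sector);
 *
 * handle_no_flush_support() and handle_flush_error() are placeholder
 * names for caller-specific recovery, not functions defined here.
 */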