block/blk-core.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
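
/*
 * Worked example (editorial note, not in the original source): with the
 * default nr_requests of 128 (BLKDEV_MAX_RQ), the arithmetic above gives
 * nr_congestion_on = 128 - 16 + 1 = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the queue is flagged
 * congested once 113 requests are allocated and the flag is only cleared
 * again when the count drops below 103, giving the thresholds some
 * hysteresis.
 */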
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
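
/*
 * Usage sketch (editorial, not part of the original file): callers use
 * this to reach the queue's BDI, e.g. to consult or tune readahead.
 * "bdev" is assumed to be a block_device reference held by the caller:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		pr_info("ra_pages = %lu\n", bdi->ra_pages);
 */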
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}
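
/*
 * Worked example (editorial note, not in the original source):
 * req_bio_endio() advances a bio by the completed byte count. Completing
 * the first 4096 bytes of an 8192-byte bio leaves bi_size = 4096 and
 * moves bi_sector forward by 4096 >> 9 = 8 sectors; bio_endio() is only
 * called once bi_size reaches 0.
 */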
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	queue_delayed_work(kblockd_workqueue, &q->delay_work,
				msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
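
/*
 * Usage sketch (editorial, not part of the original file): a request_fn
 * that runs out of driver resources (tags, DMA buffers, ...) can back off
 * and let kblockd kick the queue again a few milliseconds later:
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (!mydev_resources_available()) {
 *				blk_delay_queue(q, 3);	// retry in ~3 ms
 *				return;
 *			}
 *			blk_start_request(rq);
 *			// hand rq to the hardware...
 *		}
 *	}
 *
 * "mydev_request_fn" and "mydev_resources_available" are hypothetical.
 */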
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	__cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
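
/*
 * Usage sketch (editorial, not part of the original file): the canonical
 * pairing is blk_stop_queue() from the request_fn on a queue-full
 * condition and blk_start_queue() from the completion path, both under
 * q->queue_lock:
 *
 *	// in mydev_request_fn(), lock already held:
 *	if (hw_queue_full(dev))
 *		blk_stop_queue(q);
 *
 *	// in the completion interrupt:
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * "mydev_request_fn" and "hw_queue_full" are hypothetical.
 */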
  
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blk_throtl_exit() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q))) {
		__cancel_delayed_work(&q->delay_work);
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
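
/*
 * Editorial note (not in the original source): pick the variant by
 * context - blk_run_queue() takes q->queue_lock itself; __blk_run_queue()
 * expects the lock already held with interrupts disabled; and
 * blk_run_queue_async() defers the ->request_fn call to kblockd, which is
 * useful when calling ->request_fn directly could recurse or when the
 * caller must not block:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	__blk_run_queue(q);		// lock held: use the __ variant
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */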
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	while (true) {
		bool drain = false;
		int i;

		spin_lock_irq(q->queue_lock);

		elv_drain_elevator(q);
		if (drain_all)
			blk_throtl_drain(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation.  Some drivers
		 * (e.g. fd) get unhappy in such cases.  Kick queue iff
		 * dispatch queue has something on it.
		 */
		if (!list_empty(&q->queue_head))
			__blk_run_queue(q);

		drain |= q->rq.elvpriv;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->rq.count[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		spin_unlock_irq(q->queue_lock);
		if (!drain)
			break;
		msleep(10);
	}
}
/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DEAD, drain all pending requests, destroy and put it.  All
 * future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DEAD, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);

	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;

	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DEAD marking.  The caller might
	 * be trying to tear down @q before its elevator is initialized, in
	 * which case we don't want to call into draining.
	 */
	if (q->elevator)
		blk_drain_queue(q, true);

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
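
/*
 * Usage sketch (editorial, not part of the original file): a driver's
 * module exit path pairs this with the earlier queue setup:
 *
 *	static void __exit mydev_exit(void)
 *	{
 *		del_gendisk(mydev_disk);
 *		blk_cleanup_queue(mydev_queue);	// drains, syncs, puts @q
 *		put_disk(mydev_disk);
 *	}
 *
 * "mydev_disk" and "mydev_queue" are hypothetical driver globals.
 */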
static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	if (unlikely(rl->rq_pool))
		return 0;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	if (blk_throtl_init(q))
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
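
/*
 * Usage sketch (editorial, not part of the original file): bio-based
 * drivers that bypass the request-queue machinery (e.g. md, loop) pair
 * blk_alloc_queue() with blk_queue_make_request() instead of calling
 * blk_init_queue():
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, mydev_make_request);
 *
 * "mydev_make_request" is a hypothetical make_request_fn.
 */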
/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	if (blk_init_free_list(q))
		return NULL;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock		= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
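
/*
 * Usage sketch (editorial, not part of the original file): the classic
 * request-based driver setup, pairing blk_init_queue() with a strategy
 * routine and a driver lock:
 *
 *	static DEFINE_SPINLOCK(mydev_lock);
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			// start I/O; complete later, e.g. with
 *			// __blk_end_request_all(rq, 0)
 *		}
 *	}
 *
 *	q = blk_init_queue(mydev_request_fn, &mydev_lock);
 *	if (!q)
 *		return -ENOMEM;
 *
 * "mydev_request_fn" and "mydev_lock" are hypothetical.
 */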
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dead(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc, q);
	}

	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, struct io_cq *icq,
		  unsigned int flags, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	rq->cmd_flags = flags | REQ_ALLOCED;

	if (flags & REQ_ELVPRIV) {
		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		/* @rq->elv.icq holds on to io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}

	return rq;
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}
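
/*
 * Editorial note (not in the original source): in this era of the code,
 * q->nr_batching is initialized from BLK_BATCH_REQ (32) and the batch
 * window is BLK_BATCH_TIME (HZ/50, i.e. ~20 ms), both defined in
 * block/blk.h - so a freshly woken "batcher" may allocate up to 32
 * requests within roughly 20 ms before losing its exemption.
 */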
static void __freed_request(struct request_queue *q, int sync)
{
	struct request_list *rl = &q->rq;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_queue_full(q, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, unsigned int flags)
{
	struct request_list *rl = &q->rq;
	int sync = rw_is_sync(flags);

	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		rl->elvpriv--;

	__freed_request(q, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(q, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct elevator_type *et;
	struct io_context *ioc;
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	bool retried = false;
	int may_queue;
retry:
	et = q->elevator->type;
	ioc = current->io_context;

	if (unlikely(blk_queue_dead(q)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * We want ioc to record batching state.  If it's
			 * not already there, creating a new one requires
			 * dropping queue_lock, which in turn requires
			 * retesting conditions to avoid queue hang.
			 */
			if (!ioc && !retried) {
				spin_unlock_irq(q->queue_lock);
				create_io_context(current, gfp_mask, q->node);
				spin_lock_irq(q->queue_lock);
				retried = true;
				goto retry;
			}

			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) &&
	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
		rw_flags |= REQ_ELVPRIV;
		rl->elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* create icq if missing */
	if (unlikely(et->icq_cache && !icq))
		icq = ioc_create_icq(q, gfp_mask);

	/* rqs are guaranteed to have icq on elv_set_request() if requested */
	if (likely(!et->icq_cache || icq))
		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);

	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw_flags);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction was pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[is_sync] == 0))
			rl->starved[is_sync] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
out:
	return rq;
}

/**
 * get_request_wait - get a free request with retry
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 *
 * Get a free request from @q.  This function keeps retrying under memory
 * pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct request_list *rl = &q->rq;

		if (unlikely(blk_queue_dead(q)))
			return NULL;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
		create_io_context(current, GFP_NOIO, q->node);
		ioc_set_batching(q, current->io_context);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
		rq = get_request_wait(q, rw, NULL);
	else
		rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
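
/*
 * Usage sketch (editorial, not part of the original file): callers such
 * as SCSI ioctl helpers allocate a request, fill it in, and hand it to
 * the driver:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	...
 *	blk_execute_rq(q, bdev->bd_disk, rq, 0);
 *	blk_put_request(rq);
 *
 * GFP_KERNEL includes __GFP_WAIT, so this call sleeps until a request is
 * available (or the queue dies) rather than failing under load.
 */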
  
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized
 * by the caller. It is passed a &struct bio, which describes the memory
 * info of the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
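
/*
 * Usage sketch (editorial, not part of the original file): a SCSI-style
 * pass-through path builds the bio (e.g. with bio_map_kern()), turns it
 * into a request, and finishes the BLOCK_PC setup itself:
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *
 * "cdb" and "cdb_len" are hypothetical caller-supplied values.
 */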

  /**
   * blk_requeue_request - put a request back on queue
   * @q:		request queue where request should be inserted
   * @rq:		request to be inserted
   *
   * Description:
   *    Drivers often keep queueing requests until the hardware cannot accept
   *    more.  When that condition happens, we need to put the request back
   *    on the queue.  Must be called with queue lock held.
   */
  void blk_requeue_request(struct request_queue *q, struct request *rq)
  {
  	blk_delete_timer(rq);
  	blk_clear_rq_complete(rq);
  	trace_block_rq_requeue(q, rq);

  	if (blk_rq_tagged(rq))
  		blk_queue_end_tag(q, rq);

  	BUG_ON(blk_queued_rq(rq));

  	elv_requeue_request(q, rq);
  }
  EXPORT_SYMBOL(blk_requeue_request);
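
  /*
   * Sketch of the usual caller (hypothetical driver, not code from this
   * file): a request_fn that finds the hardware full puts the request back
   * and stops the queue; the queue lock is already held inside request_fn,
   * and my_hw_busy() is an assumed helper:
   *
   *	rq = blk_fetch_request(q);
   *	if (rq && my_hw_busy(hw)) {
   *		blk_requeue_request(q, rq);
   *		blk_stop_queue(q);
   *		return;
   *	}
   */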

  static void add_acct_request(struct request_queue *q, struct request *rq,
  			     int where)
  {
  	drive_stat_acct(rq, 1);
  	__elv_add_request(q, rq, where);
  }

  static void part_round_stats_single(int cpu, struct hd_struct *part,
  				    unsigned long now)
  {
  	if (now == part->stamp)
  		return;

  	if (part_in_flight(part)) {
  		__part_stat_add(cpu, part, time_in_queue,
  				part_in_flight(part) * (now - part->stamp));
  		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
  	}
  	part->stamp = now;
  }
  
  /**
   * part_round_stats() - Round off the performance stats on a struct disk_stats.
   * @cpu: cpu number for stats access
   * @part: target partition
   *
   * The average IO queue length and utilisation statistics are maintained
   * by observing the current state of the queue length and the amount of
   * time it has been in this state for.
   *
   * Normally, that accounting is done on IO completion, but that can result
   * in more than a second's worth of IO being accounted for within any one
   * second, leading to >100% utilisation.  To deal with that, we call this
   * function to do a round-off before returning the results when reading
   * /proc/diskstats.  This accounts immediately for all queue usage up to
   * the current jiffies and restarts the counters again.
   */
  void part_round_stats(int cpu, struct hd_struct *part)
  {
  	unsigned long now = jiffies;

  	if (part->partno)
  		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
  	part_round_stats_single(cpu, part, now);
  }
  EXPORT_SYMBOL_GPL(part_round_stats);

  /*
   * queue lock must be held
   */
  void __blk_put_request(struct request_queue *q, struct request *req)
  {
  	if (unlikely(!q))
  		return;
  	if (unlikely(--req->ref_count))
  		return;

  	elv_completed_request(q, req);

  	/* this is a bio leak */
  	WARN_ON(req->bio != NULL);

  	/*
  	 * Request may not have originated from ll_rw_blk.  If not,
  	 * it didn't come out of our reserved rq pools
  	 */
  	if (req->cmd_flags & REQ_ALLOCED) {
  		unsigned int flags = req->cmd_flags;

  		BUG_ON(!list_empty(&req->queuelist));
  		BUG_ON(!hlist_unhashed(&req->hash));

  		blk_free_request(q, req);
  		freed_request(q, flags);
  	}
  }
  EXPORT_SYMBOL_GPL(__blk_put_request);

  void blk_put_request(struct request *req)
  {
  	unsigned long flags;
  	struct request_queue *q = req->q;

  	spin_lock_irqsave(q->queue_lock, flags);
  	__blk_put_request(q, req);
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  EXPORT_SYMBOL(blk_put_request);
  /**
   * blk_add_request_payload - add a payload to a request
   * @rq: request to update
   * @page: page backing the payload
   * @len: length of the payload.
   *
   * This allows a block driver to later add a payload to an already
   * submitted request.  The driver needs to take care of freeing the
   * payload itself.
   *
   * Note that this is a quite horrible hack and nothing but handling of
   * discard requests should ever use it.
   */
  void blk_add_request_payload(struct request *rq, struct page *page,
  		unsigned int len)
  {
  	struct bio *bio = rq->bio;
  
  	bio->bi_io_vec->bv_page = page;
  	bio->bi_io_vec->bv_offset = 0;
  	bio->bi_io_vec->bv_len = len;
  
  	bio->bi_size = len;
  	bio->bi_vcnt = 1;
  	bio->bi_phys_segments = 1;
  
  	rq->__data_len = rq->resid_len = len;
  	rq->nr_phys_segments = 1;
  	rq->buffer = bio_data(bio);
  }
  EXPORT_SYMBOL_GPL(blk_add_request_payload);
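
  /*
   * Usage sketch (hypothetical, modelled on how a SCSI disk driver might
   * prepare a discard command): the driver attaches a page it owns and
   * frees that page again from its completion path:
   *
   *	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
   *
   *	if (!page)
   *		return BLKPREP_DEFER;
   *	... fill the discard parameter block on the page ...
   *	blk_add_request_payload(rq, page, len);
   */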

  static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
  				   struct bio *bio)
  {
  	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

  	if (!ll_back_merge_fn(q, req, bio))
  		return false;

  	trace_block_bio_backmerge(q, bio);

  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);

  	req->biotail->bi_next = bio;
  	req->biotail = bio;
  	req->__data_len += bio->bi_size;
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

  	drive_stat_acct(req, 0);
  	elv_bio_merged(q, req, bio);
  	return true;
  }
  
  static bool bio_attempt_front_merge(struct request_queue *q,
  				    struct request *req, struct bio *bio)
  {
  	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

  	if (!ll_front_merge_fn(q, req, bio))
  		return false;

  	trace_block_bio_frontmerge(q, bio);

  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);

  	bio->bi_next = req->bio;
  	req->bio = bio;

  	/*
  	 * req->buffer may not be valid: if the low level driver said
  	 * it didn't need a bounce buffer then it better
  	 * not touch req->buffer either...
  	 */
  	req->buffer = bio_data(bio);
  	req->__sector = bio->bi_sector;
  	req->__data_len += bio->bi_size;
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

  	drive_stat_acct(req, 0);
  	elv_bio_merged(q, req, bio);
  	return true;
  }

  /**
   * attempt_plug_merge - try to merge with %current's plugged list
   * @q: request_queue new bio is being queued at
   * @bio: new bio being queued
   * @request_count: out parameter for number of traversed plugged requests
   *
   * Determine whether @bio being queued on @q can be merged with a request
   * on %current's plugged list.  Returns %true if merge was successful,
   * otherwise %false.
   *
   * This function is called without @q->queue_lock; however, the elevator is
   * accessed iff there already are requests on the plugged list, which in
   * turn guarantees validity of the elevator.
   *
   * Note that, on successful merge, the elevator operation
   * elevator_bio_merged_fn() will be called without the queue lock.  The
   * elevator must be ready for this.
   */
  static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
  			       unsigned int *request_count)
  {
  	struct blk_plug *plug;
  	struct request *rq;
  	bool ret = false;

  	plug = current->plug;
  	if (!plug)
  		goto out;
  	*request_count = 0;

  	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
  		int el_ret;

  		(*request_count)++;

  		if (rq->q != q)
  			continue;

  		el_ret = elv_try_merge(rq, bio);
  		if (el_ret == ELEVATOR_BACK_MERGE) {
  			ret = bio_attempt_back_merge(q, rq, bio);
  			if (ret)
  				break;
  		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
  			ret = bio_attempt_front_merge(q, rq, bio);
  			if (ret)
  				break;
  		}
  	}
  out:
  	return ret;
  }

  void init_request_from_bio(struct request *req, struct bio *bio)
  {
  	req->cmd_type = REQ_TYPE_FS;

  	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
  	if (bio->bi_rw & REQ_RAHEAD)
  		req->cmd_flags |= REQ_FAILFAST_MASK;

  	req->errors = 0;
  	req->__sector = bio->bi_sector;
  	req->ioprio = bio_prio(bio);
  	blk_rq_bio_prep(req->q, req, bio);
  }

  void blk_queue_bio(struct request_queue *q, struct bio *bio)
  {
  	const bool sync = !!(bio->bi_rw & REQ_SYNC);
  	struct blk_plug *plug;
  	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
  	struct request *req;
  	unsigned int request_count = 0;

  	/*
  	 * low level driver can indicate that it wants pages above a
  	 * certain limit bounced to low memory (ie for highmem, or even
  	 * ISA dma in theory)
  	 */
  	blk_queue_bounce(q, &bio);

  	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
  		spin_lock_irq(q->queue_lock);
  		where = ELEVATOR_INSERT_FLUSH;
  		goto get_rq;
  	}

  	/*
  	 * Check if we can merge with the plugged list before grabbing
  	 * any locks.
  	 */
  	if (attempt_plug_merge(q, bio, &request_count))
  		return;

  	spin_lock_irq(q->queue_lock);

  	el_ret = elv_merge(q, &req, bio);
  	if (el_ret == ELEVATOR_BACK_MERGE) {
  		if (bio_attempt_back_merge(q, req, bio)) {
  			if (!attempt_back_merge(q, req))
  				elv_merged_request(q, req, el_ret);
  			goto out_unlock;
  		}
  	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
  		if (bio_attempt_front_merge(q, req, bio)) {
  			if (!attempt_front_merge(q, req))
  				elv_merged_request(q, req, el_ret);
  			goto out_unlock;
  		}
  	}

  get_rq:
  	/*
  	 * This sync check and mask will be re-done in init_request_from_bio(),
  	 * but we need to set it earlier to expose the sync flag to the
  	 * rq allocator and io schedulers.
  	 */
  	rw_flags = bio_data_dir(bio);
  	if (sync)
  		rw_flags |= REQ_SYNC;

  	/*
  	 * Grab a free request.  This might sleep but cannot fail.
  	 * Returns with the queue unlocked.
  	 */
  	req = get_request_wait(q, rw_flags, bio);
  	if (unlikely(!req)) {
  		bio_endio(bio, -ENODEV);	/* @q is dead */
  		goto out_unlock;
  	}

  	/*
  	 * After dropping the lock and possibly sleeping here, our request
  	 * may now be mergeable after it had proven unmergeable (above).
  	 * We don't worry about that case for efficiency. It won't happen
  	 * often, and the elevators are able to handle it.
  	 */
  	init_request_from_bio(req, bio);

  	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
  		req->cpu = raw_smp_processor_id();

  	plug = current->plug;
  	if (plug) {
  		/*
  		 * If this is the first request added after a plug, fire
  		 * off a plug trace. If others have been added before, check
  		 * if we have multiple devices in this plug. If so, make a
  		 * note to sort the list before dispatch.
  		 */
  		if (list_empty(&plug->list))
  			trace_block_plug(q);
  		else {
  			if (!plug->should_sort) {
  				struct request *__rq;

  				__rq = list_entry_rq(plug->list.prev);
  				if (__rq->q != q)
  					plug->should_sort = 1;
  			}
  			if (request_count >= BLK_MAX_REQUEST_COUNT) {
  				blk_flush_plug_list(plug, false);
  				trace_block_plug(q);
  			}
  		}
  		list_add_tail(&req->queuelist, &plug->list);
  		drive_stat_acct(req, 1);
  	} else {
  		spin_lock_irq(q->queue_lock);
  		add_acct_request(q, req, where);
  		__blk_run_queue(q);
  out_unlock:
  		spin_unlock_irq(q->queue_lock);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
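
  /*
   * For reference, the submitter side of the plugging handled above looks
   * roughly like this (a sketch of the usual pattern, not code from this
   * file); requests queued between start and finish sit on current->plug,
   * where attempt_plug_merge() can merge into them without queue_lock:
   *
   *	struct blk_plug plug;
   *
   *	blk_start_plug(&plug);
   *	for (i = 0; i < nr; i++)
   *		submit_bio(READ, bios[i]);
   *	blk_finish_plug(&plug);
   */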

  /*
   * If bio->bi_bdev is a partition, remap the location
   */
  static inline void blk_partition_remap(struct bio *bio)
  {
  	struct block_device *bdev = bio->bi_bdev;

  	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
  		struct hd_struct *p = bdev->bd_part;

  		bio->bi_sector += p->start_sect;
  		bio->bi_bdev = bdev->bd_contains;

  		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
  				      bdev->bd_dev,
  				      bio->bi_sector - p->start_sect);
  	}
  }

  static void handle_bad_sector(struct bio *bio)
  {
  	char b[BDEVNAME_SIZE];

  	printk(KERN_INFO "attempt to access beyond end of device\n");
  	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
  			bdevname(bio->bi_bdev, b),
  			bio->bi_rw,
  			(unsigned long long)bio->bi_sector + bio_sectors(bio),
  			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

  	set_bit(BIO_EOF, &bio->bi_flags);
  }

  #ifdef CONFIG_FAIL_MAKE_REQUEST

  static DECLARE_FAULT_ATTR(fail_make_request);

  static int __init setup_fail_make_request(char *str)
  {
  	return setup_fault_attr(&fail_make_request, str);
  }
  __setup("fail_make_request=", setup_fail_make_request);

  static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
  {
  	return part->make_it_fail && should_fail(&fail_make_request, bytes);
  }

  static int __init fail_make_request_debugfs(void)
  {
  	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
  						NULL, &fail_make_request);

  	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
  }

  late_initcall(fail_make_request_debugfs);

  #else /* CONFIG_FAIL_MAKE_REQUEST */

  static inline bool should_fail_request(struct hd_struct *part,
  					unsigned int bytes)
  {
  	return false;
  }

  #endif /* CONFIG_FAIL_MAKE_REQUEST */
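
  /*
   * Usage note (see Documentation/fault-injection/fault-injection.txt):
   * with CONFIG_FAIL_MAKE_REQUEST enabled, the fault attribute is armed
   * from the kernel command line, e.g.
   *
   *	fail_make_request=<interval>,<probability>,<space>,<times>
   *
   * and individual disks or partitions opt in through sysfs:
   *
   *	echo 1 > /sys/block/<device>/make-it-fail
   */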

  /*
   * Check whether this bio extends beyond the end of the device.
   */
  static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
  {
  	sector_t maxsector;

  	if (!nr_sectors)
  		return 0;

  	/* Test device or partition size, when known. */
  	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
  	if (maxsector) {
  		sector_t sector = bio->bi_sector;

  		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
  			/*
  			 * This may well happen - the kernel calls bread()
  			 * without checking the size of the device, e.g., when
  			 * mounting a device.
  			 */
  			handle_bad_sector(bio);
  			return 1;
  		}
  	}

  	return 0;
  }

  static noinline_for_stack bool
  generic_make_request_checks(struct bio *bio)
  {
  	struct request_queue *q;
  	int nr_sectors = bio_sectors(bio);
  	int err = -EIO;
  	char b[BDEVNAME_SIZE];
  	struct hd_struct *part;

  	might_sleep();

  	if (bio_check_eod(bio, nr_sectors))
  		goto end_io;

  	q = bdev_get_queue(bio->bi_bdev);
  	if (unlikely(!q)) {
  		printk(KERN_ERR
  		       "generic_make_request: Trying to access "
  			"nonexistent block-device %s (%Lu)\n",
  			bdevname(bio->bi_bdev, b),
  			(long long) bio->bi_sector);
  		goto end_io;
  	}

  	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
  		     nr_sectors > queue_max_hw_sectors(q))) {
  		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
  		       bdevname(bio->bi_bdev, b),
  		       bio_sectors(bio),
  		       queue_max_hw_sectors(q));
  		goto end_io;
  	}

  	part = bio->bi_bdev->bd_part;
  	if (should_fail_request(part, bio->bi_size) ||
  	    should_fail_request(&part_to_disk(part)->part0,
  				bio->bi_size))
  		goto end_io;

  	/*
  	 * If this device has partitions, remap block n
  	 * of partition p to block n+start(p) of the disk.
  	 */
  	blk_partition_remap(bio);

  	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
  		goto end_io;

  	if (bio_check_eod(bio, nr_sectors))
  		goto end_io;

  	/*
  	 * Filter flush bio's early so that make_request based
  	 * drivers without flush support don't have to worry
  	 * about them.
  	 */
  	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
  		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
  		if (!nr_sectors) {
  			err = 0;
  			goto end_io;
  		}
  	}

  	if ((bio->bi_rw & REQ_DISCARD) &&
  	    (!blk_queue_discard(q) ||
  	     ((bio->bi_rw & REQ_SECURE) &&
  	      !blk_queue_secdiscard(q)))) {
  		err = -EOPNOTSUPP;
  		goto end_io;
  	}

  	if (blk_throtl_bio(q, bio))
  		return false;	/* throttled, will be resubmitted later */

  	trace_block_bio_queue(q, bio);
  	return true;

  end_io:
  	bio_endio(bio, err);
  	return false;
  }

  /**
   * generic_make_request - hand a buffer to its device driver for I/O
   * @bio:  The bio describing the location in memory and on the device.
   *
   * generic_make_request() is used to make I/O requests of block
   * devices. It is passed a &struct bio, which describes the I/O that needs
   * to be done.
   *
   * generic_make_request() does not return any status.  The
   * success/failure status of the request, along with notification of
   * completion, is delivered asynchronously through the bio->bi_end_io
   * function described (one day) elsewhere.
   *
   * The caller of generic_make_request must make sure that bi_io_vec
   * are set to describe the memory buffer, and that bi_dev and bi_sector are
   * set to describe the device address, and the
   * bi_end_io and optionally bi_private are set to describe how
   * completion notification should be signaled.
   *
   * generic_make_request and the drivers it calls may use bi_next if this
   * bio happens to be merged with someone else, and may resubmit the bio to
   * a lower device by calling into generic_make_request recursively, which
   * means the bio should NOT be touched after the call to ->make_request_fn.
   */
  void generic_make_request(struct bio *bio)
  {
  	struct bio_list bio_list_on_stack;

  	if (!generic_make_request_checks(bio))
  		return;

  	/*
  	 * We only want one ->make_request_fn to be active at a time, else
  	 * stack usage with stacked devices could be a problem.  So use
  	 * current->bio_list to keep a list of requests submitted by a
  	 * make_request_fn function.  current->bio_list is also used as a
  	 * flag to say if generic_make_request is currently active in this
  	 * task or not.  If it is NULL, then no make_request is active.  If
  	 * it is non-NULL, then a make_request is active, and new requests
  	 * should be added at the tail.
  	 */
  	if (current->bio_list) {
  		bio_list_add(current->bio_list, bio);
  		return;
  	}

  	/* following loop may be a bit non-obvious, and so deserves some
  	 * explanation.
  	 * Before entering the loop, bio->bi_next is NULL (as all callers
  	 * ensure that) so we have a list with a single bio.
  	 * We pretend that we have just taken it off a longer list, so
  	 * we assign bio_list to a pointer to the bio_list_on_stack,
  	 * thus initialising the bio_list of new bios to be
  	 * added.  ->make_request() may indeed add some more bios
  	 * through a recursive call to generic_make_request.  If it
  	 * did, we find a non-NULL value in bio_list and re-enter the loop
  	 * from the top.  In this case we really did just take the bio
  	 * off the top of the list (no pretending) and so remove it from
  	 * bio_list, and call into ->make_request() again.
  	 */
  	BUG_ON(bio->bi_next);
  	bio_list_init(&bio_list_on_stack);
  	current->bio_list = &bio_list_on_stack;
  	do {
  		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

  		q->make_request_fn(q, bio);

  		bio = bio_list_pop(current->bio_list);
  	} while (bio);
  	current->bio_list = NULL; /* deactivate */
  }
  EXPORT_SYMBOL(generic_make_request);
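
  /*
   * A minimal sketch of the recursion handling above (hypothetical
   * stacking driver, not code from this file): a make_request_fn may
   * simply remap the bio and feed it back to generic_make_request(),
   * which queues it on current->bio_list instead of recursing deeper;
   * my_lower_bdev and my_offset are assumed driver state:
   *
   *	static void my_remap_make_request(struct request_queue *q,
   *					  struct bio *bio)
   *	{
   *		bio->bi_bdev = my_lower_bdev;
   *		bio->bi_sector += my_offset;
   *		generic_make_request(bio);
   *	}
   */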
  
  /**
   * submit_bio - submit a bio to the block device layer for I/O
   * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
   * @bio: The &struct bio which describes the I/O
   *
   * submit_bio() is very similar in purpose to generic_make_request(), and
   * uses that function to do most of the work. Both are fairly rough
   * interfaces; @bio must be set up and ready for I/O.
   */
  void submit_bio(int rw, struct bio *bio)
  {
  	int count = bio_sectors(bio);

  	bio->bi_rw |= rw;

  	/*
  	 * If it's a regular read/write or a barrier with data attached,
  	 * go through the normal accounting stuff before submission.
  	 */
  	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
  		if (rw & WRITE) {
  			count_vm_events(PGPGOUT, count);
  		} else {
  			task_io_account_read(bio->bi_size);
  			count_vm_events(PGPGIN, count);
  		}

  		if (unlikely(block_dump)) {
  			char b[BDEVNAME_SIZE];
  			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
  				current->comm, task_pid_nr(current),
  				(rw & WRITE) ? "WRITE" : "READ",
  				(unsigned long long)bio->bi_sector,
  				bdevname(bio->bi_bdev, b),
  				count);
  		}
  	}

  	generic_make_request(bio);
  }
  EXPORT_SYMBOL(submit_bio);
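
  /*
   * Caller-side sketch (illustrative; my_end_io and my_ctx are assumed).
   * The bio must be fully described before submission and must not be
   * touched afterwards except from its completion handler:
   *
   *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
   *
   *	bio->bi_bdev = bdev;
   *	bio->bi_sector = sector;
   *	bio_add_page(bio, page, PAGE_SIZE, 0);
   *	bio->bi_end_io = my_end_io;
   *	bio->bi_private = my_ctx;
   *	submit_bio(READ, bio);
   */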

  /**
   * blk_rq_check_limits - Helper function to check a request for the queue limit
   * @q:  the queue
   * @rq: the request being checked
   *
   * Description:
   *    @rq may have been made based on weaker limitations of upper-level queues
   *    in request stacking drivers, and it may violate the limitation of @q.
   *    Since the block layer and the underlying device driver trust @rq
   *    after it is inserted to @q, it should be checked against @q before
   *    the insertion using this generic function.
   *
   *    This function should also be useful for request stacking drivers
   *    in some cases below, so export this function.
   *    Request stacking drivers like request-based dm may change the queue
   *    limits while requests are in the queue (e.g. dm's table swapping).
   *    Such request stacking drivers should check those requests against
   *    the new queue limits again when they dispatch those requests,
   *    although such checks are also done against the old queue limits
   *    when submitting requests.
   */
  int blk_rq_check_limits(struct request_queue *q, struct request *rq)
  {
  	if (rq->cmd_flags & REQ_DISCARD)
  		return 0;

  	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
  	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
  		printk(KERN_ERR "%s: over max size limit.\n", __func__);
  		return -EIO;
  	}

  	/*
  	 * queue's settings related to segment counting like q->bounce_pfn
  	 * may differ from that of other stacking queues.
  	 * Recalculate it to check the request correctly on this queue's
  	 * limitation.
  	 */
  	blk_recalc_rq_segments(rq);
  	if (rq->nr_phys_segments > queue_max_segments(q)) {
  		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
  		return -EIO;
  	}

  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_rq_check_limits);
  
  /**
   * blk_insert_cloned_request - Helper for stacking drivers to submit a request
   * @q:  the queue to submit the request
   * @rq: the request being queued
   */
  int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
  {
  	unsigned long flags;
  	int where = ELEVATOR_INSERT_BACK;

  	if (blk_rq_check_limits(q, rq))
  		return -EIO;

  	if (rq->rq_disk &&
  	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
  		return -EIO;

  	spin_lock_irqsave(q->queue_lock, flags);
  	if (unlikely(blk_queue_dead(q))) {
  		spin_unlock_irqrestore(q->queue_lock, flags);
  		return -ENODEV;
  	}

  	/*
  	 * Submitting request must be dequeued before calling this function
  	 * because it will be linked to another request_queue
  	 */
  	BUG_ON(blk_queued_rq(rq));

  	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
  		where = ELEVATOR_INSERT_FLUSH;

  	add_acct_request(q, rq, where);
  	if (where == ELEVATOR_INSERT_FLUSH)
  		__blk_run_queue(q);
  	spin_unlock_irqrestore(q->queue_lock, flags);

  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
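
  /*
   * Rough usage by a request-based stacking driver (a sketch of the
   * dm-style pattern; my_clone_request() and my_complete_original() are
   * hypothetical and the clone setup is elided):
   *
   *	clone = my_clone_request(rq);
   *	ret = blk_insert_cloned_request(lower_q, clone);
   *	if (ret)
   *		my_complete_original(rq, ret);
   */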
  /**
   * blk_rq_err_bytes - determine number of bytes till the next failure boundary
   * @rq: request to examine
   *
   * Description:
   *     A request could be a merge of IOs which require different failure
   *     handling.  This function determines the number of bytes which
   *     can be failed from the beginning of the request without
   *     crossing into an area which needs to be retried further.
   *
   * Return:
   *     The number of bytes to fail.
   *
   * Context:
   *     queue_lock must be held.
   */
  unsigned int blk_rq_err_bytes(const struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	unsigned int bytes = 0;
  	struct bio *bio;
  
  	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
  		return blk_rq_bytes(rq);
  
  	/*
  	 * Currently the only 'mixing' which can happen is between
  	 * different fastfail types.  We can safely fail portions
  	 * which have all the failfast bits that the first one has -
  	 * the ones which are at least as eager to fail as the first
  	 * one.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		if ((bio->bi_rw & ff) != ff)
  			break;
  		bytes += bio->bi_size;
  	}
  
  	/* this could lead to infinite loop */
  	BUG_ON(blk_rq_bytes(rq) && !bytes);
  	return bytes;
  }
  EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
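
  /*
   * Example use (a sketch): a driver that wants to fail only the eagerly
   * failing head of a mixed-failfast request and retry the rest could do:
   *
   *	if (blk_end_request(rq, -EIO, blk_rq_err_bytes(rq)))
   *		... retry or requeue the remaining portion of rq ...
   *
   * blk_end_request_err() wraps exactly this pattern.
   */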

  static void blk_account_io_completion(struct request *req, unsigned int bytes)
  {
  	if (blk_do_io_stat(req)) {
  		const int rw = rq_data_dir(req);
  		struct hd_struct *part;
  		int cpu;

  		cpu = part_stat_lock();
  		part = req->part;
  		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
  		part_stat_unlock();
  	}
  }

  static void blk_account_io_done(struct request *req)
  {
  	/*
  	 * Account IO completion.  flush_rq isn't accounted as a
  	 * normal IO on queueing nor completion.  Accounting the
  	 * containing request is enough.
  	 */
  	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
  		unsigned long duration = jiffies - req->start_time;
  		const int rw = rq_data_dir(req);
  		struct hd_struct *part;
  		int cpu;

  		cpu = part_stat_lock();
  		part = req->part;

  		part_stat_inc(cpu, part, ios[rw]);
  		part_stat_add(cpu, part, ticks[rw], duration);
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rw);

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }

  /**
   * blk_peek_request - peek at the top of a request queue
   * @q: request queue to peek at
   *
   * Description:
   *     Return the request at the top of @q.  The returned request
   *     should be started using blk_start_request() before LLD starts
   *     processing it.
   *
   * Return:
   *     Pointer to the request at the top of @q if available.  Null
   *     otherwise.
   *
   * Context:
   *     queue_lock must be held.
   */
  struct request *blk_peek_request(struct request_queue *q)
  {
  	struct request *rq;
  	int ret;

  	while ((rq = __elv_next_request(q)) != NULL) {
  		if (!(rq->cmd_flags & REQ_STARTED)) {
  			/*
  			 * This is the first time the device driver
  			 * sees this request (possibly after
  			 * requeueing).  Notify IO scheduler.
  			 */
  			if (rq->cmd_flags & REQ_SORTED)
  				elv_activate_rq(q, rq);

  			/*
  			 * just mark as started even if we don't start
  			 * it, a request that has been delayed should
  			 * not be passed by new incoming requests
  			 */
  			rq->cmd_flags |= REQ_STARTED;
  			trace_block_rq_issue(q, rq);
  		}

  		if (!q->boundary_rq || q->boundary_rq == rq) {
  			q->end_sector = rq_end_sector(rq);
  			q->boundary_rq = NULL;
  		}

  		if (rq->cmd_flags & REQ_DONTPREP)
  			break;

  		if (q->dma_drain_size && blk_rq_bytes(rq)) {
  			/*
  			 * make sure space for the drain appears.  We
  			 * know we can do this because max_hw_segments
  			 * has been adjusted to be one fewer than the
  			 * device can handle
  			 */
  			rq->nr_phys_segments++;
  		}

  		if (!q->prep_rq_fn)
  			break;

  		ret = q->prep_rq_fn(q, rq);
  		if (ret == BLKPREP_OK) {
  			break;
  		} else if (ret == BLKPREP_DEFER) {
  			/*
  			 * the request may have been (partially) prepped.
  			 * we need to keep this request in the front to
  			 * avoid resource deadlock.  REQ_STARTED will
  			 * prevent other fs requests from passing this one.
  			 */
  			if (q->dma_drain_size && blk_rq_bytes(rq) &&
  			    !(rq->cmd_flags & REQ_DONTPREP)) {
  				/*
  				 * remove the space for the drain we added
  				 * so that we don't add it again
  				 */
  				--rq->nr_phys_segments;
  			}

  			rq = NULL;
  			break;
  		} else if (ret == BLKPREP_KILL) {
  			rq->cmd_flags |= REQ_QUIET;
  			/*
  			 * Mark this request as started so we don't trigger
  			 * any debug logic in the end I/O path.
  			 */
  			blk_start_request(rq);
  			__blk_end_request_all(rq, -EIO);
  		} else {
  			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
  			break;
  		}
  	}

  	return rq;
  }
  EXPORT_SYMBOL(blk_peek_request);

  void blk_dequeue_request(struct request *rq)
  {
  	struct request_queue *q = rq->q;

  	BUG_ON(list_empty(&rq->queuelist));
  	BUG_ON(ELV_ON_HASH(rq));

  	list_del_init(&rq->queuelist);

  	/*
  	 * the time frame between a request being removed from the lists
  	 * and it being freed is accounted as io that is in progress at
  	 * the driver side.
  	 */
  	if (blk_account_rq(rq)) {
  		q->in_flight[rq_is_sync(rq)]++;
  		set_io_start_time_ns(rq);
  	}
  }

  /**
   * blk_start_request - start request processing on the driver
   * @req: request to dequeue
   *
   * Description:
   *     Dequeue @req and start timeout timer on it.  This hands off the
   *     request to the driver.
   *
   *     Block internal functions which don't want to start the timer should
   *     call blk_dequeue_request().
   *
   * Context:
   *     queue_lock must be held.
   */
  void blk_start_request(struct request *req)
  {
  	blk_dequeue_request(req);

  	/*
  	 * We are now handing the request to the hardware, initialize
  	 * resid_len to full count and add the timeout handler.
  	 */
  	req->resid_len = blk_rq_bytes(req);
  	if (unlikely(blk_bidi_rq(req)))
  		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);

  	blk_add_timer(req);
  }
  EXPORT_SYMBOL(blk_start_request);
  
  /**
   * blk_fetch_request - fetch a request from a request queue
   * @q: request queue to fetch a request from
   *
   * Description:
   *     Return the request at the top of @q.  The request is started on
   *     return and LLD can start processing it immediately.
   *
   * Return:
   *     Pointer to the request at the top of @q if available.  Null
   *     otherwise.
   *
   * Context:
   *     queue_lock must be held.
   */
  struct request *blk_fetch_request(struct request_queue *q)
  {
  	struct request *rq;
  
  	rq = blk_peek_request(q);
  	if (rq)
  		blk_start_request(rq);
  	return rq;
  }
  EXPORT_SYMBOL(blk_fetch_request);
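
  /*
   * The canonical consumer of the helpers above is a driver request_fn
   * (an illustrative sketch; the actual data transfer step is left out):
   *
   *	static void my_request_fn(struct request_queue *q)
   *	{
   *		struct request *rq;
   *
   *		while ((rq = blk_fetch_request(q)) != NULL) {
   *			if (rq->cmd_type != REQ_TYPE_FS) {
   *				__blk_end_request_all(rq, -EIO);
   *				continue;
   *			}
   *			... transfer blk_rq_cur_bytes(rq) at blk_rq_pos(rq) ...
   *			__blk_end_request_cur(rq, 0);
   *		}
   *	}
   */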
  
  /**
   * blk_update_request - Special helper function for request stacking drivers
   * @req:      the request being processed
   * @error:    %0 for success, < %0 for error
   * @nr_bytes: number of bytes to complete @req
   *
   * Description:
   *     Ends I/O on a number of bytes attached to @req, but doesn't complete
   *     the request structure even if @req doesn't have leftover.
   *     If @req has leftover, sets it up for the next range of segments.
   *
   *     This special helper function is only for request stacking drivers
   *     (e.g. request-based dm) so that they can handle partial completion.
   *     Actual device drivers should use blk_end_request instead.
   *
   *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
   *     %false return from this function.
   *
   * Return:
   *     %false - this request doesn't have any more data
   *     %true  - this request has more data
   **/
  bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
  {
  	int total_bytes, bio_nbytes, next_idx = 0;
  	struct bio *bio;

  	if (!req->bio)
  		return false;

  	trace_block_rq_complete(req->q, req);

  	/*
  	 * For fs requests, rq is just carrier of independent bio's
  	 * and each partial completion should be handled separately.
  	 * Reset per-request error on each partial completion.
  	 *
  	 * TODO: tj: This is too subtle.  It would be better to let
  	 * low level drivers do what they see fit.
  	 */
  	if (req->cmd_type == REQ_TYPE_FS)
  		req->errors = 0;

  	if (error && req->cmd_type == REQ_TYPE_FS &&
  	    !(req->cmd_flags & REQ_QUIET)) {
  		char *error_type;

  		switch (error) {
  		case -ENOLINK:
  			error_type = "recoverable transport";
  			break;
  		case -EREMOTEIO:
  			error_type = "critical target";
  			break;
  		case -EBADE:
  			error_type = "critical nexus";
  			break;
  		case -EIO:
  		default:
  			error_type = "I/O";
  			break;
  		}
  		printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
  		       error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
  		       (unsigned long long)blk_rq_pos(req));
  	}

  	blk_account_io_completion(req, nr_bytes);

  	total_bytes = bio_nbytes = 0;
  	while ((bio = req->bio) != NULL) {
  		int nbytes;

  		if (nr_bytes >= bio->bi_size) {
  			req->bio = bio->bi_next;
  			nbytes = bio->bi_size;
  			req_bio_endio(req, bio, nbytes, error);
  			next_idx = 0;
  			bio_nbytes = 0;
  		} else {
  			int idx = bio->bi_idx + next_idx;

  			if (unlikely(idx >= bio->bi_vcnt)) {
  				blk_dump_rq_flags(req, "__end_that");
  				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
  				       __func__, idx, bio->bi_vcnt);
  				break;
  			}

  			nbytes = bio_iovec_idx(bio, idx)->bv_len;
  			BIO_BUG_ON(nbytes > bio->bi_size);

  			/*
  			 * not a complete bvec done
  			 */
  			if (unlikely(nbytes > nr_bytes)) {
  				bio_nbytes += nr_bytes;
  				total_bytes += nr_bytes;
  				break;
  			}

  			/*
  			 * advance to the next vector
  			 */
  			next_idx++;
  			bio_nbytes += nbytes;
  		}

  		total_bytes += nbytes;
  		nr_bytes -= nbytes;
  		bio = req->bio;
  		if (bio) {
  			/*
  			 * end more in this run, or just return 'not-done'
  			 */
  			if (unlikely(nr_bytes <= 0))
  				break;
  		}
  	}

  	/*
  	 * completely done
  	 */
  	if (!req->bio) {
  		/*
  		 * Reset counters so that the request stacking driver
  		 * can find how many bytes remain in the request
  		 * later.
  		 */
  		req->__data_len = 0;
  		return false;
  	}

  	/*
  	 * if the request wasn't completed, update state
  	 */
  	if (bio_nbytes) {
  		req_bio_endio(req, bio, bio_nbytes, error);
  		bio->bi_idx += next_idx;
  		bio_iovec(bio)->bv_offset += nr_bytes;
  		bio_iovec(bio)->bv_len -= nr_bytes;
  	}

  	req->__data_len -= total_bytes;
  	req->buffer = bio_data(req->bio);

  	/* update sector only for requests with clear definition of sector */
  	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
  		req->__sector += total_bytes >> 9;

  	/* mixed attributes always follow the first bio */
  	if (req->cmd_flags & REQ_MIXED_MERGE) {
  		req->cmd_flags &= ~REQ_FAILFAST_MASK;
  		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
  	}

  	/*
  	 * If total number of sectors is less than the first segment
  	 * size, something has gone terribly wrong.
  	 */
  	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
  		blk_dump_rq_flags(req, "request botched");
  		req->__data_len = blk_rq_cur_bytes(req);
  	}

  	/* recalculate the number of segments */
  	blk_recalc_rq_segments(req);

  	return true;
  }
  EXPORT_SYMBOL_GPL(blk_update_request);
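
  /*
   * Sketch of the partial-completion pattern this enables (hypothetical
   * stacking-driver completion path; locking elided): account the bytes
   * the lower device finished and keep the remainder queued:
   *
   *	if (blk_update_request(rq, error, nr_bytes)) {
   *		... rq still has bytes left, wait for more completions ...
   *		return;
   *	}
   *	... rq is fully updated; finish it through the blk_end_request()
   *	    family or the driver's own teardown ...
   */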

  static bool blk_update_bidi_request(struct request *rq, int error,
  				    unsigned int nr_bytes,
  				    unsigned int bidi_bytes)
  {
  	if (blk_update_request(rq, error, nr_bytes))
  		return true;

  	/* Bidi request must be completed as a whole */
  	if (unlikely(blk_bidi_rq(rq)) &&
  	    blk_update_request(rq->next_rq, error, bidi_bytes))
  		return true;

  	if (blk_queue_add_random(rq->q))
  		add_disk_randomness(rq->rq_disk);

  	return false;
  }
  /**
   * blk_unprep_request - unprepare a request
   * @req:	the request
   *
   * This function makes a request ready for complete resubmission (or
   * completion).  It happens only after all error handling is complete,
   * so represents the appropriate moment to deallocate any resources
   * that were allocated to the request in the prep_rq_fn.  The queue
   * lock is held when calling this.
   */
  void blk_unprep_request(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	req->cmd_flags &= ~REQ_DONTPREP;
  	if (q->unprep_rq_fn)
  		q->unprep_rq_fn(q, req);
  }
  EXPORT_SYMBOL_GPL(blk_unprep_request);
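
/*
 * Usage sketch (editorial addition; hypothetical driver code, not part
 * of this file): a prep_rq_fn/unprep_rq_fn pair.  The prep hook
 * allocates a per-request resource and sets REQ_DONTPREP so it runs
 * only once; blk_finish_request() later invokes the unprep hook via
 * blk_unprep_request() to release the resource.
 */
#if 0
static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	void *ctx = kmalloc(64, GFP_ATOMIC);	/* hypothetical per-rq data */

	if (!ctx)
		return BLKPREP_DEFER;	/* retry preparation later */

	rq->special = ctx;
	rq->cmd_flags |= REQ_DONTPREP;	/* prepare only once */
	return BLKPREP_OK;
}

static void example_unprep_rq_fn(struct request_queue *q, struct request *rq)
{
	kfree(rq->special);
	rq->special = NULL;
}

/* registered once at initialization time:
 *	blk_queue_prep_rq(q, example_prep_rq_fn);
 *	blk_queue_unprep_rq(q, example_unprep_rq_fn);
 */
#endif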

/*
 * queue lock must be held
 */
static void blk_finish_request(struct request *req, int error)
{
	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);

	blk_delete_timer(req);

	if (req->cmd_flags & REQ_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that support bidi can safely call this function for any
 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_bidi_request(struct request *rq, int error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}
  
/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);
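
/*
 * Usage sketch (editorial addition; hypothetical driver code): a
 * completion path that retires @bytes of the current request and keeps
 * the request active while buffers remain.  No queue lock is needed;
 * blk_end_request() takes it internally.
 */
#if 0
static void example_complete(struct request *rq, unsigned int bytes, int error)
{
	if (blk_end_request(rq, error, bytes))
		return;		/* partial completion, more segments pending */

	/* the request is now fully completed and has been freed */
}
#endif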
  
/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(blk_end_request_cur);
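
/*
 * Usage sketch (editorial addition; hypothetical PIO-style driver):
 * transfer the request one consecutively mapped chunk at a time,
 * retiring blk_rq_cur_bytes() worth of data per step.
 */
#if 0
static void example_next_chunk(struct request *rq)
{
	/* ... transfer blk_rq_cur_bytes(rq) bytes at rq->buffer ... */

	if (!blk_end_request_cur(rq, 0))
		return;		/* whole request completed */

	/* rq->buffer now points at the next chunk; schedule another step */
}
#endif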

/**
   * blk_end_request_err - Finish a request till the next failure boundary.
   * @rq: the request to finish till the next failure boundary for
   * @error: must be negative errno
   *
   * Description:
   *     Complete @rq till the next failure boundary.
   *
   * Return:
   *     %false - we are done with this request
   *     %true  - still buffers pending for this request
   */
  bool blk_end_request_err(struct request *rq, int error)
  {
  	WARN_ON(error >= 0);
  	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
  }
  EXPORT_SYMBOL_GPL(blk_end_request_err);
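
/*
 * Usage sketch (editorial addition; hypothetical): on a failure, retire
 * only the bytes up to the next failure boundary of a mixed-merged
 * request, so the non-failfast remainder can be retried.
 */
#if 0
static void example_handle_media_error(struct request *rq)
{
	if (blk_end_request_err(rq, -EIO)) {
		/*
		 * Buffers past the failure boundary are still pending;
		 * requeue or resubmit the remainder here (e.g. with
		 * blk_requeue_request(), queue lock held).
		 */
	}
}
#endif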
  
/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);
  
/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);
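
/*
 * Usage sketch (editorial addition): fail a request outright while the
 * queue lock is already held, as blk_flush_plug_list() below does for
 * requests destined for a dead queue.
 */
#if 0
static void example_fail_rq_locked(struct request *rq)
{
	/* caller holds rq->q->queue_lock */
	__blk_end_request_all(rq, -ENODEV);
}
#endif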
  
/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);

  /**
   * __blk_end_request_err - Finish a request till the next failure boundary.
   * @rq: the request to finish till the next failure boundary for
   * @error: must be negative errno
   *
   * Description:
   *     Complete @rq till the next failure boundary.  Must be called
   *     with queue lock held.
   *
   * Return:
   *     %false - we are done with this request
   *     %true  - still buffers pending for this request
   */
  bool __blk_end_request_err(struct request *rq, int error)
  {
  	WARN_ON(error >= 0);
  	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
  }
  EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;

	if (bio_has_data(bio)) {
		rq->nr_phys_segments = bio_phys_segments(q, bio);
		rq->buffer = bio_data(bio);
	}
	rq->__data_len = bio->bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

  #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
  /**
   * rq_flush_dcache_pages - Helper function to flush all pages in a request
   * @rq: the request to be flushed
   *
   * Description:
   *     Flush all pages in @rq.
   */
  void rq_flush_dcache_pages(struct request *rq)
  {
  	struct req_iterator iter;
  	struct bio_vec *bvec;
  
  	rq_for_each_segment(bvec, rq, iter)
  		flush_dcache_page(bvec->bv_page);
  }
  EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  #endif
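
/*
 * Usage sketch (editorial addition; hypothetical PIO driver): after the
 * CPU has copied read data into a request's pages, flush them so user
 * mappings observe the new data on architectures with aliasing caches.
 */
#if 0
static void example_pio_read_done(struct request *rq)
{
	/* ... CPU writes the read payload into rq's pages ... */
	rq_flush_dcache_pages(rq);
	blk_end_request_all(rq, 0);
}
#endif
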
  /**
   * blk_lld_busy - Check if underlying low-level drivers of a device are busy
   * @q : the queue of the device being checked
   *
   * Description:
   *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their
 *    own exporting function using blk_queue_lld_busy() first.
   *
   *    Basically, this function is used only by request stacking drivers
   *    to stop dispatching requests to underlying devices when underlying
   *    devices are busy.  This behavior helps more I/O merging on the queue
   *    of the request stacking driver and prevents I/O throughput regression
   *    on burst I/O load.
   *
   * Return:
   *    0 - Not busy (The request stacking driver should dispatch request)
   *    1 - Busy (The request stacking driver should stop dispatching request)
   */
  int blk_lld_busy(struct request_queue *q)
  {
  	if (q->lld_busy_fn)
  		return q->lld_busy_fn(q);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_lld_busy);
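
/*
 * Usage sketch (editorial addition; hypothetical low-level driver):
 * export a busy indication by registering an lld_busy_fn with
 * blk_queue_lld_busy().  "struct example_dev" and its fields are
 * invented for illustration.
 */
#if 0
static int example_lld_busy_fn(struct request_queue *q)
{
	struct example_dev *dev = q->queuedata;

	return dev->inflight >= dev->queue_depth;
}

/* at initialization:  blk_queue_lld_busy(q, example_lld_busy_fn); */
#endif
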
  /**
   * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
   * @rq: the clone request to be cleaned up
   *
   * Description:
   *     Free all bios in @rq for a cloned request.
   */
  void blk_rq_unprep_clone(struct request *rq)
  {
  	struct bio *bio;
  
  	while ((bio = rq->bio) != NULL) {
  		rq->bio = bio->bi_next;
  
  		bio_put(bio);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
  
  /*
   * Copy attributes of the original request to the clone request.
   * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
   */
  static void __blk_rq_prep_clone(struct request *dst, struct request *src)
  {
  	dst->cpu = src->cpu;
  	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
  	dst->cmd_type = src->cmd_type;
  	dst->__sector = blk_rq_pos(src);
  	dst->__data_len = blk_rq_bytes(src);
  	dst->nr_phys_segments = src->nr_phys_segments;
  	dst->ioprio = src->ioprio;
  	dst->extra_len = src->extra_len;
  }
  
  /**
   * blk_rq_prep_clone - Helper function to setup clone request
   * @rq: the request to be setup
   * @rq_src: original request to be cloned
   * @bs: bio_set that bios for clone are allocated from
   * @gfp_mask: memory allocation mask for bio
   * @bio_ctr: setup function to be called for each clone bio.
   *           Returns %0 for success, non %0 for failure.
   * @data: private data to be passed to @bio_ctr
   *
   * Description:
   *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
   *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
   *     are not copied, and copying such parts is the caller's responsibility.
 *     Also, pages which the original bios are pointing to are not copied;
 *     the cloned bios just point to the same pages.
   *     So cloned bios must be completed before original bios, which means
   *     the caller must complete @rq before @rq_src.
   */
  int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
  		      struct bio_set *bs, gfp_t gfp_mask,
  		      int (*bio_ctr)(struct bio *, struct bio *, void *),
  		      void *data)
  {
  	struct bio *bio, *bio_src;
  
  	if (!bs)
  		bs = fs_bio_set;
  
  	blk_rq_init(NULL, rq);
  
  	__rq_for_each_bio(bio_src, rq_src) {
  		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
  		if (!bio)
  			goto free_and_out;
  
  		__bio_clone(bio, bio_src);
  
  		if (bio_integrity(bio_src) &&
  		    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
  			goto free_and_out;
  
  		if (bio_ctr && bio_ctr(bio, bio_src, data))
  			goto free_and_out;
  
  		if (rq->bio) {
  			rq->biotail->bi_next = bio;
  			rq->biotail = bio;
  		} else
  			rq->bio = rq->biotail = bio;
  	}
  
  	__blk_rq_prep_clone(rq, rq_src);
  
  	return 0;
  
  free_and_out:
  	if (bio)
  		bio_free(bio, bs);
  	blk_rq_unprep_clone(rq);
  
  	return -ENOMEM;
  }
  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
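
/*
 * Usage sketch (editorial addition; hypothetical request stacking
 * driver, in the style of request-based dm): clone an incoming request
 * and dispatch the clone to a lower-level device.
 * "example_clone_end_io" is an invented completion callback.
 */
#if 0
static int example_clone_and_dispatch(struct request *rq,
				      struct request *clone,
				      struct request_queue *lower_q)
{
	if (blk_rq_prep_clone(clone, rq, NULL, GFP_ATOMIC, NULL, NULL))
		return -ENOMEM;	/* NULL bio_set means fs_bio_set is used */

	clone->end_io = example_clone_end_io;
	return blk_insert_cloned_request(lower_q, clone);
}
#endif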

int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

  int kblockd_schedule_delayed_work(struct request_queue *q,
  			struct delayed_work *dwork, unsigned long delay)
  {
  	return queue_delayed_work(kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work);
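
/*
 * Usage sketch (editorial addition; hypothetical): defer queue
 * processing to kblockd instead of doing it in a hot IRQ path.
 * "example_dwork" would be INIT_DELAYED_WORK()'d at init time.
 */
#if 0
static struct delayed_work example_dwork;

static void example_kick_queue_later(struct request_queue *q)
{
	kblockd_schedule_delayed_work(q, &example_dwork, HZ / 100);
}
#endif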

#define PLUG_MAGIC	0x91827364

  /**
   * blk_start_plug - initialize blk_plug and track it inside the task_struct
   * @plug:	The &struct blk_plug that needs to be initialized
   *
   * Description:
   *   Tracking blk_plug inside the task_struct will help with auto-flushing the
   *   pending I/O should the task end up blocking between blk_start_plug() and
   *   blk_finish_plug(). This is important from a performance perspective, but
   *   also ensures that we don't deadlock. For instance, if the task is blocking
   *   for a memory allocation, memory reclaim could end up wanting to free a
   *   page belonging to that request that is currently residing in our private
   *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
   *   this kind of deadlock.
   */
  void blk_start_plug(struct blk_plug *plug)
  {
  	struct task_struct *tsk = current;
  
  	plug->magic = PLUG_MAGIC;
  	INIT_LIST_HEAD(&plug->list);
  	INIT_LIST_HEAD(&plug->cb_list);
  	plug->should_sort = 0;
  
  	/*
  	 * If this is a nested plug, don't actually assign it. It will be
  	 * flushed on its own.
  	 */
  	if (!tsk->plug) {
  		/*
  		 * Store ordering should not be needed here, since a potential
  		 * preempt will imply a full memory barrier
  		 */
  		tsk->plug = plug;
  	}
  }
  EXPORT_SYMBOL(blk_start_plug);
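
/*
 * Usage sketch (editorial addition): the standard plugging pattern.  A
 * submitter batches bios inside a plug; blk_finish_plug() flushes the
 * whole batch to the queues in one go.
 */
#if 0
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);
}
#endif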
  
  static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  {
  	struct request *rqa = container_of(a, struct request, queuelist);
  	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q <= rqb->q);
  }

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context. We do this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the original
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	trace_block_unplug(q, depth, !from_schedule);
  
  	/*
  	 * Don't mess with dead queue.
  	 */
  	if (unlikely(blk_queue_dead(q))) {
  		spin_unlock(q->queue_lock);
  		return;
  	}
  
  	/*
  	 * If we are punting this to kblockd, then we can safely drop
  	 * the queue_lock before waking kblockd (which needs to take
  	 * this lock).
  	 */
  	if (from_schedule) {
  		spin_unlock(q->queue_lock);
  		blk_run_queue_async(q);
  	} else {
  		__blk_run_queue(q);
  		spin_unlock(q->queue_lock);
  	}
  }
  static void flush_plug_callbacks(struct blk_plug *plug)
  {
  	LIST_HEAD(callbacks);
  
  	if (list_empty(&plug->cb_list))
  		return;
  
  	list_splice_init(&plug->cb_list, &callbacks);
  
  	while (!list_empty(&callbacks)) {
  		struct blk_plug_cb *cb = list_first_entry(&callbacks,
  							  struct blk_plug_cb,
  							  list);
  		list_del(&cb->list);
  		cb->callback(cb);
  	}
  }
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
  	unsigned long flags;
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
  
  	BUG_ON(plug->magic != PLUG_MAGIC);
  	flush_plug_callbacks(plug);
  	if (list_empty(&plug->list))
  		return;
  	list_splice_init(&plug->list, &list);
  
  	if (plug->should_sort) {
  		list_sort(NULL, &list, plug_rq_cmp);
  		plug->should_sort = 0;
  	}
  
  	q = NULL;
  	depth = 0;
  
  	/*
  	 * Save and disable interrupts here, to avoid doing it for every
  	 * queue lock we have to take.
  	 */
  	local_irq_save(flags);
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
  		BUG_ON(!rq->q);
  		if (rq->q != q) {
  			/*
  			 * This drops the queue lock
  			 */
  			if (q)
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
  			spin_lock(q->queue_lock);
  		}
  
  		/*
  		 * Short-circuit if @q is dead
  		 */
  		if (unlikely(blk_queue_dead(q))) {
  			__blk_end_request_all(rq, -ENODEV);
  			continue;
  		}
  		/*
  		 * rq is already accounted, so use raw insert
  		 */
  		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
  			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
  		else
  			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
  
  		depth++;
  	}
  	/*
  	 * This drops the queue lock
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);

  	local_irq_restore(flags);
  }
  
  void blk_finish_plug(struct blk_plug *plug)
  {
  	blk_flush_plug_list(plug, false);

  	if (plug == current->plug)
  		current->plug = NULL;
  }
  EXPORT_SYMBOL(blk_finish_plug);

  int __init blk_dev_init(void)
  {
  	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
  			sizeof(((struct request *)0)->cmd_flags));
  	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
  	kblockd_workqueue = alloc_workqueue("kblockd",
  					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
  			sizeof(struct request), 0, SLAB_PANIC, NULL);
  	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
  			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
  	return 0;
  }