block/blk-core.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/backing-dev.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"

  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
  {
  	int nr;
  
  	nr = q->nr_requests - (q->nr_requests / 8) + 1;
  	if (nr > q->nr_requests)
  		nr = q->nr_requests;
  	q->nr_congestion_on = nr;
  
  	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  	if (nr < 1)
  		nr = 1;
  	q->nr_congestion_off = nr;
  }
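
/*
 * Editor's note, not part of the original file: a worked example of the
 * thresholds computed above.  With the default q->nr_requests = 128:
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so a request list is flagged congested once it holds 113 requests, and
 * the flag is only cleared again when it drains below 103, giving the two
 * thresholds some hysteresis.
 */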

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info.  This function can only be called if @bdev is opened
 * and the return value is never NULL.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

  void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		bio->bi_error = error;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
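
/*
 * Editor's note, not part of the original file: a hedged usage sketch for
 * blk_delay_queue().  A legacy request_fn driver that runs out of device
 * resources can back off instead of busy-looping; my_dev_has_resources()
 * and my_dev_issue() are hypothetical helpers.  request_fn is entered with
 * the queue lock held, as blk_delay_queue() requires.
 */
#if 0
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!my_dev_has_resources(q->queuedata)) {
			/* put the request back and retry in 100 ms */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 100);
			return;
		}
		my_dev_issue(rq);
	}
}
#endif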

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
  
  /**
   * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
  }
  EXPORT_SYMBOL(blk_stop_queue);
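
/*
 * Editor's note, not part of the original file: a hedged sketch of the
 * stop/start protocol described above.  On a 'queue full' response the
 * driver stops the queue; the completion interrupt restarts it once the
 * device has drained.  my_queue_full() and my_dev_irq() are hypothetical.
 */
#if 0
/* called with q->queue_lock held, from the driver's request_fn */
static void my_queue_full(struct request_queue *q, struct request *rq)
{
	blk_requeue_request(q, rq);
	blk_stop_queue(q);
}

/* completion interrupt: the device has room again */
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct request_queue *q = data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);	/* clears QUEUE_FLAG_STOPPED, reruns request_fn */
	spin_unlock_irqrestore(q->queue_lock, flags);
	return IRQ_HANDLED;
}
#endif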
  
  /**
   * blk_sync_queue - cancel any pending callbacks on a queue
   * @q: the queue
   *
   * Description:
   *     The block layer may perform asynchronous callback activity
   *     on a queue, such as calling the unplug function after a timeout.
   *     A block device may call blk_sync_queue to ensure that any
   *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			cancel_delayed_work_sync(&hctx->run_work);
			cancel_delayed_work_sync(&hctx->delay_work);
		}
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
  }
  EXPORT_SYMBOL(blk_sync_queue);
  
  /**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  
  /**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
  }
  EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
  EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
  }

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set, and that blk_queue_bypass() is
 * %true inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
  }
  EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  
  /**
   * blk_queue_bypass_end - leave queue bypass mode
   * @q: queue of interest
   *
   * Leave bypass mode and restore the normal queueing behavior.
   */
  void blk_queue_bypass_end(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	if (!--q->bypass_depth)
  		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
  	WARN_ON_ONCE(q->bypass_depth < 0);
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
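
/*
 * Editor's note, not part of the original file: bypass start/end are meant
 * to be used as a pair around work that cannot tolerate an active elevator
 * or blkcg throttling, roughly as the elevator-switch path does.
 * my_reconfigure() is a hypothetical helper.
 */
#if 0
static int my_switch(struct request_queue *q)
{
	int ret;

	blk_queue_bypass_start(q);	/* drain ELVPRIV requests first */
	ret = my_reconfigure(q);
	blk_queue_bypass_end(q);
	return ret;
}
#endif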
  void blk_set_queue_dying(struct request_queue *q)
  {
  	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
  
  	if (q->mq_ops)
  		blk_mq_wake_waiters(q);
  	else {
  		struct request_list *rl;
  
  		blk_queue_for_each_rl(rl, q) {
  			if (rl->rq_pool) {
  				wake_up(&rl->wait[BLK_RW_SYNC]);
  				wake_up(&rl->wait[BLK_RW_ASYNC]);
  			}
  		}
  	}
  }
  EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	if (!q->mq_ops)
		__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	bdi_unregister(&q->backing_dev_info);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
  EXPORT_SYMBOL(blk_cleanup_queue);
  /* Allocate memory local to the request queue */
  static void *alloc_request_struct(gfp_t gfp_mask, void *data)
  {
  	int nid = (int)(long)data;
  	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
  }
  
  static void free_request_struct(void *element, void *unused)
  {
  	kmem_cache_free(request_cachep, element);
  }

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
					  free_request_struct,
					  (void *)(long)q->node, gfp_mask,
					  q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

  void blk_exit_rl(struct request_list *rl)
  {
  	if (rl->rq_pool)
  		mempool_destroy(rl->rq_pool);
  }

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, bool nowait)
  {
  	while (true) {
  		int ret;
  
  		if (percpu_ref_tryget_live(&q->q_usage_counter))
  			return 0;
  		if (nowait)
  			return -EBUSY;
  
  		ret = wait_event_interruptible(q->mq_freeze_wq,
  				!atomic_read(&q->mq_freeze_depth) ||
  				blk_queue_dying(q));
  		if (blk_queue_dying(q))
  			return -ENODEV;
  		if (ret)
  			return ret;
  	}
  }
  
  void blk_queue_exit(struct request_queue *q)
  {
  	percpu_ref_put(&q->q_usage_counter);
  }
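
/*
 * Editor's note, not part of the original file: blk_queue_enter() and
 * blk_queue_exit() bracket work that must not race with queue freezing or
 * teardown, roughly as generic_make_request() uses them.  my_submit() is
 * a hypothetical caller.
 */
#if 0
static blk_qc_t my_submit(struct request_queue *q, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;

	/* with nowait, a frozen queue fails with -EBUSY instead of blocking */
	if (blk_queue_enter(q, false) == 0) {
		ret = q->make_request_fn(q, bio);
		blk_queue_exit(q);	/* drop the q_usage_counter reference */
	}
	return ret;
}
#endif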
  
  static void blk_queue_usage_counter_release(struct percpu_ref *ref)
  {
  	struct request_queue *q =
  		container_of(ref, struct request_queue, q_usage_counter);
  
  	wake_up_all(&q->mq_freeze_wq);
  }
  static void blk_rq_timed_out_timer(unsigned long data)
  {
  	struct request_queue *q = (struct request_queue *)data;
  
  	kblockd_schedule_work(&q->timeout_work);
  }

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_split;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	bdi_destroy(&q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
  EXPORT_SYMBOL(blk_alloc_queue_node);
  
  /**
   * blk_init_queue  - prepare a request queue for use with a block device
   * @rfn:  The function to be called to process requests that have been
   *        placed on the queue.
   * @lock: Request queue spin lock
   *
   * Description:
   *    If a block device wishes to use the standard request handling procedures,
   *    which sorts requests and coalesces adjacent requests, then it must
   *    call blk_init_queue().  The function @rfn will be called when there
   *    are requests on the queue that need to be processed.  If the device
   *    supports plugging, then @rfn may not be called immediately when requests
   *    are available on the queue, but may be called at some time later instead.
   *    Plugged queues are generally unplugged when a buffer belonging to one
   *    of the requests on the queue is needed, or due to memory pressure.
   *
   *    @rfn is not required, or even expected, to remove all requests off the
   *    queue, but only as many as it can handle at a time.  If it does leave
   *    requests on the queue, it is responsible for arranging that the requests
   *    get dealt with eventually.
   *
   *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
  }
  EXPORT_SYMBOL(blk_init_queue);
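
/*
 * Editor's note, not part of the original file: the typical pairing from
 * the kernel-doc above.  A driver sets the queue up at probe time and must
 * tear it down with blk_cleanup_queue() on removal.  my_request_fn and
 * my_disk are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);

static int my_probe(void)
{
	struct request_queue *q;

	q = blk_init_queue(my_request_fn, &my_lock);
	if (!q)
		return -ENOMEM;
	my_disk->queue = q;
	return 0;
}

static void my_remove(void)
{
	blk_cleanup_queue(my_disk->queue);	/* pairs with blk_init_queue() */
}
#endif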

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
  }
  EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

  struct request_queue *
  blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
  			 spinlock_t *lock)
  {
	if (!q)
		return NULL;

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unprep_rq_fn		= NULL;
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock		= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto fail;
	}

	mutex_unlock(&q->sysfs_lock);
	return q;

fail:
	blk_free_flush_queue(q->fq);
	return NULL;
}
  EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
  EXPORT_SYMBOL(blk_get_queue);
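
/*
 * Editor's note, not part of the original file: blk_get_queue() pairs with
 * blk_put_queue().  A holder that wants @q to stay allocated takes a
 * reference and drops it when done; the get fails once the queue is dying.
 * my_grab_queue() is a hypothetical caller.
 */
#if 0
static struct request_queue *my_grab_queue(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q || !blk_get_queue(q))
		return NULL;
	return q;	/* caller must blk_put_queue(q) when finished */
}
#endif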

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
  }

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
  {
  	if (!ioc)
  		return 0;
  
  	/*
  	 * Make sure the process is able to allocate at least 1 request
  	 * even if the batch times out, otherwise we could theoretically
  	 * lose wakeups.
  	 */
  	return ioc->nr_batch_requests == q->nr_batching ||
  		(ioc->nr_batch_requests > 0
  		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
  }
  
  /*
   * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
   * will cause the process to be a "batcher" on all queues in the system. This
   * is the behaviour we want though - once it gets a wakeup it should be given
   * a nice run.
   */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
  {
  	if (!ioc || ioc_batching(q, ioc))
  		return;
  
  	ioc->nr_batch_requests = q->nr_batching;
  	ioc->last_waited = jiffies;
  }

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
  	}
  }
  
  /*
   * A request has just been released.  Account for it, update the full and
   * congestion status, wake up any waiters.   Called under q->queue_lock.
   */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
  }

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

  		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
  			blk_set_rl_full(rl, BLK_RW_SYNC);
  		} else {
  			blk_clear_rl_full(rl, BLK_RW_SYNC);
  			wake_up(&rl->wait[BLK_RW_SYNC]);
  		}
  
  		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
  			blk_set_rl_full(rl, BLK_RW_ASYNC);
  		} else {
  			blk_clear_rl_full(rl, BLK_RW_ASYNC);
  			wake_up(&rl->wait[BLK_RW_ASYNC]);
  		}
  	}
  
  	spin_unlock_irq(q->queue_lock);
  	return 0;
  }
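
/*
 * Editor's note, not part of the original file: on non-mq queues this is
 * reached when user space resizes the request pool through sysfs, e.g.
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 *
 * which recomputes the congestion thresholds and full flags above.
 */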

/*
   * Determine if elevator data should be initialized when allocating the
   * request associated with @bio.
   */
  static bool blk_rq_should_init_elevator(struct bio *bio)
  {
  	if (!bio)
  		return true;
  
  	/*
  	 * Flush requests do not use the elevator so skip initialization.
  	 * This allows a request to share the flush and elevator data.
  	 */
  	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
  		return false;
  
  	return true;
  }

/**
   * rq_ioc - determine io_context for request allocation
   * @bio: request being allocated is for this bio (can be %NULL)
   *
   * Determine io_context to use for request allocation for @bio.  May return
   * %NULL if %current->io_context doesn't exist.
   */
  static struct io_context *rq_ioc(struct bio *bio)
  {
  #ifdef CONFIG_BLK_CGROUP
  	if (bio && bio->bi_ioc)
  		return bio->bi_ioc;
  #endif
  	return current->io_context;
  }
  
  /**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
   */
5b788ce3e   Tejun Heo   block: prepare fo...
1000
  static struct request *__get_request(struct request_list *rl, int rw_flags,
a06e05e6a   Tejun Heo   block: refactor g...
1001
  				     struct bio *bio, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1002
  {
5b788ce3e   Tejun Heo   block: prepare fo...
1003
  	struct request_queue *q = rl->q;
b679281a6   Tejun Heo   block: restructur...
1004
  	struct request *rq;
  	struct elevator_type *et = q->elevator->type;
  	struct io_context *ioc = rq_ioc(bio);
  	struct io_cq *icq = NULL;
  	const bool is_sync = rw_is_sync(rw_flags) != 0;
  	int may_queue;

  	if (unlikely(blk_queue_dying(q)))
  		return ERR_PTR(-ENODEV);

  	may_queue = elv_may_queue(q, rw_flags);
  	if (may_queue == ELV_MQUEUE_NO)
  		goto rq_starved;
  	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
  		if (rl->count[is_sync]+1 >= q->nr_requests) {
  			/*
  			 * The queue will fill after this allocation, so set
  			 * it as full, and mark this process as "batching".
  			 * This process will be allowed to complete a batch of
  			 * requests, others will be blocked.
  			 */
  			if (!blk_rl_full(rl, is_sync)) {
  				ioc_set_batching(q, ioc);
  				blk_set_rl_full(rl, is_sync);
  			} else {
  				if (may_queue != ELV_MQUEUE_MUST
  						&& !ioc_batching(q, ioc)) {
  					/*
  					 * The queue is full and the allocating
  					 * process is not a "batcher", and not
  					 * exempted by the IO scheduler
  					 */
  					return ERR_PTR(-ENOMEM);
  				}
  			}
  		}
  		blk_set_congested(rl, is_sync);
  	}
  	/*
  	 * Only allow batching queuers to allocate up to 50% over the defined
  	 * limit of requests, otherwise we could have thousands of requests
  	 * allocated with any setting of ->nr_requests
  	 */
  	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
  		return ERR_PTR(-ENOMEM);

  	q->nr_rqs[is_sync]++;
  	rl->count[is_sync]++;
  	rl->starved[is_sync] = 0;

  	/*
  	 * Decide whether the new request will be managed by elevator.  If
  	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
  	 * prevent the current elevator from being destroyed until the new
  	 * request is freed.  This guarantees icq's won't be destroyed and
  	 * makes creating new ones safe.
  	 *
  	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
  	 * it will be created after releasing queue_lock.
  	 */
  	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
  		rw_flags |= REQ_ELVPRIV;
  		q->nr_rqs_elvpriv++;
  		if (et->icq_cache && ioc)
  			icq = ioc_lookup_icq(ioc, q);
  	}

  	if (blk_queue_io_stat(q))
  		rw_flags |= REQ_IO_STAT;
  	spin_unlock_irq(q->queue_lock);
  	/* allocate and init request */
  	rq = mempool_alloc(rl->rq_pool, gfp_mask);
  	if (!rq)
  		goto fail_alloc;

  	blk_rq_init(q, rq);
  	blk_rq_set_rl(rq, rl);
  	rq->cmd_flags = rw_flags | REQ_ALLOCED;
  	/* init elvpriv */
  	if (rw_flags & REQ_ELVPRIV) {
  		if (unlikely(et->icq_cache && !icq)) {
  			if (ioc)
  				icq = ioc_create_icq(ioc, q, gfp_mask);
  			if (!icq)
  				goto fail_elvpriv;
  		}
  
  		rq->elv.icq = icq;
  		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
  			goto fail_elvpriv;
  
  		/* @rq->elv.icq holds io_context until @rq is freed */
  		if (icq)
  			get_io_context(icq->ioc);
  	}
  out:
  	/*
  	 * ioc may be NULL here, and ioc_batching will be false. That's
  	 * OK, if the queue is under the request limit then requests need
  	 * not count toward the nr_batch_requests limit. There will always
  	 * be some limit enforced by BLK_BATCH_TIME.
  	 */
  	if (ioc_batching(q, ioc))
  		ioc->nr_batch_requests--;

  	trace_block_getrq(q, bio, rw_flags & 1);
  	return rq;

  fail_elvpriv:
  	/*
  	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
  	 * and may fail indefinitely under memory pressure and thus
  	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
  	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
  			   __func__, dev_name(q->backing_dev_info.dev));
  
  	rq->cmd_flags &= ~REQ_ELVPRIV;
  	rq->elv.icq = NULL;
  
  	spin_lock_irq(q->queue_lock);
  	q->nr_rqs_elvpriv--;
  	spin_unlock_irq(q->queue_lock);
  	goto out;
  fail_alloc:
  	/*
  	 * Allocation failed presumably due to memory. Undo anything we
  	 * might have messed up.
  	 *
  	 * Allocating task should really be put onto the front of the wait
  	 * queue, but this is pretty rare.
  	 */
  	spin_lock_irq(q->queue_lock);
  	freed_request(rl, rw_flags);
  
  	/*
	 * In the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so
	 * that freeing of a request in the other direction will notice
	 * us.  Another possible fix would be to split the rq mempool
	 * into READ and WRITE.
  	 */
  rq_starved:
  	if (unlikely(rl->count[is_sync] == 0))
  		rl->starved[is_sync] = 1;
  	return ERR_PTR(-ENOMEM);
  }
  /**
   * get_request - get a free request
   * @q: request_queue to allocate request from
   * @rw_flags: RW and SYNC flags
   * @bio: bio to allocate request for (can be %NULL)
   * @gfp_mask: allocation mask
   *
   * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
   * this function keeps retrying under memory pressure and fails iff @q is dead.
   *
 * Must be called with @q->queue_lock held.
   * Returns ERR_PTR on failure, with @q->queue_lock held.
   * Returns request pointer on success, with @q->queue_lock *not held*.
   */
  static struct request *get_request(struct request_queue *q, int rw_flags,
  				   struct bio *bio, gfp_t gfp_mask)
  {
  	const bool is_sync = rw_is_sync(rw_flags) != 0;
  	DEFINE_WAIT(wait);
  	struct request_list *rl;
  	struct request *rq;
  
  	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
  retry:
  	rq = __get_request(rl, rw_flags, bio, gfp_mask);
  	if (!IS_ERR(rq))
  		return rq;

  	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
  		blk_put_rl(rl);
  		return rq;
  	}

  	/* wait on @rl and retry */
  	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
  				  TASK_UNINTERRUPTIBLE);

  	trace_block_sleeprq(q, bio, rw_flags & 1);

  	spin_unlock_irq(q->queue_lock);
  	io_schedule();

  	/*
  	 * After sleeping, we become a "batching" process and will be able
  	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching
  	 */
  	ioc_set_batching(q, current->io_context);

  	spin_lock_irq(q->queue_lock);
  	finish_wait(&rl->wait[is_sync], &wait);

  	goto retry;
  }
  static struct request *blk_old_get_request(struct request_queue *q, int rw,
  		gfp_t gfp_mask)
  {
  	struct request *rq;
  
  	BUG_ON(rw != READ && rw != WRITE);
  	/* create ioc upfront */
  	create_io_context(gfp_mask, q->node);
  	spin_lock_irq(q->queue_lock);
  	rq = get_request(q, rw, NULL, gfp_mask);
  	if (IS_ERR(rq))
  		spin_unlock_irq(q->queue_lock);
  	/* q->queue_lock is unlocked at this point */
  
  	return rq;
  }
  
  struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
  {
  	if (q->mq_ops)
  		return blk_mq_alloc_request(q, rw,
  			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
  				0 : BLK_MQ_REQ_NOWAIT);
  	else
  		return blk_old_get_request(q, rw, gfp_mask);
  }
  EXPORT_SYMBOL(blk_get_request);
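
/*
 * Usage sketch (illustrative only, not called from this file): allocate
 * a request on a live queue, prepare it as a BLOCK_PC command and issue
 * it synchronously via blk_execute_rq().  Command setup and any data
 * mapping are elided and left to the caller.
 */
static int __maybe_unused blk_get_request_example(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
	int err;

	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_rq_set_block_pc(rq);
	/* ... fill in rq->cmd / rq->cmd_len, map data if needed ... */
	err = blk_execute_rq(q, NULL, rq, 0);	/* waits for completion */
	blk_put_request(rq);
	return err;
}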
  
  /**
   * blk_make_request - given a bio, allocate a corresponding struct request.
   * @q: target request queue
   * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained bio properly constructed by the block/bio layer.
   * @gfp_mask: gfp flags to be used for memory allocation
   *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized
 * by the caller.  It is passed a &struct bio, which describes the memory
 * info of the I/O transfer.
   *
 * The caller of blk_make_request must make sure that bi_io_vec
 * is set to describe the memory buffers, that bio_data_dir() will return
 * the needed direction of the request, and that all bio's in the passed
 * bio-chain are properly set accordingly.
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing; use the appropriate masked or flagged allocator suitable
 * for the target device, or the call to blk_queue_bounce will BUG.
   *
   * WARNING: When allocating/cloning a bio-chain, careful consideration should be
   * given to how you allocate bios. In particular, you cannot use
   * __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
   * you risk waiting for IO completion of a bio that hasn't been submitted yet,
   * thus resulting in a deadlock. Alternatively bios should be allocated using
   * bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
   * If possible a big IO should be split into smaller parts when allocation
   * fails. Partial allocation should not be an error, or you risk a live-lock.
   */
  struct request *blk_make_request(struct request_queue *q, struct bio *bio,
  				 gfp_t gfp_mask)
  {
  	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
  	if (IS_ERR(rq))
  		return rq;

  	blk_rq_set_block_pc(rq);
  	for_each_bio(bio) {
  		struct bio *bounce_bio = bio;
  		int ret;
  
  		blk_queue_bounce(q, &bounce_bio);
  		ret = blk_rq_append_bio(q, rq, bounce_bio);
  		if (unlikely(ret)) {
  			blk_put_request(rq);
  			return ERR_PTR(ret);
  		}
  	}
  
  	return rq;
  }
  EXPORT_SYMBOL(blk_make_request);
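
/*
 * Usage sketch (hypothetical helper): wrap a single page in a bio and
 * turn it into a BLOCK_PC request.  bio_kmalloc() is used, as the
 * comment above suggests, so the bio does not come from a mempool.
 * Command setup (rq->cmd, rq->cmd_len) and submission stay with the
 * caller.
 */
static struct request * __maybe_unused
make_pc_request_example(struct request_queue *q, struct block_device *bdev,
			struct page *page, unsigned int len)
{
	struct request *rq;
	struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);

	if (!bio)
		return ERR_PTR(-ENOMEM);

	bio->bi_bdev = bdev;
	bio->bi_rw |= WRITE;	/* bio_data_dir() picks the request direction */
	bio_add_page(bio, page, len, 0);

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		bio_put(bio);	/* on failure the bio is still ours to free */
	return rq;
}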
  
  /**
   * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
   * @rq:		request to be initialized
   *
   */
  void blk_rq_set_block_pc(struct request *rq)
  {
  	rq->cmd_type = REQ_TYPE_BLOCK_PC;
  	rq->__data_len = 0;
  	rq->__sector = (sector_t) -1;
  	rq->bio = rq->biotail = NULL;
  	memset(rq->__cmd, 0, sizeof(rq->__cmd));
  }
  EXPORT_SYMBOL(blk_rq_set_block_pc);
  
  /**
   * blk_requeue_request - put a request back on queue
   * @q:		request queue where request should be inserted
   * @rq:		request to be inserted
   *
   * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more; when that happens, the request must be put back on the queue.
 *    Must be called with the queue lock held.
   */
  void blk_requeue_request(struct request_queue *q, struct request *rq)
  {
  	blk_delete_timer(rq);
  	blk_clear_rq_complete(rq);
  	trace_block_rq_requeue(q, rq);

  	if (rq->cmd_flags & REQ_QUEUED)
  		blk_queue_end_tag(q, rq);
  	BUG_ON(blk_queued_rq(rq));
  	elv_requeue_request(q, rq);
  }
  EXPORT_SYMBOL(blk_requeue_request);
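
/*
 * Usage sketch (hypothetical driver code): a request_fn that backs off
 * when its hardware is saturated.  The hw_busy flag stands in for a
 * driver-specific predicate; the queue lock is held in request_fn
 * context, as blk_requeue_request() requires.
 */
static void __maybe_unused requeue_example(struct request_queue *q,
					   bool hw_busy)
{
	struct request *rq = blk_fetch_request(q);

	if (rq && hw_busy) {
		blk_requeue_request(q, rq);
		return;
	}
	/* ... otherwise hand rq to the hardware ... */
}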
  static void add_acct_request(struct request_queue *q, struct request *rq,
  			     int where)
  {
  	blk_account_io_start(rq, true);
  	__elv_add_request(q, rq, where);
  }
  static void part_round_stats_single(int cpu, struct hd_struct *part,
  				    unsigned long now)
  {
  	int inflight;
  	if (now == part->stamp)
  		return;
  	inflight = part_in_flight(part);
  	if (inflight) {
  		__part_stat_add(cpu, part, time_in_queue,
  				inflight * (now - part->stamp));
  		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
  	}
  	part->stamp = now;
  }
  
  /**
   * part_round_stats() - Round off the performance stats on a struct disk_stats.
   * @cpu: cpu number for stats access
   * @part: target partition
   *
   * The average IO queue length and utilisation statistics are maintained
   * by observing the current state of the queue length and the amount of
 * time it has been in this state.
   *
   * Normally, that accounting is done on IO completion, but that can result
   * in more than a second's worth of IO being accounted for within any one
   * second, leading to >100% utilisation.  To deal with that, we call this
   * function to do a round-off before returning the results when reading
   * /proc/diskstats.  This accounts immediately for all queue usage up to
   * the current jiffies and restarts the counters again.
   */
  void part_round_stats(int cpu, struct hd_struct *part)
  {
  	unsigned long now = jiffies;
  	if (part->partno)
  		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
  	part_round_stats_single(cpu, part, now);
  }
  EXPORT_SYMBOL_GPL(part_round_stats);
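
/*
 * Usage sketch (illustrative): a stats reader does the round-off under
 * the part_stat lock before sampling the counters, the same way the
 * diskstats code does.
 */
static void __maybe_unused part_round_stats_example(struct hd_struct *part)
{
	int cpu = part_stat_lock();	/* rcu_read_lock() + get_cpu() */

	part_round_stats(cpu, part);
	part_stat_unlock();
}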

  #ifdef CONFIG_PM
  static void blk_pm_put_request(struct request *rq)
  {
  	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
  		pm_runtime_mark_last_busy(rq->q->dev);
  }
  #else
  static inline void blk_pm_put_request(struct request *rq) {}
  #endif
  /*
   * queue lock must be held
   */
  void __blk_put_request(struct request_queue *q, struct request *req)
  {
  	if (unlikely(!q))
  		return;

  	if (q->mq_ops) {
  		blk_mq_free_request(req);
  		return;
  	}
  	blk_pm_put_request(req);
  	elv_completed_request(q, req);
  	/* this is a bio leak */
  	WARN_ON(req->bio != NULL);
  	/*
	 * Request may not have originated from ll_rw_blk.  If not,
  	 * it didn't come out of our reserved rq pools
  	 */
  	if (req->cmd_flags & REQ_ALLOCED) {
  		unsigned int flags = req->cmd_flags;
  		struct request_list *rl = blk_rq_rl(req);

  		BUG_ON(!list_empty(&req->queuelist));
  		BUG_ON(ELV_ON_HASH(req));

  		blk_free_request(rl, req);
  		freed_request(rl, flags);
  		blk_put_rl(rl);
  	}
  }
  EXPORT_SYMBOL_GPL(__blk_put_request);
  void blk_put_request(struct request *req)
  {
  	struct request_queue *q = req->q;

  	if (q->mq_ops)
  		blk_mq_free_request(req);
  	else {
  		unsigned long flags;
  
  		spin_lock_irqsave(q->queue_lock, flags);
  		__blk_put_request(q, req);
  		spin_unlock_irqrestore(q->queue_lock, flags);
  	}
  }
  EXPORT_SYMBOL(blk_put_request);
  /**
   * blk_add_request_payload - add a payload to a request
   * @rq: request to update
   * @page: page backing the payload
   * @len: length of the payload.
   *
 * This allows a block driver to later add a payload to an already
 * submitted request.  The driver needs to take care of freeing the
 * payload itself.
   *
   * Note that this is a quite horrible hack and nothing but handling of
   * discard requests should ever use it.
   */
  void blk_add_request_payload(struct request *rq, struct page *page,
  		unsigned int len)
  {
  	struct bio *bio = rq->bio;
  
  	bio->bi_io_vec->bv_page = page;
  	bio->bi_io_vec->bv_offset = 0;
  	bio->bi_io_vec->bv_len = len;
  	bio->bi_iter.bi_size = len;
  	bio->bi_vcnt = 1;
  	bio->bi_phys_segments = 1;
  
  	rq->__data_len = rq->resid_len = len;
  	rq->nr_phys_segments = 1;
  }
  EXPORT_SYMBOL_GPL(blk_add_request_payload);
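
/*
 * Usage sketch (hypothetical driver code): attach a single-page payload
 * to a discard request, e.g. a descriptor the hardware consumes.  The
 * 24-byte length is purely illustrative; the driver owns @page and must
 * free it after completion.
 */
static void __maybe_unused discard_payload_example(struct request *rq,
						   struct page *page)
{
	blk_add_request_payload(rq, page, 24);
}
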
  bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
  			    struct bio *bio)
  {
  	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
  	if (!ll_back_merge_fn(q, req, bio))
  		return false;
  	trace_block_bio_backmerge(q, req, bio);
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
  
  	req->biotail->bi_next = bio;
  	req->biotail = bio;
  	req->__data_len += bio->bi_iter.bi_size;
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
  	blk_account_io_start(req, false);
  	return true;
  }
  bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  			     struct bio *bio)
  {
  	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

  	if (!ll_front_merge_fn(q, req, bio))
  		return false;
  	trace_block_bio_frontmerge(q, req, bio);
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
  	bio->bi_next = req->bio;
  	req->bio = bio;
  	req->__sector = bio->bi_iter.bi_sector;
  	req->__data_len += bio->bi_iter.bi_size;
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
  	blk_account_io_start(req, false);
  	return true;
  }
  /**
   * blk_attempt_plug_merge - try to merge with %current's plugged list
   * @q: request_queue new bio is being queued at
   * @bio: new bio being queued
   * @request_count: out parameter for number of traversed plugged requests
   * @same_queue_rq: pointer to &struct request that gets filled in when
   * another request associated with @q is found on the plug list
   * (optional, may be %NULL)
   *
   * Determine whether @bio being queued on @q can be merged with a request
   * on %current's plugged list.  Returns %true if merge was successful,
   * otherwise %false.
   *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check
 * basic merging parameters without querying the elevator.
   *
   * Caller must ensure !blk_queue_nomerges(q) beforehand.
   */
  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
  			    unsigned int *request_count,
  			    struct request **same_queue_rq)
  {
  	struct blk_plug *plug;
  	struct request *rq;
  	bool ret = false;
  	struct list_head *plug_list;

  	plug = current->plug;
  	if (!plug)
  		goto out;
  	*request_count = 0;

  	if (q->mq_ops)
  		plug_list = &plug->mq_list;
  	else
  		plug_list = &plug->list;
  
  	list_for_each_entry_reverse(rq, plug_list, queuelist) {
  		int el_ret;
  		if (rq->q == q) {
  			(*request_count)++;
			/*
			 * Only the blk-mq multiple hardware queues case
			 * checks the rq in the same queue; there should be
			 * only one such rq in a queue.
			 */
  			if (same_queue_rq)
  				*same_queue_rq = rq;
  		}

  		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
  			continue;
  		el_ret = blk_try_merge(rq, bio);
  		if (el_ret == ELEVATOR_BACK_MERGE) {
  			ret = bio_attempt_back_merge(q, rq, bio);
  			if (ret)
  				break;
  		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
  			ret = bio_attempt_front_merge(q, rq, bio);
  			if (ret)
  				break;
  		}
  	}
  out:
  	return ret;
  }
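
/*
 * Sketch of the plugging context the merge path above relies on
 * (illustrative): an issuer opens a plug, submits a batch of bios --
 * each one a candidate for blk_attempt_plug_merge() -- and the batch
 * is flushed when the plug is closed.
 */
static void __maybe_unused plug_batch_example(struct bio *bios[], int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bio_data_dir(bios[i]), bios[i]);
	blk_finish_plug(&plug);
}
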
  unsigned int blk_plug_queued_count(struct request_queue *q)
  {
  	struct blk_plug *plug;
  	struct request *rq;
  	struct list_head *plug_list;
  	unsigned int ret = 0;
  
  	plug = current->plug;
  	if (!plug)
  		goto out;
  
  	if (q->mq_ops)
  		plug_list = &plug->mq_list;
  	else
  		plug_list = &plug->list;
  
  	list_for_each_entry(rq, plug_list, queuelist) {
  		if (rq->q == q)
  			ret++;
  	}
  out:
  	return ret;
  }
  void init_request_from_bio(struct request *req, struct bio *bio)
  {
  	req->cmd_type = REQ_TYPE_FS;

  	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
  	if (bio->bi_rw & REQ_RAHEAD)
  		req->cmd_flags |= REQ_FAILFAST_MASK;

  	req->errors = 0;
  	req->__sector = bio->bi_iter.bi_sector;
  	req->ioprio = bio_prio(bio);
  	blk_rq_bio_prep(req->q, req, bio);
  }
  static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
  {
  	const bool sync = !!(bio->bi_rw & REQ_SYNC);
  	struct blk_plug *plug;
  	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
  	struct request *req;
  	unsigned int request_count = 0;

  	blk_queue_split(q, &bio, q->bio_split);
  	/*
  	 * low level driver can indicate that it wants pages above a
  	 * certain limit bounced to low memory (ie for highmem, or even
  	 * ISA dma in theory)
  	 */
  	blk_queue_bounce(q, &bio);
  	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
  		bio->bi_error = -EIO;
  		bio_endio(bio);
  		return BLK_QC_T_NONE;
  	}
  	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
  		spin_lock_irq(q->queue_lock);
  		where = ELEVATOR_INSERT_FLUSH;
  		goto get_rq;
  	}
  	/*
  	 * Check if we can merge with the plugged list before grabbing
  	 * any locks.
  	 */
  	if (!blk_queue_nomerges(q)) {
  		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
  			return BLK_QC_T_NONE;
  	} else
  		request_count = blk_plug_queued_count(q);

  	spin_lock_irq(q->queue_lock);

  	el_ret = elv_merge(q, &req, bio);
  	if (el_ret == ELEVATOR_BACK_MERGE) {
  		if (bio_attempt_back_merge(q, req, bio)) {
  			elv_bio_merged(q, req, bio);
  			if (!attempt_back_merge(q, req))
  				elv_merged_request(q, req, el_ret);
  			goto out_unlock;
  		}
  	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
  		if (bio_attempt_front_merge(q, req, bio)) {
  			elv_bio_merged(q, req, bio);
  			if (!attempt_front_merge(q, req))
  				elv_merged_request(q, req, el_ret);
  			goto out_unlock;
  		}
  	}
  get_rq:
  	/*
  	 * This sync check and mask will be re-done in init_request_from_bio(),
  	 * but we need to set it earlier to expose the sync flag to the
  	 * rq allocator and io schedulers.
  	 */
  	rw_flags = bio_data_dir(bio);
  	if (sync)
  		rw_flags |= REQ_SYNC;
  
  	/*
	 * Grab a free request. This might sleep but cannot fail.
  	 * Returns with the queue unlocked.
  	 */
  	req = get_request(q, rw_flags, bio, GFP_NOIO);
  	if (IS_ERR(req)) {
  		bio->bi_error = PTR_ERR(req);
  		bio_endio(bio);
  		goto out_unlock;
  	}

  	/*
  	 * After dropping the lock and possibly sleeping here, our request
  	 * may now be mergeable after it had proven unmergeable (above).
  	 * We don't worry about that case for efficiency. It won't happen
  	 * often, and the elevators are able to handle it.
  	 */
  	init_request_from_bio(req, bio);

  	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
  		req->cpu = raw_smp_processor_id();
  
  	plug = current->plug;
  	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 */
  		if (!request_count)
  			trace_block_plug(q);
  		else {
  			if (request_count >= BLK_MAX_REQUEST_COUNT) {
  				blk_flush_plug_list(plug, false);
  				trace_block_plug(q);
  			}
  		}
  		list_add_tail(&req->queuelist, &plug->list);
  		blk_account_io_start(req, true);
  	} else {
  		spin_lock_irq(q->queue_lock);
  		add_acct_request(q, req, where);
  		__blk_run_queue(q);
  out_unlock:
  		spin_unlock_irq(q->queue_lock);
  	}
  
  	return BLK_QC_T_NONE;
  }
  
  /*
 * If bio->bi_bdev is a partition, remap the location
   */
  static inline void blk_partition_remap(struct bio *bio)
  {
  	struct block_device *bdev = bio->bi_bdev;
  	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
  		struct hd_struct *p = bdev->bd_part;
  		bio->bi_iter.bi_sector += p->start_sect;
  		bio->bi_bdev = bdev->bd_contains;

  		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
  				      bdev->bd_dev,
  				      bio->bi_iter.bi_sector - p->start_sect);
  	}
  }
  static void handle_bad_sector(struct bio *bio)
  {
  	char b[BDEVNAME_SIZE];
  
	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
  			bdevname(bio->bi_bdev, b),
  			bio->bi_rw,
  			(unsigned long long)bio_end_sector(bio),
  			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
  }
  #ifdef CONFIG_FAIL_MAKE_REQUEST
  
  static DECLARE_FAULT_ATTR(fail_make_request);
  
  static int __init setup_fail_make_request(char *str)
  {
  	return setup_fault_attr(&fail_make_request, str);
  }
  __setup("fail_make_request=", setup_fail_make_request);
  static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
  {
  	return part->make_it_fail && should_fail(&fail_make_request, bytes);
  }
  
  static int __init fail_make_request_debugfs(void)
  {
  	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
  						NULL, &fail_make_request);
  	return PTR_ERR_OR_ZERO(dir);
  }
  
  late_initcall(fail_make_request_debugfs);
  
  #else /* CONFIG_FAIL_MAKE_REQUEST */
  static inline bool should_fail_request(struct hd_struct *part,
  					unsigned int bytes)
  {
  	return false;
  }
  
  #endif /* CONFIG_FAIL_MAKE_REQUEST */
  /*
   * Check whether this bio extends beyond the end of the device.
   */
  static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
  {
  	sector_t maxsector;
  
  	if (!nr_sectors)
  		return 0;
  
  	/* Test device or partition size, when known. */
  	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
  	if (maxsector) {
  		sector_t sector = bio->bi_iter.bi_sector;
  
  		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
  			/*
  			 * This may well happen - the kernel calls bread()
  			 * without checking the size of the device, e.g., when
  			 * mounting a device.
  			 */
  			handle_bad_sector(bio);
  			return 1;
  		}
  	}
  
  	return 0;
  }
  static noinline_for_stack bool
  generic_make_request_checks(struct bio *bio)
  {
  	struct request_queue *q;
  	int nr_sectors = bio_sectors(bio);
  	int err = -EIO;
  	char b[BDEVNAME_SIZE];
  	struct hd_struct *part;
  
  	might_sleep();

  	if (bio_check_eod(bio, nr_sectors))
  		goto end_io;

  	q = bdev_get_queue(bio->bi_bdev);
  	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
  			bdevname(bio->bi_bdev, b),
  			(long long) bio->bi_iter.bi_sector);
  		goto end_io;
  	}

  	part = bio->bi_bdev->bd_part;
  	if (should_fail_request(part, bio->bi_iter.bi_size) ||
  	    should_fail_request(&part_to_disk(part)->part0,
  				bio->bi_iter.bi_size))
  		goto end_io;

  	/*
  	 * If this device has partitions, remap block n
  	 * of partition p to block n+start(p) of the disk.
  	 */
  	blk_partition_remap(bio);

  	if (bio_check_eod(bio, nr_sectors))
  		goto end_io;

  	/*
  	 * Filter flush bio's early so that make_request based
  	 * drivers without flush support don't have to worry
  	 * about them.
  	 */
  	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
  		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
  		if (!nr_sectors) {
  			err = 0;
  			goto end_io;
  		}
  	}

  	if ((bio->bi_rw & REQ_DISCARD) &&
  	    (!blk_queue_discard(q) ||
  	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
  		err = -EOPNOTSUPP;
  		goto end_io;
  	}

  	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
  		err = -EOPNOTSUPP;
  		goto end_io;
  	}

  	/*
  	 * Various block parts want %current->io_context and lazy ioc
  	 * allocation ends up trading a lot of pain for a small amount of
  	 * memory.  Just allocate it upfront.  This may fail and block
  	 * layer knows how to live with it.
  	 */
  	create_io_context(GFP_ATOMIC, q->node);
  	if (!blkcg_bio_issue_check(q, bio))
  		return false;

  	trace_block_bio_queue(q, bio);
  	return true;
  
  end_io:
  	bio->bi_error = err;
  	bio_endio(bio);
  	return false;
  }
  /**
   * generic_make_request - hand a buffer to its device driver for I/O
   * @bio:  The bio describing the location in memory and on the device.
   *
   * generic_make_request() is used to make I/O requests of block
   * devices. It is passed a &struct bio, which describes the I/O that needs
   * to be done.
   *
   * generic_make_request() does not return any status.  The
   * success/failure status of the request, along with notification of
   * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
   *
   * The caller of generic_make_request must make sure that bi_io_vec
   * are set to describe the memory buffer, and that bi_dev and bi_sector are
   * set to describe the device address, and the
   * bi_end_io and optionally bi_private are set to describe how
   * completion notification should be signaled.
   *
   * generic_make_request and the drivers it calls may use bi_next if this
   * bio happens to be merged with someone else, and may resubmit the bio to
   * a lower device by calling into generic_make_request recursively, which
   * means the bio should NOT be touched after the call to ->make_request_fn.
   */
  blk_qc_t generic_make_request(struct bio *bio)
  {
  	struct bio_list bio_list_on_stack;
  	blk_qc_t ret = BLK_QC_T_NONE;

  	if (!generic_make_request_checks(bio))
  		goto out;
  
  	/*
  	 * We only want one ->make_request_fn to be active at a time, else
  	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
  	 * make_request_fn function.  current->bio_list is also used as a
  	 * flag to say if generic_make_request is currently active in this
  	 * task or not.  If it is NULL, then no make_request is active.  If
  	 * it is non-NULL, then a make_request is active, and new requests
  	 * should be added at the tail
  	 */
  	if (current->bio_list) {
  		bio_list_add(current->bio_list, bio);
  		goto out;
  	}

  	/* following loop may be a bit non-obvious, and so deserves some
  	 * explanation.
  	 * Before entering the loop, bio->bi_next is NULL (as all callers
  	 * ensure that) so we have a list with a single bio.
  	 * We pretend that we have just taken it off a longer list, so
  	 * we assign bio_list to a pointer to the bio_list_on_stack,
  	 * thus initialising the bio_list of new bios to be
  	 * added.  ->make_request() may indeed add some more bios
  	 * through a recursive call to generic_make_request.  If it
  	 * did, we find a non-NULL value in bio_list and re-enter the loop
  	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
  	 * bio_list, and call into ->make_request() again.
  	 */
  	BUG_ON(bio->bi_next);
  	bio_list_init(&bio_list_on_stack);
  	current->bio_list = &bio_list_on_stack;
  	do {
  		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
  		if (likely(blk_queue_enter(q, false) == 0)) {
  			ret = q->make_request_fn(q, bio);
  
  			blk_queue_exit(q);

  			bio = bio_list_pop(current->bio_list);
  		} else {
  			struct bio *bio_next = bio_list_pop(current->bio_list);
  
  			bio_io_error(bio);
  			bio = bio_next;
  		}
  	} while (bio);
  	current->bio_list = NULL; /* deactivate */
  
  out:
  	return ret;
  }
  EXPORT_SYMBOL(generic_make_request);
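
/*
 * Usage sketch (illustrative): asynchronous submission with a private
 * completion callback.  example_end_io() and the use of bi_private are
 * hypothetical; real callers wire these to their own state.  Note the
 * bio must not be touched after generic_make_request() returns.
 */
static void example_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	complete(done);
	bio_put(bio);
}

static void __maybe_unused generic_make_request_example(struct bio *bio,
						struct completion *done)
{
	bio->bi_private = done;
	bio->bi_end_io = example_end_io;
	generic_make_request(bio);
}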
  
  /**
   * submit_bio - submit a bio to the block device layer for I/O
   * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
   * @bio: The &struct bio which describes the I/O
   *
   * submit_bio() is very similar in purpose to generic_make_request(), and
   * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be set up beforehand and ready for I/O.
   *
   */
  blk_qc_t submit_bio(int rw, struct bio *bio)
  {
  	bio->bi_rw |= rw;

  	/*
  	 * If it's a regular read/write or a barrier with data attached,
  	 * go through the normal accounting stuff before submission.
  	 */
  	if (bio_has_data(bio)) {
  		unsigned int count;
  
  		if (unlikely(rw & REQ_WRITE_SAME))
  			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
  		else
  			count = bio_sectors(bio);
bf2de6f5a   Jens Axboe   block: Initial su...
1996
1997
1998
  		if (rw & WRITE) {
  			count_vm_events(PGPGOUT, count);
  		} else {
4f024f379   Kent Overstreet   block: Abstract o...
1999
  			task_io_account_read(bio->bi_iter.bi_size);
bf2de6f5a   Jens Axboe   block: Initial su...
2000
2001
2002
2003
2004
  			count_vm_events(PGPGIN, count);
  		}
  
  		if (unlikely(block_dump)) {
  			char b[BDEVNAME_SIZE];
8dcbdc742   San Mehat   block: block_dump...
2005
2006
  			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)
  ",
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
2007
  			current->comm, task_pid_nr(current),
bf2de6f5a   Jens Axboe   block: Initial su...
2008
  				(rw & WRITE) ? "WRITE" : "READ",
4f024f379   Kent Overstreet   block: Abstract o...
2009
  				(unsigned long long)bio->bi_iter.bi_sector,
8dcbdc742   San Mehat   block: block_dump...
2010
2011
  				bdevname(bio->bi_bdev, b),
  				count);
bf2de6f5a   Jens Axboe   block: Initial su...
2012
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2013
  	}
dece16353   Jens Axboe   block: change ->m...
2014
  	return generic_make_request(bio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2015
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2016
  EXPORT_SYMBOL(submit_bio);
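
/*
 * Illustrative sketch only: building a one-page read and pushing it into
 * the block layer.  submit_bio_wait() is the synchronous wrapper of this
 * era; the caller supplies @bdev and @page.
 */
static int example_read_one_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(READ, bio);	/* queues via submit_bio() */
	bio_put(bio);
	return ret;
}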

/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests against
 *    the new queue limits again when they dispatch those requests,
 *    although such checks are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (!rq_mergeable(rq))
		return 0;

	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
  EXPORT_SYMBOL_GPL(blk_rq_check_limits);
  
  /**
   * blk_insert_cloned_request - Helper for stacking drivers to submit a request
   * @q:  the queue to submit the request
   * @rq: the request being queued
   */
  int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
  {
  	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return -EIO;

	if (q->mq_ops) {
		if (blk_queue_io_stat(q))
			blk_account_io_start(rq, true);
		blk_mq_insert_request(rq, false, true, true);
		return 0;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dying(q))) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;
	}

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
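
/*
 * Illustrative sketch only: how a request-based stacking driver (in the
 * spirit of request-based dm) might dispatch a prepared clone to a lower
 * queue.  The wrapper and its error handling are assumptions, not kernel
 * API.
 */
static int example_dispatch_clone(struct request_queue *lower_q,
				  struct request *clone)
{
	int ret = blk_insert_cloned_request(lower_q, clone);

	/*
	 * -EIO means the clone violates lower_q's limits (see
	 * blk_rq_check_limits() above); -ENODEV means lower_q is dying.
	 * A stacking driver would typically requeue the original here.
	 */
	return ret;
}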

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 *
 * Context:
 *     queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
  EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}

#ifdef CONFIG_PM
  /*
   * Don't process normal requests when queue is suspended
   * or in the process of suspending/resuming
   */
  static struct request *blk_pm_peek_request(struct request_queue *q,
  					   struct request *rq)
  {
  	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
  	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
  		return NULL;
  	else
  		return rq;
  }
  #else
  static inline struct request *blk_pm_peek_request(struct request_queue *q,
  						  struct request *rq)
  {
  	return rq;
  }
  #endif
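
/*
 * Illustrative sketch only: a power-management request is tagged REQ_PM
 * so that blk_pm_peek_request() above lets it through while q->rpm_status
 * is not RPM_ACTIVE.  The request type chosen here is an assumption.
 */
static struct request *example_alloc_pm_request(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_NOIO);

	if (IS_ERR(rq))
		return rq;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->cmd_flags |= REQ_PM;	/* bypass a suspended queue */
	return rq;
}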
  void blk_account_io_start(struct request *rq, bool new_io)
  {
  	struct hd_struct *part;
  	int rw = rq_data_dir(rq);
  	int cpu;
  
  	if (!blk_do_io_stat(rq))
  		return;
  
  	cpu = part_stat_lock();
  
  	if (!new_io) {
  		part = rq->part;
  		part_stat_inc(cpu, part, merges[rw]);
  	} else {
  		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
  		if (!hd_struct_try_get(part)) {
  			/*
  			 * The partition is already being removed,
  			 * the request will be accounted on the disk only
  			 *
  			 * We take a reference on disk->part0 although that
  			 * partition will never be deleted, so we can treat
  			 * it as any other partition.
  			 */
  			part = &rq->rq_disk->part0;
  			hd_struct_get(part);
  		}
  		part_round_stats(cpu, part);
  		part_inc_in_flight(part, rw);
  		rq->part = part;
  	}
  
  	part_stat_unlock();
  }

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 *
 * Context:
 *     queue_lock must be held.
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {

		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->cmd_flags & REQ_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, -EIO);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
  EXPORT_SYMBOL(blk_peek_request);

void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * The time frame between a request being removed from the lists
	 * and when it is freed is accounted as I/O in progress on the
	 * driver side.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.
 *
 *     Block internal functions which don't want to start timer should
 *     call blk_dequeue_request().
 *
 * Context:
 *     queue_lock must be held.
 */
void blk_start_request(struct request *req)
{
	blk_dequeue_request(req);

	/*
	 * We are now handing the request to the hardware, initialize
	 * resid_len to full count and add the timeout handler.
	 */
	req->resid_len = blk_rq_bytes(req);
	if (unlikely(blk_bidi_rq(req)))
		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);

	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
  	blk_add_timer(req);
  }
  EXPORT_SYMBOL(blk_start_request);
  
  /**
   * blk_fetch_request - fetch a request from a request queue
   * @q: request queue to fetch a request from
   *
   * Description:
   *     Return the request at the top of @q.  The request is started on
   *     return and LLD can start processing it immediately.
   *
   * Return:
   *     Pointer to the request at the top of @q if available.  Null
   *     otherwise.
   *
   * Context:
   *     queue_lock must be held.
   */
  struct request *blk_fetch_request(struct request_queue *q)
  {
  	struct request *rq;
  
  	rq = blk_peek_request(q);
  	if (rq)
  		blk_start_request(rq);
  	return rq;
  }
  EXPORT_SYMBOL(blk_fetch_request);
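
/*
 * Illustrative sketch only (hypothetical driver): the canonical consumer
 * of blk_fetch_request() is a legacy request_fn that drains the queue and
 * completes each request.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/*
		 * A real driver would program the hardware here; this
		 * sketch just completes every request successfully.
		 * The queue lock is held when a request_fn runs, so the
		 * locked __blk_end_request_all() variant is used.
		 */
		__blk_end_request_all(rq, 0);
	}
}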
  
  /**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req->q, req, nr_bytes);

	if (!req->bio)
		return false;

	/*
	 * For fs requests, rq is just carrier of independent bio's
	 * and each partial completion should be handled separately.
	 * Reset per-request error on each partial completion.
	 *
	 * TODO: tj: This is too subtle.  It would be better to let
	 * low level drivers do what they see fit.
	 */
	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;

	if (error && req->cmd_type == REQ_TYPE_FS &&
	    !(req->cmd_flags & REQ_QUIET)) {
		char *error_type;

		switch (error) {
		case -ENOLINK:
			error_type = "recoverable transport";
			break;
		case -EREMOTEIO:
			error_type = "critical target";
			break;
		case -EBADE:
			error_type = "critical nexus";
			break;
		case -ETIMEDOUT:
			error_type = "timeout";
			break;
		case -ENOSPC:
			error_type = "critical space allocation";
			break;
		case -ENODATA:
			error_type = "critical medium";
			break;
		case -EIO:
		default:
			error_type = "I/O";
			break;
		}
		printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
				   __func__, error_type, req->rq_disk ?
				   req->rq_disk->disk_name : "?",
				   (unsigned long long)blk_rq_pos(req));
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (req->cmd_type == REQ_TYPE_FS)
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
	}

	/*
	 * If the total number of sectors is less than the first segment
	 * size, something has gone terribly wrong.
	 */
	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

	/* recalculate the number of segments */
	blk_recalc_rq_segments(req);

	return true;
}
  EXPORT_SYMBOL_GPL(blk_update_request);
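
/*
 * Illustrative sketch only: a request stacking driver completing the
 * first @done bytes of @rq.  If everything is done, the request is
 * finished; otherwise @rq now describes only the unfinished tail and can
 * be requeued.  The caller is assumed to hold the queue lock, as
 * blk_finish_request() requires.
 */
static void example_partial_complete(struct request *rq, unsigned int done)
{
	if (!blk_update_request(rq, 0, done))
		blk_finish_request(rq, 0);	/* no bytes left */
}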

static bool blk_update_bidi_request(struct request *rq, int error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}
  /**
   * blk_unprep_request - unprepare a request
   * @req:	the request
   *
   * This function makes a request ready for complete resubmission (or
   * completion).  It happens only after all error handling is complete,
   * so represents the appropriate moment to deallocate any resources
   * that were allocated to the request in the prep_rq_fn.  The queue
   * lock is held when calling this.
   */
  void blk_unprep_request(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	req->cmd_flags &= ~REQ_DONTPREP;
  	if (q->unprep_rq_fn)
  		q->unprep_rq_fn(q, req);
  }
  EXPORT_SYMBOL_GPL(blk_unprep_request);
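
/*
 * Illustrative sketch only (hypothetical driver): a prep_rq_fn/unprep_rq_fn
 * pair, registered with blk_queue_prep_rq() and blk_queue_unprep_rq().
 * prep allocates per-request state and sets REQ_DONTPREP so the request is
 * not prepped twice; blk_finish_request() later invokes the unprep hook
 * through blk_unprep_request() to free it.
 */
static int example_prep_rq(struct request_queue *q, struct request *rq)
{
	void *ctx = kzalloc(64, GFP_ATOMIC);	/* hypothetical context */

	if (!ctx)
		return BLKPREP_DEFER;		/* retry when memory frees up */

	rq->special = ctx;
	rq->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}

static void example_unprep_rq(struct request_queue *q, struct request *rq)
{
	kfree(rq->special);
	rq->special = NULL;
}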

/*
 * queue lock must be held
 */
void blk_finish_request(struct request *req, int error)
{
	if (req->cmd_flags & REQ_QUEUED)
		blk_queue_end_tag(req->q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);

	blk_delete_timer(req);

	if (req->cmd_flags & REQ_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}
  EXPORT_SYMBOL(blk_finish_request);

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that support bidi can safely call this member for any
 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_bidi_request(struct request *rq, int error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
  EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
  EXPORT_SYMBOL(blk_end_request_all);

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
  EXPORT_SYMBOL(blk_end_request_cur);

/**
   * blk_end_request_err - Finish a request till the next failure boundary.
   * @rq: the request to finish till the next failure boundary for
   * @error: must be negative errno
   *
   * Description:
   *     Complete @rq till the next failure boundary.
   *
   * Return:
   *     %false - we are done with this request
   *     %true  - still buffers pending for this request
   */
  bool blk_end_request_err(struct request *rq, int error)
  {
  	WARN_ON(error >= 0);
  	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
  }
  EXPORT_SYMBOL_GPL(blk_end_request_err);
  
  /**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
  EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
  EXPORT_SYMBOL(__blk_end_request_all);

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
  EXPORT_SYMBOL(__blk_end_request_cur);
  /**
   * __blk_end_request_err - Finish a request till the next failure boundary.
   * @rq: the request to finish till the next failure boundary for
   * @error: must be negative errno
   *
   * Description:
   *     Complete @rq till the next failure boundary.  Must be called
   *     with queue lock held.
   *
   * Return:
   *     %false - we are done with this request
   *     %true  - still buffers pending for this request
   */
  bool __blk_end_request_err(struct request *rq, int error)
  {
  	WARN_ON(error >= 0);
  	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
  }
  EXPORT_SYMBOL_GPL(__blk_end_request_err);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
  /**
   * blk_lld_busy - Check if underlying low-level drivers of a device are busy
   * @q : the queue of the device being checked
   *
   * Description:
   *    Check if underlying low-level drivers of a device are busy.
   *    If the drivers want to export their busy state, they must set own
   *    exporting function using blk_queue_lld_busy() first.
   *
   *    Basically, this function is used only by request stacking drivers
   *    to stop dispatching requests to underlying devices when underlying
   *    devices are busy.  This behavior helps more I/O merging on the queue
   *    of the request stacking driver and prevents I/O throughput regression
   *    on burst I/O load.
   *
   * Return:
   *    0 - Not busy (The request stacking driver should dispatch request)
   *    1 - Busy (The request stacking driver should stop dispatching request)
   */
  int blk_lld_busy(struct request_queue *q)
  {
  	if (q->lld_busy_fn)
  		return q->lld_busy_fn(q);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_lld_busy);
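
/*
 * Illustrative sketch only (hypothetical driver): exporting a busy state
 * via blk_queue_lld_busy(q, example_lld_busy).  The device structure and
 * its counters are assumptions.
 */
struct example_dev {
	atomic_t in_flight;
	int queue_depth;
};

static int example_lld_busy(struct request_queue *q)
{
	struct example_dev *dev = q->queuedata;

	return atomic_read(&dev->in_flight) >= dev->queue_depth;
}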
  /**
   * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
   * @rq: the clone request to be cleaned up
   *
   * Description:
   *     Free all bios in @rq for a cloned request.
   */
  void blk_rq_unprep_clone(struct request *rq)
  {
  	struct bio *bio;
  
  	while ((bio = rq->bio) != NULL) {
  		rq->bio = bio->bi_next;
  
  		bio_put(bio);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
  
  /*
   * Copy attributes of the original request to the clone request.
   * The actual data parts (e.g. ->cmd, ->sense) are not copied.
   */
  static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
	dst->cmd_type = src->cmd_type;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
  }
  
  /**
   * blk_rq_prep_clone - Helper function to setup clone request
   * @rq: the request to be setup
   * @rq_src: original request to be cloned
   * @bs: bio_set that bios for clone are allocated from
   * @gfp_mask: memory allocation mask for bio
   * @bio_ctr: setup function to be called for each clone bio.
   *           Returns %0 for success, non %0 for failure.
   * @data: private data to be passed to @bio_ctr
   *
   * Description:
   *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
   *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
   *     are not copied, and copying such parts is the caller's responsibility.
   *     Also, pages which the original bios are pointing to are not copied
   *     and the cloned bios just point same pages.
   *     So cloned bios must be completed before original bios, which means
   *     the caller must complete @rq before @rq_src.
   */
  int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
  		      struct bio_set *bs, gfp_t gfp_mask,
  		      int (*bio_ctr)(struct bio *, struct bio *, void *),
  		      void *data)
  {
  	struct bio *bio, *bio_src;
  
  	if (!bs)
  		bs = fs_bio_set;
  
  	__rq_for_each_bio(bio_src, rq_src) {
  		bio = bio_clone_fast(bio_src, gfp_mask, bs);
  		if (!bio)
  			goto free_and_out;
  
  		if (bio_ctr && bio_ctr(bio, bio_src, data))
  			goto free_and_out;
  
  		if (rq->bio) {
  			rq->biotail->bi_next = bio;
  			rq->biotail = bio;
  		} else
  			rq->bio = rq->biotail = bio;
  	}
  
  	__blk_rq_prep_clone(rq, rq_src);
  
  	return 0;
  
  free_and_out:
  	if (bio)
  		bio_put(bio);
  	blk_rq_unprep_clone(rq);
  
  	return -ENOMEM;
  }
  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
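
/*
 * Illustrative sketch only: the typical blk_rq_prep_clone() caller is a
 * request-based stacking driver cloning an original request before
 * dispatching it (see blk_insert_cloned_request() above).  Passing a NULL
 * bio_set falls back to fs_bio_set, per the function body.
 */
static int example_setup_clone(struct request *clone, struct request *orig)
{
	int ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC,
				    NULL, NULL);	/* no per-bio ctor */

	if (ret)
		return ret;	/* -ENOMEM: partial clones already freed */

	clone->end_io = NULL;	/* stacking driver sets its own later */
	return 0;
}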

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
  EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_delayed_work(struct delayed_work *dwork,
				  unsigned long delay)
{
	return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

  int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
  				     unsigned long delay)
  {
  	return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
  /**
   * blk_start_plug - initialize blk_plug and track it inside the task_struct
   * @plug:	The &struct blk_plug that needs to be initialized
   *
   * Description:
   *   Tracking blk_plug inside the task_struct will help with auto-flushing the
   *   pending I/O should the task end up blocking between blk_start_plug() and
   *   blk_finish_plug(). This is important from a performance perspective, but
   *   also ensures that we don't deadlock. For instance, if the task is blocking
   *   for a memory allocation, memory reclaim could end up wanting to free a
   *   page belonging to that request that is currently residing in our private
   *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
   *   this kind of deadlock.
   */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
  }
  EXPORT_SYMBOL(blk_start_plug);
  
  static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  {
  	struct request *rqa = container_of(a, struct request, queuelist);
  	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q < rqb->q ||
		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
  }

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context.  We do this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the original
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	trace_block_unplug(q, depth, !from_schedule);

	if (from_schedule)
		blk_run_queue_async(q);
	else
		__blk_run_queue(q);
	spin_unlock(q->queue_lock);
  }

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}
  struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
  				      int size)
  {
  	struct blk_plug *plug = current->plug;
  	struct blk_plug_cb *cb;
  
  	if (!plug)
  		return NULL;
  
  	list_for_each_entry(cb, &plug->cb_list, list)
  		if (cb->callback == unplug && cb->data == data)
  			return cb;
  
  	/* Not currently on the callback list */
  	BUG_ON(size < sizeof(*cb));
  	cb = kzalloc(size, GFP_ATOMIC);
  	if (cb) {
  		cb->data = data;
  		cb->callback = unplug;
  		list_add(&cb->list, &plug->cb_list);
  	}
  	return cb;
  }
  EXPORT_SYMBOL(blk_check_plugged);
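
/*
 * Illustrative sketch only: blk_check_plugged() is meant for stacking
 * drivers (md is the in-tree user) that want a per-task callback run at
 * unplug time.  The embedded-struct pattern below mirrors that usage;
 * the structure and unplug function are hypothetical.
 */
struct example_plug_cb {
	struct blk_plug_cb cb;		/* must come first for container_of */
	int pending;			/* hypothetical driver state */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);

	/* flush ecb->pending work to the hardware here */
	kfree(ecb);	/* the callback owns the allocation */
}

static void example_note_io(void *data)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, data,
						   sizeof(struct example_plug_cb));

	if (cb)
		container_of(cb, struct example_plug_cb, cb)->pending++;
	/* if cb is NULL the task is not plugged; dispatch immediately */
}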

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);

	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);
	list_sort(NULL, &list, plug_rq_cmp);

	q = NULL;
	depth = 0;

	/*
	 * Save and disable interrupts here, to avoid doing it for every
	 * queue lock we have to take.
	 */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}

		/*
		 * Short-circuit if @q is dead
		 */
		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}

		/*
		 * rq is already accounted, so use raw insert
		 */
		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}

void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

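/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * submitters batch I/O by bracketing submission with blk_start_plug()
 * and blk_finish_plug().  Requests queued in between sit on the per-task
 * plug lists and are flushed in one go, either here or earlier if the
 * task schedules out while plugged.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... any number of submit_bio() calls ...
 *	blk_finish_plug(&plug);
 */
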
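/*
 * blk_poll - poll for completion of the request identified by @cookie.
 * (Summary added for readability.)  Spin in the caller's context,
 * invoking the driver's ->poll hook on the hardware queue named by the
 * cookie, until the request completes, the task needs to reschedule, or
 * a signal is pending.  Returns true if polling completed a request or
 * the task became runnable, false otherwise.
 */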
  bool blk_poll(struct request_queue *q, blk_qc_t cookie)
  {
  	struct blk_plug *plug;
  	long state;
  
  	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
  	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
  		return false;
  
  	plug = current->plug;
  	if (plug)
  		blk_flush_plug_list(plug, false);
  
  	state = current->state;
  	while (!need_resched()) {
  		unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
  		struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
  		int ret;
  
  		hctx->poll_invoked++;
  
  		ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
  		if (ret > 0) {
  			hctx->poll_success++;
  			set_current_state(TASK_RUNNING);
  			return true;
  		}
  
  		if (signal_pending_state(state, current))
  			set_current_state(TASK_RUNNING);
  
  		if (current->state == TASK_RUNNING)
  			return true;
  		if (ret < 0)
  			break;
  		cpu_relax();
  	}
  
  	return false;
  }
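
/*
 * A minimal usage sketch (illustrative only, not part of this file),
 * modelled on the direct-I/O completion wait: the cookie returned by
 * submit_bio() identifies the hardware queue and tag to poll.  "done"
 * stands for whatever completion flag the caller's bi_end_io handler
 * sets and is a placeholder here.
 *
 *	blk_qc_t cookie = submit_bio(READ, bio);
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		if (!blk_poll(bdev_get_queue(bdev), cookie))
 *			io_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */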
  #ifdef CONFIG_PM
/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, but while runtime PM is still
 *    impossible (either because it is disabled/forbidden or because the
 *    usage_count is > 0). In most cases a driver should call this function
 *    before any I/O has taken place.
 *
 *    This function sets up autosuspend for the device; the autosuspend
 *    delay is initialized to -1, which makes runtime suspend impossible
 *    until an updated value is set, either by the user or by the driver.
 *    Drivers do not need to touch any other autosuspend setting.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit, not for those that use bios
 *    directly.
 */
  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  {
  	q->dev = dev;
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_set_autosuspend_delay(q->dev, -1);
  	pm_runtime_use_autosuspend(q->dev);
  }
  EXPORT_SYMBOL(blk_pm_runtime_init);
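
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * request-based driver wires this up once the device and its request
 * queue both exist, typically at probe time; "q" and "dev" below are
 * hypothetical.  Runtime suspend remains impossible until the -1 delay
 * is replaced, either from user space via sysfs or by the driver:
 *
 *	blk_pm_runtime_init(q, dev);
 *	...
 *	pm_runtime_set_autosuspend_delay(dev, 5000);	(e.g. allow suspend
 *							 after 5s of idleness)
 */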
  
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    Check whether runtime suspend is allowed for the device by examining
 *    whether any requests are pending in the queue. If requests are
 *    pending, the device cannot be runtime suspended; otherwise, the
 *    queue's status is updated to RPM_SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    If suspend is not allowed, the device is marked as last busy so that
 *    the runtime PM core will try to autosuspend it again later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0		- OK to runtime suspend the device
 *    -EBUSY	- Device should not be runtime suspended
 */
  int blk_pre_runtime_suspend(struct request_queue *q)
  {
  	int ret = 0;
  
  	spin_lock_irq(q->queue_lock);
  	if (q->nr_pending) {
  		ret = -EBUSY;
  		pm_runtime_mark_last_busy(q->dev);
  	} else {
  		q->rpm_status = RPM_SUSPENDING;
  	}
  	spin_unlock_irq(q->queue_lock);
  	return ret;
  }
  EXPORT_SYMBOL(blk_pre_runtime_suspend);
  
/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of
 *    the device's runtime_suspend function. On failure, the device is
 *    marked as last busy so that the PM core will try to autosuspend it
 *    again at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
  void blk_post_runtime_suspend(struct request_queue *q, int err)
  {
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_SUSPENDED;
  	} else {
  		q->rpm_status = RPM_ACTIVE;
  		pm_runtime_mark_last_busy(q->dev);
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_suspend);
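
/*
 * A minimal sketch of a driver's runtime_suspend callback (illustrative
 * only, not part of this file), showing how the two helpers above pair
 * up.  "mydev_quiesce" stands for the device-specific suspend work and
 * is hypothetical; "q" is the device's request queue.
 *
 *	static int mydev_runtime_suspend(struct device *dev)
 *	{
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;	(requests pending: stay active)
 *
 *		err = mydev_quiesce(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 */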
  
/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RPM_RESUMING in preparation for
 *    the runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
  void blk_pre_runtime_resume(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	q->rpm_status = RPM_RESUMING;
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_pre_runtime_resume);
  
/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of
 *    the device's runtime_resume function. If the device was successfully
 *    resumed, process the requests that were queued while it was resuming,
 *    then mark it as last busy and schedule an autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
  void blk_post_runtime_resume(struct request_queue *q, int err)
  {
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_ACTIVE;
  		__blk_run_queue(q);
  		pm_runtime_mark_last_busy(q->dev);
		pm_request_autosuspend(q->dev);
  	} else {
  		q->rpm_status = RPM_SUSPENDED;
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_resume);
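
/*
 * A minimal sketch of the matching runtime_resume callback (illustrative
 * only, not part of this file); "mydev_wakeup" stands for the
 * device-specific resume work and is hypothetical.
 *
 *	static int mydev_runtime_resume(struct device *dev)
 *	{
 *		int err;
 *
 *		blk_pre_runtime_resume(q);
 *		err = mydev_wakeup(dev);
 *		blk_post_runtime_resume(q, err);
 *		return err;
 *	}
 */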
  #endif

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
			FIELD_SIZEOF(struct request, cmd_flags));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}