Blame view

block/blk-core.c 98.4 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   * Copyright (C) 1991, 1992 Linus Torvalds
   * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6728cb0e6   Jens Axboe   block: make core ...
   * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   *	-  July 2000
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
   */
  
  /*
   * This handles all read/write requests to block devices
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/backing-dev.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
320ae51fe   Jens Axboe   blk-mq: new multi...
  #include <linux/blk-mq.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  #include <linux/highmem.h>
  #include <linux/mm.h>
  #include <linux/kernel_stat.h>
  #include <linux/string.h>
  #include <linux/init.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  #include <linux/completion.h>
  #include <linux/slab.h>
  #include <linux/swap.h>
  #include <linux/writeback.h>
faccbd4b2   Andrew Morton   [PATCH] io-accoun...
  #include <linux/task_io_accounting_ops.h>
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
  #include <linux/fault-inject.h>
73c101011   Jens Axboe   block: initial pa...
  #include <linux/list_sort.h>
e3c78ca52   Tejun Heo   block: reorganize...
  #include <linux/delay.h>
aaf7c6806   Tejun Heo   block: fix elvpri...
  #include <linux/ratelimit.h>
6c9546675   Lin Ming   block: add runtim...
  #include <linux/pm_runtime.h>
eea8f41cc   Tejun Heo   blkcg: move block...
  #include <linux/blk-cgroup.h>
18fbda91c   Omar Sandoval   block: use same b...
  #include <linux/debugfs.h>
55782138e   Li Zefan   tracing/events: c...
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/block.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

8324aa91d   Jens Axboe   block: split tag ...
  #include "blk.h"
43a5e4e21   Ming Lei   block: blk-mq: su...
  #include "blk-mq.h"
bd166ef18   Jens Axboe   blk-mq-sched: add...
  #include "blk-mq-sched.h"
87760e5ee   Jens Axboe   block: hook up wr...
  #include "blk-wbt.h"
8324aa91d   Jens Axboe   block: split tag ...

18fbda91c   Omar Sandoval   block: use same b...
  #ifdef CONFIG_DEBUG_FS
  struct dentry *blk_debugfs_root;
  #endif
d07335e51   Mike Snitzer   block: Rename "bl...
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
b0da3f0da   Jun'ichi Nomura   Add a tracepoint ...
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
0a82a8d13   Linus Torvalds   Revert "block: ad...
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
3291fa57c   Keith Busch   NVMe: Add tracepo...
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
cbae8d45d   NeilBrown   block: export blo...
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
0bfc24559   Ingo Molnar   blktrace: port to...

a73f730d0   Tejun Heo   block, cfq: move ...
  DEFINE_IDA(blk_queue_ida);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  /*
   * For the allocated request tables
   */
d674d4145   Wei Tang   block: do not ini...
  struct kmem_cache *request_cachep;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  
  /*
   * For queue allocation
   */
6728cb0e6   Jens Axboe   block: make core ...
  struct kmem_cache *blk_requestq_cachep;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   * Controlling structure to kblockd
   */
ff856bad6   Jens Axboe   [BLOCK] ll_rw_blk...
  static struct workqueue_struct *kblockd_workqueue;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

d40f75a06   Tejun Heo   writeback, blkcg:...
  static void blk_clear_congested(struct request_list *rl, int sync)
  {
d40f75a06   Tejun Heo   writeback, blkcg:...
  #ifdef CONFIG_CGROUP_WRITEBACK
  	clear_wb_congested(rl->blkg->wb_congested, sync);
  #else
482cf79cd   Tejun Heo   writeback, blkcg:...
  	/*
  	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
  	 * flip its congestion state for events on other blkcgs.
  	 */
  	if (rl == &rl->q->root_rl)
dc3b17cc8   Jan Kara   block: Use pointe...
  		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
d40f75a06   Tejun Heo   writeback, blkcg:...
  #endif
  }
  
  static void blk_set_congested(struct request_list *rl, int sync)
  {
d40f75a06   Tejun Heo   writeback, blkcg:...
  #ifdef CONFIG_CGROUP_WRITEBACK
  	set_wb_congested(rl->blkg->wb_congested, sync);
  #else
482cf79cd   Tejun Heo   writeback, blkcg:...
  	/* see blk_clear_congested() */
  	if (rl == &rl->q->root_rl)
dc3b17cc8   Jan Kara   block: Use pointe...
  		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
d40f75a06   Tejun Heo   writeback, blkcg:...
  #endif
  }
8324aa91d   Jens Axboe   block: split tag ...
  void blk_queue_congestion_threshold(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
  	int nr;
  
  	nr = q->nr_requests - (q->nr_requests / 8) + 1;
  	if (nr > q->nr_requests)
  		nr = q->nr_requests;
  	q->nr_congestion_on = nr;
  
  	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  	if (nr < 1)
  		nr = 1;
  	q->nr_congestion_off = nr;
  }
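
  /*
   * Worked example (editor's note): with the default q->nr_requests of
   * 128 the thresholds come out as
   *
   *	nr_congestion_on  = 128 - 128/8 + 1          = 113
   *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
   *
   * so the queue is flagged congested once 113 requests are allocated
   * and the flag is only cleared again below 103, giving the two
   * thresholds a hysteresis band.
   */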
2a4aa30c5   FUJITA Tomonori   block: rename and...
  void blk_rq_init(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
1afb20f30   FUJITA Tomonori   block: make rq_in...
  	memset(rq, 0, sizeof(*rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	INIT_LIST_HEAD(&rq->queuelist);
242f9dcb8   Jens Axboe   block: unify requ...
  	INIT_LIST_HEAD(&rq->timeout_list);
c7c22e4d5   Jens Axboe   block: add suppor...
  	rq->cpu = -1;
63a713867   Jens Axboe   block: fixup rq_i...
  	rq->q = q;
a2dec7b36   Tejun Heo   block: hide reque...
  	rq->__sector = (sector_t) -1;
2e662b65f   Jens Axboe   [PATCH] elevator:...
  	INIT_HLIST_NODE(&rq->hash);
  	RB_CLEAR_NODE(&rq->rb_node);
63a713867   Jens Axboe   block: fixup rq_i...
  	rq->tag = -1;
bd166ef18   Jens Axboe   blk-mq-sched: add...
  	rq->internal_tag = -1;
b243ddcbe   Tejun Heo   block: move rq->s...
  	rq->start_time = jiffies;
9195291e5   Divyesh Shah   blkio: Increment ...
  	set_start_time_ns(rq);
09e099d4b   Jerome Marchand   block: fix accoun...
  	rq->part = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
2a4aa30c5   FUJITA Tomonori   block: rename and...
  EXPORT_SYMBOL(blk_rq_init);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

2a842acab   Christoph Hellwig   block: introduce ...
  static const struct {
  	int		errno;
  	const char	*name;
  } blk_errors[] = {
  	[BLK_STS_OK]		= { 0,		"" },
  	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
  	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
  	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
  	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
  	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
  	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
  	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
  	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
  	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
03a07c92a   Goldwyn Rodrigues   block: return on ...
  	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
2a842acab   Christoph Hellwig   block: introduce ...

4e4cbee93   Christoph Hellwig   block: switch bio...
  	/* device mapper special case, should not leak out: */
  	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
2a842acab   Christoph Hellwig   block: introduce ...
  	/* everything else not covered above: */
  	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
  };
  
  blk_status_t errno_to_blk_status(int errno)
  {
  	int i;
  
  	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
  		if (blk_errors[i].errno == errno)
  			return (__force blk_status_t)i;
  	}
  
  	return BLK_STS_IOERR;
  }
  EXPORT_SYMBOL_GPL(errno_to_blk_status);
  
  int blk_status_to_errno(blk_status_t status)
  {
  	int idx = (__force int)status;
34bd9c1c4   Bart Van Assche   block: Fix off-by...
  	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
2a842acab   Christoph Hellwig   block: introduce ...
  		return -EIO;
  	return blk_errors[idx].errno;
  }
  EXPORT_SYMBOL_GPL(blk_status_to_errno);
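
  /*
   * Example (editor's sketch): the two helpers above are inverses for
   * any status with a dedicated errno in blk_errors[], e.g.
   *
   *	errno_to_blk_status(-ENOSPC)       == BLK_STS_NOSPC
   *	blk_status_to_errno(BLK_STS_NOSPC) == -ENOSPC
   *
   * while an errno without its own entry falls back to the catch-all:
   *
   *	errno_to_blk_status(-EINVAL)       == BLK_STS_IOERR
   */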
  
  static void print_req_error(struct request *req, blk_status_t status)
  {
  	int idx = (__force int)status;
34bd9c1c4   Bart Van Assche   block: Fix off-by...
  	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
2a842acab   Christoph Hellwig   block: introduce ...
  		return;
  
  	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
  			   __func__, blk_errors[idx].name, req->rq_disk ?
  			   req->rq_disk->disk_name : "?",
  			   (unsigned long long)blk_rq_pos(req));
  }
5bb23a688   NeilBrown   Don't decrement b...
  static void req_bio_endio(struct request *rq, struct bio *bio,
2a842acab   Christoph Hellwig   block: introduce ...
  			  unsigned int nbytes, blk_status_t error)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
78d8e58a0   Mike Snitzer   Revert "block, dm...
  	if (error)
4e4cbee93   Christoph Hellwig   block: switch bio...
  		bio->bi_status = error;
797e7dbbe   Tejun Heo   [BLOCK] reimpleme...

e80640213   Christoph Hellwig   block: split out ...
  	if (unlikely(rq->rq_flags & RQF_QUIET))
b7c44ed9d   Jens Axboe   block: manipulate...
  		bio_set_flag(bio, BIO_QUIET);
08bafc034   Keith Mannthey   block: Supress Bu...

f79ea4161   Kent Overstreet   block: Refactor b...
  	bio_advance(bio, nbytes);
7ba1ba12e   Martin K. Petersen   block: Block laye...

143a87f4c   Tejun Heo   block: improve fl...
  	/* don't actually finish bio if it's part of flush sequence */
e80640213   Christoph Hellwig   block: split out ...
  	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
4246a0b63   Christoph Hellwig   block: add a bi_e...
  		bio_endio(bio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  void blk_dump_rq_flags(struct request *rq, char *msg)
  {
aebf526b5   Christoph Hellwig   block: fold cmd_t...
  	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
  		rq->rq_disk ? rq->rq_disk->disk_name : "?",
5953316db   Jens Axboe   block: make rq->c...
  		(unsigned long long) rq->cmd_flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

83096ebf1   Tejun Heo   block: convert to...
  	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
  	       (unsigned long long)blk_rq_pos(rq),
  	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
b4f42e283   Jens Axboe   block: remove str...
  	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
  	       rq->bio, rq->biotail, blk_rq_bytes(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  EXPORT_SYMBOL(blk_dump_rq_flags);
3cca6dc1c   Jens Axboe   block: add API fo...
  static void blk_delay_work(struct work_struct *work)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
3cca6dc1c   Jens Axboe   block: add API fo...
  	struct request_queue *q;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

3cca6dc1c   Jens Axboe   block: add API fo...
  	q = container_of(work, struct request_queue, delay_work.work);
  	spin_lock_irq(q->queue_lock);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  	__blk_run_queue(q);
3cca6dc1c   Jens Axboe   block: add API fo...
  	spin_unlock_irq(q->queue_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  
  /**
3cca6dc1c   Jens Axboe   block: add API fo...
   * blk_delay_queue - restart queueing after defined interval
   * @q:		The &struct request_queue in question
   * @msecs:	Delay in msecs
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *
   * Description:
3cca6dc1c   Jens Axboe   block: add API fo...
   *   Sometimes queueing needs to be postponed for a little while, to allow
   *   resources to come back. This function will make sure that queueing is
2fff8a924   Bart Van Assche   block: Check lock...
   *   restarted around the specified time.
3cca6dc1c   Jens Axboe   block: add API fo...
   */
  void blk_delay_queue(struct request_queue *q, unsigned long msecs)
2ad8b1ef1   Alan D. Brunelle   Add UNPLUG traces...
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

704605711   Bart Van Assche   block: Avoid sche...
  	if (likely(!blk_queue_dead(q)))
  		queue_delayed_work(kblockd_workqueue, &q->delay_work,
  				   msecs_to_jiffies(msecs));
2ad8b1ef1   Alan D. Brunelle   Add UNPLUG traces...
  }
3cca6dc1c   Jens Axboe   block: add API fo...
  EXPORT_SYMBOL(blk_delay_queue);
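
  /*
   * Example (editor's sketch, hypothetical mydev_* helpers): a legacy
   * ->request_fn, which runs with the queue lock held, might use
   * blk_delay_queue() to back off while the hardware is busy:
   *
   *	static void mydev_request_fn(struct request_queue *q)
   *	{
   *		struct request *rq;
   *
   *		while ((rq = blk_fetch_request(q)) != NULL) {
   *			if (!mydev_hw_ready()) {
   *				blk_requeue_request(q, rq);
   *				blk_delay_queue(q, 5);
   *				return;
   *			}
   *			mydev_dispatch(rq);
   *		}
   *	}
   */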
2ad8b1ef1   Alan D. Brunelle   Add UNPLUG traces...

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  /**
21491412f   Jens Axboe   block: add blk_st...
   * blk_start_queue_async - asynchronously restart a previously stopped queue
   * @q:    The &struct request_queue in question
   *
   * Description:
   *   blk_start_queue_async() will clear the stop flag on the queue, and
   *   ensure that the request_fn for the queue is run from an async
   *   context.
   **/
  void blk_start_queue_async(struct request_queue *q)
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

21491412f   Jens Axboe   block: add blk_st...
  	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
  	blk_run_queue_async(q);
  }
  EXPORT_SYMBOL(blk_start_queue_async);
  
  /**
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   * blk_start_queue - restart a previously stopped queue
165125e1e   Jens Axboe   [BLOCK] Get rid o...
   * @q:    The &struct request_queue in question
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *
   * Description:
   *   blk_start_queue() will clear the stop flag on the queue, and call
   *   the request_fn for the queue if it was in a stopped state when
2fff8a924   Bart Van Assche   block: Check lock...
   *   entered. Also see blk_stop_queue().
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   **/
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  void blk_start_queue(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
4ddd56b00   Bart Van Assche   block: Relax a ch...
  	WARN_ON(!in_interrupt() && !irqs_disabled());
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
a038e2536   Paolo 'Blaisorblade' Giarrusso   [PATCH] blk_start...

75ad23bc0   Nick Piggin   block: make queue...
  	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  	__blk_run_queue(q);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  EXPORT_SYMBOL(blk_start_queue);
  
  /**
   * blk_stop_queue - stop a queue
165125e1e   Jens Axboe   [BLOCK] Get rid o...
   * @q:    The &struct request_queue in question
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *
   * Description:
   *   The Linux block layer assumes that a block driver will consume all
   *   entries on the request queue when the request_fn strategy is called.
   *   Often this will not happen, because of hardware limitations (queue
   *   depth settings). If a device driver gets a 'queue full' response,
   *   or if it simply chooses not to queue more I/O at one point, it can
   *   call this function to prevent the request_fn from being called until
   *   the driver has signalled it's ready to go again. This happens by calling
2fff8a924   Bart Van Assche   block: Check lock...
   *   blk_start_queue() to restart queue operations.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   **/
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  void blk_stop_queue(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

136b5721d   Tejun Heo   workqueue: deprec...
  	cancel_delayed_work(&q->delay_work);
75ad23bc0   Nick Piggin   block: make queue...
  	queue_flag_set(QUEUE_FLAG_STOPPED, q);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
  EXPORT_SYMBOL(blk_stop_queue);
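
  /*
   * Example (editor's sketch): a driver that gets a 'queue full'
   * response from its hardware typically stops the queue and restarts
   * it from its completion path, both under the queue lock:
   *
   *	spin_lock_irqsave(q->queue_lock, flags);
   *	blk_stop_queue(q);
   *	spin_unlock_irqrestore(q->queue_lock, flags);
   *
   * and later, once resources are available again:
   *
   *	spin_lock_irqsave(q->queue_lock, flags);
   *	blk_start_queue(q);
   *	spin_unlock_irqrestore(q->queue_lock, flags);
   */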
  
  /**
   * blk_sync_queue - cancel any pending callbacks on a queue
   * @q: the queue
   *
   * Description:
   *     The block layer may perform asynchronous callback activity
   *     on a queue, such as calling the unplug function after a timeout.
   *     A block device may call blk_sync_queue to ensure that any
   *     such activity is cancelled, thus allowing it to release resources
59c51591a   Michael Opdenacker   Fix occurrences o...
   *     that the callbacks might use. The caller must already have made sure
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *     that its ->make_request_fn will not re-add plugging prior to calling
   *     this function.
   *
da5277700   Vivek Goyal   block: Move blk_t...
   *     This function does not cancel any asynchronous activity arising
da3dae54e   Masanari Iida   Documentation: Do...
   *     out of elevator or throttling code. That would require elevator_exit()
5efd61135   Tejun Heo   blkcg: add blkcg_...
   *     and blkcg_exit_queue() to be called with queue lock initialized.
da5277700   Vivek Goyal   block: Move blk_t...
   *
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   */
  void blk_sync_queue(struct request_queue *q)
  {
70ed28b92   Jens Axboe   block: leave the ...
  	del_timer_sync(&q->timeout);
77a38e88c   Bart Van Assche   block: Fix a race...
  	cancel_work_sync(&q->timeout_work);
f04c1fe76   Ming Lei   block: blk-mq: ma...
  
  	if (q->mq_ops) {
  		struct blk_mq_hw_ctx *hctx;
  		int i;
a4000d951   Bart Van Assche   blk-mq: Avoid tha...
  		cancel_delayed_work_sync(&q->requeue_work);
21c6e939a   Jens Axboe   blk-mq: unify hct...
  		queue_for_each_hw_ctx(q, hctx, i)
9f9937379   Jens Axboe   blk-mq: unify hct...
  			cancel_delayed_work_sync(&hctx->run_work);
f04c1fe76   Ming Lei   block: blk-mq: ma...
  	} else {
  		cancel_delayed_work_sync(&q->delay_work);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
  EXPORT_SYMBOL(blk_sync_queue);
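
  /*
   * Example (editor's sketch): teardown paths usually quiesce the queue
   * before calling blk_sync_queue() so that nothing re-arms the timers
   * and work items it cancels (mydev_release_resources is hypothetical):
   *
   *	spin_lock_irq(q->queue_lock);
   *	blk_stop_queue(q);
   *	spin_unlock_irq(q->queue_lock);
   *
   *	blk_sync_queue(q);
   *	mydev_release_resources();
   */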
  
  /**
c246e80d8   Bart Van Assche   block: Avoid that...
   * __blk_run_queue_uncond - run a queue whether or not it has been stopped
   * @q:	The queue to run
   *
   * Description:
   *    Invoke request handling on a queue if there are any pending requests.
   *    May be used to restart request handling after a request has completed.
   *    This variant runs the queue whether or not the queue has been
   *    stopped. Must be called with the queue lock held and interrupts
   *    disabled. See also @blk_run_queue.
   */
  inline void __blk_run_queue_uncond(struct request_queue *q)
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

c246e80d8   Bart Van Assche   block: Avoid that...
  	if (unlikely(blk_queue_dead(q)))
  		return;
24faf6f60   Bart Van Assche   block: Make blk_c...
  	/*
  	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
  	 * the queue lock internally. As a result multiple threads may be
  	 * running such a request function concurrently. Keep track of the
  	 * number of active request_fn invocations such that blk_drain_queue()
  	 * can wait until all these request_fn calls have finished.
  	 */
  	q->request_fn_active++;
c246e80d8   Bart Van Assche   block: Avoid that...
  	q->request_fn(q);
24faf6f60   Bart Van Assche   block: Make blk_c...
  	q->request_fn_active--;
c246e80d8   Bart Van Assche   block: Avoid that...
  }
a7928c157   Christoph Hellwig   block: move PM re...
  EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
c246e80d8   Bart Van Assche   block: Avoid that...
  
  /**
80a4b58e3   Jens Axboe   block: only call ...
   * __blk_run_queue - run a single device queue
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   * @q:	The queue to run
80a4b58e3   Jens Axboe   block: only call ...
   *
   * Description:
2fff8a924   Bart Van Assche   block: Check lock...
   *    See @blk_run_queue.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   */
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  void __blk_run_queue(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

a538cd03b   Tejun Heo   block: merge blk_...
  	if (unlikely(blk_queue_stopped(q)))
  		return;
c246e80d8   Bart Van Assche   block: Avoid that...
  	__blk_run_queue_uncond(q);
75ad23bc0   Nick Piggin   block: make queue...
  }
  EXPORT_SYMBOL(__blk_run_queue);
dac07ec12   Jens Axboe   [BLOCK] limit req...

75ad23bc0   Nick Piggin   block: make queue...
  /**
24ecfbe27   Christoph Hellwig   block: add blk_ru...
   * blk_run_queue_async - run a single device queue in workqueue context
   * @q:	The queue to run
   *
   * Description:
   *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
2fff8a924   Bart Van Assche   block: Check lock...
   *    of us.
   *
   * Note:
   *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
   *    has canceled q->delay_work, callers must hold the queue lock to avoid
   *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
24ecfbe27   Christoph Hellwig   block: add blk_ru...
   */
  void blk_run_queue_async(struct request_queue *q)
  {
2fff8a924   Bart Van Assche   block: Check lock...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...

704605711   Bart Van Assche   block: Avoid sche...
  	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
e7c2f9674   Tejun Heo   workqueue: use mo...
  		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  }
c21e6beba   Jens Axboe   block: get rid of...
  EXPORT_SYMBOL(blk_run_queue_async);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  
  /**
75ad23bc0   Nick Piggin   block: make queue...
   * blk_run_queue - run a single device queue
   * @q: The queue to run
80a4b58e3   Jens Axboe   block: only call ...
   *
   * Description:
   *    Invoke request handling on this queue, if it has pending work to do.
a7f557923   Tejun Heo   block: kill blk_s...
   *    May be used to restart queueing when a request has completed.
75ad23bc0   Nick Piggin   block: make queue...
   */
  void blk_run_queue(struct request_queue *q)
  {
  	unsigned long flags;
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
75ad23bc0   Nick Piggin   block: make queue...
  	spin_lock_irqsave(q->queue_lock, flags);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
  	__blk_run_queue(q);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  EXPORT_SYMBOL(blk_run_queue);
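
  /*
   * Example (editor's sketch): completion handlers often kick the queue
   * once a request finishes so that queued requests get dispatched
   * again (struct mydev is hypothetical):
   *
   *	static void mydev_complete(struct mydev *dev, struct request *rq)
   *	{
   *		blk_end_request_all(rq, BLK_STS_OK);
   *		blk_run_queue(dev->queue);
   *	}
   *
   * blk_run_queue() takes the queue lock itself; a caller that already
   * holds q->queue_lock must use __blk_run_queue() instead.
   */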
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  void blk_put_queue(struct request_queue *q)
483f4afc4   Al Viro   [PATCH] fix sysfs...
  {
  	kobject_put(&q->kobj);
  }
d86e0e83b   Jens Axboe   block: export blk...
  EXPORT_SYMBOL(blk_put_queue);
483f4afc4   Al Viro   [PATCH] fix sysfs...

e3c78ca52   Tejun Heo   block: reorganize...
  /**
807592a4f   Bart Van Assche   block: Let blk_dr...
   * __blk_drain_queue - drain requests from request_queue
e3c78ca52   Tejun Heo   block: reorganize...
   * @q: queue to drain
c9a929dde   Tejun Heo   block: fix reques...
   * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
e3c78ca52   Tejun Heo   block: reorganize...
   *
c9a929dde   Tejun Heo   block: fix reques...
   * Drain requests from @q.  If @drain_all is set, all requests are drained.
   * If not, only ELVPRIV requests are drained.  The caller is responsible
   * for ensuring that no new requests which need to be drained are queued.
e3c78ca52   Tejun Heo   block: reorganize...
   */
807592a4f   Bart Van Assche   block: Let blk_dr...
  static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  	__releases(q->queue_lock)
  	__acquires(q->queue_lock)
e3c78ca52   Tejun Heo   block: reorganize...
  {
458f27a98   Asias He   block: Avoid miss...
  	int i;
807592a4f   Bart Van Assche   block: Let blk_dr...
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
807592a4f   Bart Van Assche   block: Let blk_dr...

e3c78ca52   Tejun Heo   block: reorganize...
  	while (true) {
481a7d647   Tejun Heo   block: fix drain_...
  		bool drain = false;
e3c78ca52   Tejun Heo   block: reorganize...

b855b04a0   Tejun Heo   block: blk-thrott...
  		/*
  		 * The caller might be trying to drain @q before its
  		 * elevator is initialized.
  		 */
  		if (q->elevator)
  			elv_drain_elevator(q);
5efd61135   Tejun Heo   blkcg: add blkcg_...
  		blkcg_drain_queue(q);
e3c78ca52   Tejun Heo   block: reorganize...

4eabc9412   Tejun Heo   block: don't kick...
  		/*
  		 * This function might be called on a queue which failed
b855b04a0   Tejun Heo   block: blk-thrott...
  		 * driver init after queue creation or is not yet fully
  		 * active.  Some drivers (e.g. fd and loop) get unhappy
  		 * in such cases.  Kick queue iff dispatch queue has
  		 * something on it and @q has request_fn set.
4eabc9412   Tejun Heo   block: don't kick...
  		 */
b855b04a0   Tejun Heo   block: blk-thrott...
  		if (!list_empty(&q->queue_head) && q->request_fn)
4eabc9412   Tejun Heo   block: don't kick...
  			__blk_run_queue(q);
c9a929dde   Tejun Heo   block: fix reques...

8a5ecdd42   Tejun Heo   block: add q->nr_...
  		drain |= q->nr_rqs_elvpriv;
24faf6f60   Bart Van Assche   block: Make blk_c...
  		drain |= q->request_fn_active;
481a7d647   Tejun Heo   block: fix drain_...
  
  		/*
  		 * Unfortunately, requests are queued at and tracked from
  		 * multiple places and there's no single counter which can
  		 * be drained.  Check all the queues and counters.
  		 */
  		if (drain_all) {
e97c293cd   Ming Lei   block: introduce ...
  			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
481a7d647   Tejun Heo   block: fix drain_...
  			drain |= !list_empty(&q->queue_head);
  			for (i = 0; i < 2; i++) {
8a5ecdd42   Tejun Heo   block: add q->nr_...
  				drain |= q->nr_rqs[i];
481a7d647   Tejun Heo   block: fix drain_...
  				drain |= q->in_flight[i];
7c94e1c15   Ming Lei   block: introduce ...
  				if (fq)
  					drain |= !list_empty(&fq->flush_queue[i]);
481a7d647   Tejun Heo   block: fix drain_...
  			}
  		}
e3c78ca52   Tejun Heo   block: reorganize...

481a7d647   Tejun Heo   block: fix drain_...
  		if (!drain)
e3c78ca52   Tejun Heo   block: reorganize...
  			break;
807592a4f   Bart Van Assche   block: Let blk_dr...
  
  		spin_unlock_irq(q->queue_lock);
e3c78ca52   Tejun Heo   block: reorganize...
  		msleep(10);
807592a4f   Bart Van Assche   block: Let blk_dr...
  
  		spin_lock_irq(q->queue_lock);
e3c78ca52   Tejun Heo   block: reorganize...
  	}
458f27a98   Asias He   block: Avoid miss...
  
  	/*
  	 * With queue marked dead, any woken up waiter will fail the
  	 * allocation path, so the wakeup chaining is lost and we're
  	 * left with hung waiters. We need to wake up those waiters.
  	 */
  	if (q->request_fn) {
a051661ca   Tejun Heo   blkcg: implement ...
  		struct request_list *rl;
a051661ca   Tejun Heo   blkcg: implement ...
  		blk_queue_for_each_rl(rl, q)
  			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
  				wake_up_all(&rl->wait[i]);
458f27a98   Asias He   block: Avoid miss...
  	}
e3c78ca52   Tejun Heo   block: reorganize...
  }
7e3acce11   Ming Lei   block: drain queu...
  void blk_drain_queue(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	__blk_drain_queue(q, true);
  	spin_unlock_irq(q->queue_lock);
  }
c9a929dde   Tejun Heo   block: fix reques...
  /**
d732580b4   Tejun Heo   block: implement ...
   * blk_queue_bypass_start - enter queue bypass mode
   * @q: queue of interest
   *
   * In bypass mode, only the dispatch FIFO queue of @q is used.  This
   * function makes @q enter bypass mode and drains all requests which were
6ecf23afa   Tejun Heo   block: extend que...
   * throttled or issued before.  On return, it's guaranteed that no request
80fd99792   Tejun Heo   blkcg: make sure ...
   * is being throttled or has ELVPRIV set, and that blk_queue_bypass()
   * returns %true inside queue or RCU read lock.
d732580b4   Tejun Heo   block: implement ...
   */
  void blk_queue_bypass_start(struct request_queue *q)
  {
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
d732580b4   Tejun Heo   block: implement ...
  	spin_lock_irq(q->queue_lock);
776687bce   Tejun Heo   block, blk-mq: dr...
  	q->bypass_depth++;
d732580b4   Tejun Heo   block: implement ...
  	queue_flag_set(QUEUE_FLAG_BYPASS, q);
  	spin_unlock_irq(q->queue_lock);
776687bce   Tejun Heo   block, blk-mq: dr...
  	/*
  	 * Queues start drained.  Skip actual draining till init is
  	 * complete.  This avoids lengthy delays during queue init which
  	 * can happen many times during boot.
  	 */
  	if (blk_queue_init_done(q)) {
807592a4f   Bart Van Assche   block: Let blk_dr...
  		spin_lock_irq(q->queue_lock);
  		__blk_drain_queue(q, false);
  		spin_unlock_irq(q->queue_lock);
b82d4b197   Tejun Heo   blkcg: make reque...
  		/* ensure blk_queue_bypass() is %true inside RCU read lock */
  		synchronize_rcu();
  	}
d732580b4   Tejun Heo   block: implement ...
  }
  EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  
  /**
   * blk_queue_bypass_end - leave queue bypass mode
   * @q: queue of interest
   *
   * Leave bypass mode and restore the normal queueing behavior.
332ebbf7f   Bart Van Assche   block: Document w...
   *
   * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
   * this function is called for both blk-sq and blk-mq queues.
d732580b4   Tejun Heo   block: implement ...
   */
  void blk_queue_bypass_end(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	if (!--q->bypass_depth)
  		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
  	WARN_ON_ONCE(q->bypass_depth < 0);
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
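
  /*
   * Example (editor's sketch): elevator switching is the classic bypass
   * user - requests are drained and fed straight to the dispatch queue
   * while the scheduler is swapped underneath:
   *
   *	blk_queue_bypass_start(q);
   *	... tear down the old elevator, set up the new one ...
   *	blk_queue_bypass_end(q);
   */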
aed3ea94b   Jens Axboe   block: wake up wa...
  void blk_set_queue_dying(struct request_queue *q)
  {
1b8560868   Bart Van Assche   block: Fix race t...
  	spin_lock_irq(q->queue_lock);
  	queue_flag_set(QUEUE_FLAG_DYING, q);
  	spin_unlock_irq(q->queue_lock);
aed3ea94b   Jens Axboe   block: wake up wa...

d3cfb2a0a   Ming Lei   block: block new ...
  	/*
  	 * When queue DYING flag is set, we need to block new req
  	 * entering queue, so we call blk_freeze_queue_start() to
  	 * prevent I/O from crossing blk_queue_enter().
  	 */
  	blk_freeze_queue_start(q);
aed3ea94b   Jens Axboe   block: wake up wa...
  	if (q->mq_ops)
  		blk_mq_wake_waiters(q);
  	else {
  		struct request_list *rl;
bbfc3c5d6   Tahsin Erdogan   block: queue lock...
  		spin_lock_irq(q->queue_lock);
aed3ea94b   Jens Axboe   block: wake up wa...
  		blk_queue_for_each_rl(rl, q) {
  			if (rl->rq_pool) {
60bed713a   Ming Lei   block: wake up al...
  				wake_up_all(&rl->wait[BLK_RW_SYNC]);
  				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
aed3ea94b   Jens Axboe   block: wake up wa...
  			}
  		}
bbfc3c5d6   Tahsin Erdogan   block: queue lock...
  		spin_unlock_irq(q->queue_lock);
aed3ea94b   Jens Axboe   block: wake up wa...
  	}
  }
  EXPORT_SYMBOL_GPL(blk_set_queue_dying);
d732580b4   Tejun Heo   block: implement ...
  /**
c9a929dde   Tejun Heo   block: fix reques...
   * blk_cleanup_queue - shutdown a request queue
   * @q: request queue to shutdown
   *
c246e80d8   Bart Van Assche   block: Avoid that...
   * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
   * put it.  All future requests will be failed immediately with -ENODEV.
c94a96ac9   Vivek Goyal   block: Initialize...
   */
6728cb0e6   Jens Axboe   block: make core ...
  void blk_cleanup_queue(struct request_queue *q)
483f4afc4   Al Viro   [PATCH] fix sysfs...
  {
c9a929dde   Tejun Heo   block: fix reques...
  	spinlock_t *lock = q->queue_lock;
e3335de94   Jens Axboe   block: blk_cleanu...

3f3299d5c   Bart Van Assche   block: Rename que...
  	/* mark @q DYING, no new request or merges will be allowed afterwards */
483f4afc4   Al Viro   [PATCH] fix sysfs...
  	mutex_lock(&q->sysfs_lock);
aed3ea94b   Jens Axboe   block: wake up wa...
  	blk_set_queue_dying(q);
c9a929dde   Tejun Heo   block: fix reques...
  	spin_lock_irq(lock);
6ecf23afa   Tejun Heo   block: extend que...

80fd99792   Tejun Heo   blkcg: make sure ...
  	/*
3f3299d5c   Bart Van Assche   block: Rename que...
  	 * A dying queue is permanently in bypass mode till released.  Note
80fd99792   Tejun Heo   blkcg: make sure ...
  	 * that, unlike blk_queue_bypass_start(), we aren't performing
  	 * synchronize_rcu() after entering bypass mode to avoid the delay
  	 * as some drivers create and destroy a lot of queues while
  	 * probing.  This is still safe because blk_release_queue() will be
  	 * called only after the queue refcnt drops to zero and nothing,
  	 * RCU or not, would be traversing the queue by then.
  	 */
6ecf23afa   Tejun Heo   block: extend que...
  	q->bypass_depth++;
  	queue_flag_set(QUEUE_FLAG_BYPASS, q);
c9a929dde   Tejun Heo   block: fix reques...
  	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
  	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
3f3299d5c   Bart Van Assche   block: Rename que...
  	queue_flag_set(QUEUE_FLAG_DYING, q);
c9a929dde   Tejun Heo   block: fix reques...
  	spin_unlock_irq(lock);
  	mutex_unlock(&q->sysfs_lock);
c246e80d8   Bart Van Assche   block: Avoid that...
  	/*
  	 * Drain all requests queued before DYING marking. Set DEAD flag to
  	 * prevent that q->request_fn() gets invoked after draining finished.
  	 */
3ef28e83a   Dan Williams   block: generic re...
  	blk_freeze_queue(q);
9c1051aac   Omar Sandoval   blk-mq: untangle ...
  	spin_lock_irq(lock);
c246e80d8   Bart Van Assche   block: Avoid that...
  	queue_flag_set(QUEUE_FLAG_DEAD, q);
807592a4f   Bart Van Assche   block: Let blk_dr...
  	spin_unlock_irq(lock);
c9a929dde   Tejun Heo   block: fix reques...

392640fd1   Ming Lei   blk-mq: quiesce q...
  	/*
  	 * Make sure all in-progress dispatches are completed, because
  	 * blk_freeze_queue() can only complete all requests, and dispatch
  	 * may still be in progress since we dispatch requests
b520f00da   Ming Lei   blk-mq: avoid to ...
  	 * from more than one context.
  	 *
  	 * No need to quiesce the queue if it isn't initialized yet, since
  	 * blk_freeze_queue() should be enough for cases of passthrough
  	 * requests.
392640fd1   Ming Lei   blk-mq: quiesce q...
  	 */
b520f00da   Ming Lei   blk-mq: avoid to ...
  	if (q->mq_ops && blk_queue_init_done(q))
392640fd1   Ming Lei   blk-mq: quiesce q...
  		blk_mq_quiesce_queue(q);
5a48fc147   Dan Williams   block: blk_flush_...
  	/* for synchronous bio-based driver finish in-flight integrity i/o */
  	blk_flush_integrity();
c9a929dde   Tejun Heo   block: fix reques...
  	/* @q won't process any more request, flush async actions */
dc3b17cc8   Jan Kara   block: Use pointe...
  	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
c9a929dde   Tejun Heo   block: fix reques...
  	blk_sync_queue(q);
45a9c9d90   Bart Van Assche   blk-mq: Fix a use...
  	if (q->mq_ops)
  		blk_mq_free_queue(q);
3ef28e83a   Dan Williams   block: generic re...
  	percpu_ref_exit(&q->q_usage_counter);
45a9c9d90   Bart Van Assche   blk-mq: Fix a use...

5e5cfac0c   Asias He   block: Mitigate l...
  	spin_lock_irq(lock);
  	if (q->queue_lock != &q->__queue_lock)
  		q->queue_lock = &q->__queue_lock;
  	spin_unlock_irq(lock);
c9a929dde   Tejun Heo   block: fix reques...
  	/* @q is and will stay empty, shutdown and put */
483f4afc4   Al Viro   [PATCH] fix sysfs...
  	blk_put_queue(q);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  EXPORT_SYMBOL(blk_cleanup_queue);
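
  /*
   * Example (editor's sketch): the conventional teardown order for a
   * driver - unpublish the disk first, then shut the queue down, then
   * drop the disk reference (mydev is hypothetical):
   *
   *	del_gendisk(mydev->disk);
   *	blk_cleanup_queue(mydev->queue);
   *	put_disk(mydev->disk);
   *
   * After blk_cleanup_queue() returns, newly submitted I/O fails with
   * an error and the queue reference taken at allocation time is gone.
   */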
271508dba   David Rientjes   block: allocate r...
  /* Allocate memory local to the request queue */
6d247d7f7   Christoph Hellwig   block: allow spec...
  static void *alloc_request_simple(gfp_t gfp_mask, void *data)
271508dba   David Rientjes   block: allocate r...
  {
6d247d7f7   Christoph Hellwig   block: allow spec...
  	struct request_queue *q = data;
  
  	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
271508dba   David Rientjes   block: allocate r...
  }
6d247d7f7   Christoph Hellwig   block: allow spec...
  static void free_request_simple(void *element, void *data)
271508dba   David Rientjes   block: allocate r...
  {
  	kmem_cache_free(request_cachep, element);
  }
6d247d7f7   Christoph Hellwig   block: allow spec...
  static void *alloc_request_size(gfp_t gfp_mask, void *data)
  {
  	struct request_queue *q = data;
  	struct request *rq;
  
  	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
  			q->node);
  	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
  		kfree(rq);
  		rq = NULL;
  	}
  	return rq;
  }
  
  static void free_request_size(void *element, void *data)
  {
  	struct request_queue *q = data;
  
  	if (q->exit_rq_fn)
  		q->exit_rq_fn(q, element);
  	kfree(element);
  }
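
  /*
   * Example (editor's sketch): a legacy driver that wants a per-request
   * payload sets q->cmd_size (and optionally ->init_rq_fn/->exit_rq_fn)
   * before calling blk_init_allocated_queue(); the payload is then
   * allocated right behind the request (struct mydev_cmd is
   * hypothetical):
   *
   *	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
   *	q->request_fn = mydev_request_fn;
   *	q->cmd_size = sizeof(struct mydev_cmd);
   *	if (blk_init_allocated_queue(q) < 0)
   *		goto fail;
   *
   * and is reached from the driver with
   *
   *	struct mydev_cmd *cmd = blk_mq_rq_to_pdu(rq);
   */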
5b788ce3e   Tejun Heo   block: prepare fo...
  int blk_init_rl(struct request_list *rl, struct request_queue *q,
  		gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
1abec4fdb   Mike Snitzer   block: make blk_i...
  	if (unlikely(rl->rq_pool))
  		return 0;
5b788ce3e   Tejun Heo   block: prepare fo...
  	rl->q = q;
1faa16d22   Jens Axboe   block: change the...
  	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
  	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
1faa16d22   Jens Axboe   block: change the...
  	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
  	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

6d247d7f7   Christoph Hellwig   block: allow spec...
  	if (q->cmd_size) {
  		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
  				alloc_request_size, free_request_size,
  				q, gfp_mask, q->node);
  	} else {
  		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
  				alloc_request_simple, free_request_simple,
  				q, gfp_mask, q->node);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	if (!rl->rq_pool)
  		return -ENOMEM;
b425e5049   Bart Van Assche   block: Avoid that...
  	if (rl != &q->root_rl)
  		WARN_ON_ONCE(!blk_get_queue(q));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	return 0;
  }
b425e5049   Bart Van Assche   block: Avoid that...
  void blk_exit_rl(struct request_queue *q, struct request_list *rl)
5b788ce3e   Tejun Heo   block: prepare fo...
  {
b425e5049   Bart Van Assche   block: Avoid that...
  	if (rl->rq_pool) {
5b788ce3e   Tejun Heo   block: prepare fo...
  		mempool_destroy(rl->rq_pool);
b425e5049   Bart Van Assche   block: Avoid that...
  		if (rl != &q->root_rl)
  			blk_put_queue(q);
  	}
5b788ce3e   Tejun Heo   block: prepare fo...
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
c304a51bf   Ezequiel Garcia   block: use NUMA_N...
  	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
  }
  EXPORT_SYMBOL(blk_alloc_queue);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

6f3b0e8bc   Christoph Hellwig   blk-mq: add a fla...
  int blk_queue_enter(struct request_queue *q, bool nowait)
3ef28e83a   Dan Williams   block: generic re...
  {
  	while (true) {
3ef28e83a   Dan Williams   block: generic re...
  
  		if (percpu_ref_tryget_live(&q->q_usage_counter))
  			return 0;
6f3b0e8bc   Christoph Hellwig   blk-mq: add a fla...
  		if (nowait)
3ef28e83a   Dan Williams   block: generic re...
  			return -EBUSY;
5ed61d3f0   Ming Lei   block: add a read...
  		/*
1671d522c   Ming Lei   block: rename blk...
  		 * This pairs with the barrier in blk_freeze_queue_start();
5ed61d3f0   Ming Lei   block: add a read...
  		 * we need to order reading __PERCPU_REF_DEAD flag of
d3cfb2a0a   Ming Lei   block: block new ...
  		 * .q_usage_counter and reading .mq_freeze_depth or
  		 * queue dying flag, otherwise the following wait may
  		 * never return if the two reads are reordered.
5ed61d3f0   Ming Lei   block: add a read...
  		 */
  		smp_rmb();
aa6be3967   Alan Jenkins   block: do not use...
  		wait_event(q->mq_freeze_wq,
  			   !atomic_read(&q->mq_freeze_depth) ||
  			   blk_queue_dying(q));
3ef28e83a   Dan Williams   block: generic re...
  		if (blk_queue_dying(q))
  			return -ENODEV;
3ef28e83a   Dan Williams   block: generic re...
  	}
  }
  
  void blk_queue_exit(struct request_queue *q)
  {
  	percpu_ref_put(&q->q_usage_counter);
  }
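
  /*
   * Example (editor's sketch): submission paths pin the queue around
   * the actual work, in the style of generic_make_request():
   *
   *	if (blk_queue_enter(q, false) == 0) {
   *		ret = q->make_request_fn(q, bio);
   *		blk_queue_exit(q);
   *	} else {
   *		bio_io_error(bio);
   *	}
   *
   * Passing nowait == true makes blk_queue_enter() return -EBUSY
   * instead of sleeping while the queue is frozen.
   */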
  
  static void blk_queue_usage_counter_release(struct percpu_ref *ref)
  {
  	struct request_queue *q =
  		container_of(ref, struct request_queue, q_usage_counter);
  
  	wake_up_all(&q->mq_freeze_wq);
  }
287922eb0   Christoph Hellwig   block: defer time...
  static void blk_rq_timed_out_timer(unsigned long data)
  {
  	struct request_queue *q = (struct request_queue *)data;
  
  	kblockd_schedule_work(&q->timeout_work);
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
  {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  	struct request_queue *q;
1946089a1   Christoph Lameter   [PATCH] NUMA awar...

8324aa91d   Jens Axboe   block: split tag ...
  	q = kmem_cache_alloc_node(blk_requestq_cachep,
94f6030ca   Christoph Lameter   Slab allocators: ...
  				gfp_mask | __GFP_ZERO, node_id);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	if (!q)
  		return NULL;
00380a404   Dan Carpenter   block: blk_alloc_...
  	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
a73f730d0   Tejun Heo   block, cfq: move ...
  	if (q->id < 0)
3d2936f45   Ming Lei   block: only alloc...
  		goto fail_q;
a73f730d0   Tejun Heo   block, cfq: move ...

93b27e729   NeilBrown   blk: use non-resc...
  	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
54efd50bf   Kent Overstreet   block: make gener...
  	if (!q->bio_split)
  		goto fail_id;
d03f6cdc1   Jan Kara   block: Dynamicall...
  	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
  	if (!q->backing_dev_info)
  		goto fail_split;
a83b576c9   Jens Axboe   block: fix stacke...
  	q->stats = blk_alloc_queue_stats();
  	if (!q->stats)
  		goto fail_stats;
dc3b17cc8   Jan Kara   block: Use pointe...
  	q->backing_dev_info->ra_pages =
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
  			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
dc3b17cc8   Jan Kara   block: Use pointe...
  	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
  	q->backing_dev_info->name = "block";
5151412dd   Mike Snitzer   block: initialize...
  	q->node = node_id;
0989a025d   Jens Axboe   block: don't over...

dc3b17cc8   Jan Kara   block: Use pointe...
  	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
31373d09d   Matthew Garrett   laptop-mode: Make...
  		    laptop_mode_timer_fn, (unsigned long) q);
242f9dcb8   Jens Axboe   block: unify requ...
  	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
77a38e88c   Bart Van Assche   block: Fix a race...
  	INIT_WORK(&q->timeout_work, NULL);
b855b04a0   Tejun Heo   block: blk-thrott...
  	INIT_LIST_HEAD(&q->queue_head);
242f9dcb8   Jens Axboe   block: unify requ...
  	INIT_LIST_HEAD(&q->timeout_list);
a612fddf0   Tejun Heo   block, cfq: move ...
  	INIT_LIST_HEAD(&q->icq_list);
4eef30499   Tejun Heo   blkcg: move per-q...
  #ifdef CONFIG_BLK_CGROUP
e8989fae3   Tejun Heo   blkcg: unify blkg...
  	INIT_LIST_HEAD(&q->blkg_list);
4eef30499   Tejun Heo   blkcg: move per-q...
  #endif
3cca6dc1c   Jens Axboe   block: add API fo...
  	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
483f4afc4   Al Viro   [PATCH] fix sysfs...

8324aa91d   Jens Axboe   block: split tag ...
  	kobject_init(&q->kobj, &blk_queue_ktype);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

5acb3cc2c   Waiman Long   blktrace: Fix pot...
  #ifdef CONFIG_BLK_DEV_IO_TRACE
  	mutex_init(&q->blk_trace_mutex);
  #endif
483f4afc4   Al Viro   [PATCH] fix sysfs...
  	mutex_init(&q->sysfs_lock);
e7e72bf64   Neil Brown   Remove blkdev war...
  	spin_lock_init(&q->__queue_lock);
483f4afc4   Al Viro   [PATCH] fix sysfs...

c94a96ac9   Vivek Goyal   block: Initialize...
  	/*
  	 * By default initialize queue_lock to internal lock and driver can
  	 * override it later if need be.
  	 */
  	q->queue_lock = &q->__queue_lock;
b82d4b197   Tejun Heo   blkcg: make reque...
  	/*
  	 * A queue starts its life with bypass turned on to avoid
  	 * unnecessary bypass on/off overhead and nasty surprises during
749fefe67   Tejun Heo   block: lift the i...
  	 * init.  The initial bypass will be finished when the queue is
  	 * registered by blk_register_queue().
b82d4b197   Tejun Heo   blkcg: make reque...
  	 */
  	q->bypass_depth = 1;
  	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
320ae51fe   Jens Axboe   blk-mq: new multi...
  	init_waitqueue_head(&q->mq_freeze_wq);
3ef28e83a   Dan Williams   block: generic re...
  	/*
  	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
  	 * See blk_register_queue() for details.
  	 */
  	if (percpu_ref_init(&q->q_usage_counter,
  				blk_queue_usage_counter_release,
  				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
fff4996b7   Mikulas Patocka   blk-core: Fix mem...
  		goto fail_bdi;
f51b802c1   Tejun Heo   blkcg: use the us...

3ef28e83a   Dan Williams   block: generic re...
  	if (blkcg_init_queue(q))
  		goto fail_ref;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  	return q;
a73f730d0   Tejun Heo   block, cfq: move ...

3ef28e83a   Dan Williams   block: generic re...
  fail_ref:
  	percpu_ref_exit(&q->q_usage_counter);
fff4996b7   Mikulas Patocka   blk-core: Fix mem...
  fail_bdi:
a83b576c9   Jens Axboe   block: fix stacke...
  	blk_free_queue_stats(q->stats);
  fail_stats:
d03f6cdc1   Jan Kara   block: Dynamicall...
  	bdi_put(q->backing_dev_info);
54efd50bf   Kent Overstreet   block: make gener...
  fail_split:
  	bioset_free(q->bio_split);
a73f730d0   Tejun Heo   block, cfq: move ...
  fail_id:
  	ida_simple_remove(&blk_queue_ida, q->id);
  fail_q:
  	kmem_cache_free(blk_requestq_cachep, q);
  	return NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  }
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
  EXPORT_SYMBOL(blk_alloc_queue_node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  
  /**
   * blk_init_queue  - prepare a request queue for use with a block device
   * @rfn:  The function to be called to process requests that have been
   *        placed on the queue.
   * @lock: Request queue spin lock
   *
   * Description:
   *    If a block device wishes to use the standard request handling procedures,
   *    which sorts requests and coalesces adjacent requests, then it must
   *    call blk_init_queue().  The function @rfn will be called when there
   *    are requests on the queue that need to be processed.  If the device
   *    supports plugging, then @rfn may not be called immediately when requests
   *    are available on the queue, but may be called at some time later instead.
   *    Plugged queues are generally unplugged when a buffer belonging to one
   *    of the requests on the queue is needed, or due to memory pressure.
   *
   *    @rfn is not required, or even expected, to remove all requests off the
   *    queue, but only as many as it can handle at a time.  If it does leave
   *    requests on the queue, it is responsible for arranging that the requests
   *    get dealt with eventually.
   *
   *    The queue spin lock must be held while manipulating the requests on the
a038e2536   Paolo 'Blaisorblade' Giarrusso   [PATCH] blk_start...
   *    request queue; this lock will also be taken from interrupt context, so irq
   *    disabling is needed for it.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *
710027a48   Randy Dunlap   Add some block/ s...
   *    Function returns a pointer to the initialized request queue, or %NULL if
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
   *    it didn't succeed.
   *
   * Note:
   *    blk_init_queue() must be paired with a blk_cleanup_queue() call
   *    when the block device is deactivated (such as at module unload).
   **/
1946089a1   Christoph Lameter   [PATCH] NUMA awar...

165125e1e   Jens Axboe   [BLOCK] Get rid o...
  struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
  {
c304a51bf   Ezequiel Garcia   block: use NUMA_N...
  	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
  }
  EXPORT_SYMBOL(blk_init_queue);
165125e1e   Jens Axboe   [BLOCK] Get rid o...
  struct request_queue *
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
  blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
  {
5ea708d15   Christoph Hellwig   block: simplify b...
  	struct request_queue *q;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

5ea708d15   Christoph Hellwig   block: simplify b...
  	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
  	if (!q)
c86d1b8ae   Mike Snitzer   block: avoid unco...
  		return NULL;
5ea708d15   Christoph Hellwig   block: simplify b...
  	q->request_fn = rfn;
  	if (lock)
  		q->queue_lock = lock;
  	if (blk_init_allocated_queue(q) < 0) {
  		blk_cleanup_queue(q);
  		return NULL;
  	}
18741986a   Christoph Hellwig   blk-mq: rework fl...

7982e90c3   Mike Snitzer   block: fix q->flu...
  	return q;
01effb0dc   Mike Snitzer   block: allow init...
  }
  EXPORT_SYMBOL(blk_init_queue_node);
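
  /*
   * Example (editor's sketch): minimal legacy queue setup for a driver
   * with its own lock (mydev_* names are hypothetical):
   *
   *	spin_lock_init(&mydev->lock);
   *	q = blk_init_queue(mydev_request_fn, &mydev->lock);
   *	if (!q)
   *		return -ENOMEM;
   *	blk_queue_max_hw_sectors(q, 255);
   *	blk_queue_logical_block_size(q, 512);
   */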
dece16353   Jens Axboe   block: change ->m...
  static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
336b7e1f2   Mike Snitzer   block: remove exp...

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

5ea708d15   Christoph Hellwig   block: simplify b...
  int blk_init_allocated_queue(struct request_queue *q)
  {
332ebbf7f   Bart Van Assche   block: Document w...
  	WARN_ON_ONCE(q->mq_ops);
6d247d7f7   Christoph Hellwig   block: allow spec...
  	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
ba483388e   Ming Lei   block: remove blk...
  	if (!q->fq)
5ea708d15   Christoph Hellwig   block: simplify b...
  		return -ENOMEM;
7982e90c3   Mike Snitzer   block: fix q->flu...

6d247d7f7   Christoph Hellwig   block: allow spec...
  	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
  		goto out_free_flush_queue;
7982e90c3   Mike Snitzer   block: fix q->flu...

a051661ca   Tejun Heo   blkcg: implement ...
  	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
6d247d7f7   Christoph Hellwig   block: allow spec...
  		goto out_exit_flush_rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

287922eb0   Christoph Hellwig   block: defer time...
  	INIT_WORK(&q->timeout_work, blk_timeout_work);
60ea8226c   Tejun Heo   block: fix reques...
  	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
c94a96ac9   Vivek Goyal   block: Initialize...

f3b144aa7   Jens Axboe   block: remove var...
  	/*
  	 * This also sets hw/phys segments, boundary and size
  	 */
c20e8de27   Jens Axboe   block: rename __m...
  	blk_queue_make_request(q, blk_queue_bio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2

44ec95425   Alan Stern   [SCSI] sg: cap re...
  	q->sg_reserved_size = INT_MAX;
eb1c160b2   Tomoki Sekiyama   elevator: Fix a r...
  	/* Protect q->elevator from elevator_change */
  	mutex_lock(&q->sysfs_lock);
b82d4b197   Tejun Heo   blkcg: make reque...
  	/* init elevator */
eb1c160b2   Tomoki Sekiyama   elevator: Fix a r...
  	if (elevator_init(q, NULL)) {
  		mutex_unlock(&q->sysfs_lock);
6d247d7f7   Christoph Hellwig   block: allow spec...
  		goto out_exit_flush_rq;
eb1c160b2   Tomoki Sekiyama   elevator: Fix a r...
  	}
  
  	mutex_unlock(&q->sysfs_lock);
5ea708d15   Christoph Hellwig   block: simplify b...
  	return 0;
eb1c160b2   Tomoki Sekiyama   elevator: Fix a r...

6d247d7f7   Christoph Hellwig   block: allow spec...
  out_exit_flush_rq:
  	if (q->exit_rq_fn)
  		q->exit_rq_fn(q, q->fq->flush_rq);
  out_free_flush_queue:
ba483388e   Ming Lei   block: remove blk...
972
  	blk_free_flush_queue(q->fq);
0affbaece   xiao jin   block: blk_init_a...
973
  	q->fq = NULL;
5ea708d15   Christoph Hellwig   block: simplify b...
974
  	return -ENOMEM;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
975
  }
5151412dd   Mike Snitzer   block: initialize...
976
  EXPORT_SYMBOL(blk_init_allocated_queue);
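
  /*
   * Sketch of the two-step variant for drivers that need extra per-request
   * data before the request machinery comes up; my_request_fn, my_init_rq,
   * my_exit_rq and struct my_cmd are assumed, illustrative names:
   *
   *	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
   *	if (!q)
   *		return -ENOMEM;
   *	q->request_fn = my_request_fn;
   *	q->cmd_size = sizeof(struct my_cmd);
   *	q->init_rq_fn = my_init_rq;
   *	q->exit_rq_fn = my_exit_rq;
   *	if (blk_init_allocated_queue(q) < 0)
   *		blk_cleanup_queue(q);
   */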
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
977

09ac46c42   Tejun Heo   block: misc updat...
978
  bool blk_get_queue(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
979
  {
3f3299d5c   Bart Van Assche   block: Rename que...
980
  	if (likely(!blk_queue_dying(q))) {
09ac46c42   Tejun Heo   block: misc updat...
981
982
  		__blk_get_queue(q);
  		return true;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
983
  	}
09ac46c42   Tejun Heo   block: misc updat...
984
  	return false;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
985
  }
d86e0e83b   Jens Axboe   block: export blk...
986
  EXPORT_SYMBOL(blk_get_queue);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
987

5b788ce3e   Tejun Heo   block: prepare fo...
988
  static inline void blk_free_request(struct request_list *rl, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
989
  {
e80640213   Christoph Hellwig   block: split out ...
990
  	if (rq->rq_flags & RQF_ELVPRIV) {
5b788ce3e   Tejun Heo   block: prepare fo...
991
  		elv_put_request(rl->q, rq);
f1f8cc946   Tejun Heo   block, cfq: move ...
992
  		if (rq->elv.icq)
11a3122f6   Tejun Heo   block: strip out ...
993
  			put_io_context(rq->elv.icq->ioc);
f1f8cc946   Tejun Heo   block, cfq: move ...
994
  	}
5b788ce3e   Tejun Heo   block: prepare fo...
995
  	mempool_free(rq, rl->rq_pool);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
996
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
997
998
999
1000
  /*
   * ioc_batching returns true if the ioc is a valid batching context and
   * should be given priority access to a request.
   */
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1001
  static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
  {
  	if (!ioc)
  		return 0;
  
  	/*
  	 * Make sure the process is able to allocate at least 1 request
  	 * even if the batch times out, otherwise we could theoretically
  	 * lose wakeups.
  	 */
  	return ioc->nr_batch_requests == q->nr_batching ||
  		(ioc->nr_batch_requests > 0
  		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
  }
  
  /*
   * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
   * will cause the process to be a "batcher" on all queues in the system. This
   * is the behaviour we want though - once it gets a wakeup it should be given
   * a nice run.
   */
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1022
  static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1023
1024
1025
1026
1027
1028
1029
  {
  	if (!ioc || ioc_batching(q, ioc))
  		return;
  
  	ioc->nr_batch_requests = q->nr_batching;
  	ioc->last_waited = jiffies;
  }
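
  /*
   * Lifecycle sketch of the batching logic above: after ioc_set_batching()
   * a task holds a fresh batch of q->nr_batching requests.  ioc_batching()
   * then keeps returning true while the batch is untouched, or while some
   * of it remains and BLK_BATCH_TIME jiffies have not yet passed since
   * ioc->last_waited; __get_request() decrements the count on each
   * successful allocation.
   */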
5b788ce3e   Tejun Heo   block: prepare fo...
1030
  static void __freed_request(struct request_list *rl, int sync)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1031
  {
5b788ce3e   Tejun Heo   block: prepare fo...
1032
  	struct request_queue *q = rl->q;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1033

d40f75a06   Tejun Heo   writeback, blkcg:...
1034
1035
  	if (rl->count[sync] < queue_congestion_off_threshold(q))
  		blk_clear_congested(rl, sync);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1036

1faa16d22   Jens Axboe   block: change the...
1037
1038
1039
  	if (rl->count[sync] + 1 <= q->nr_requests) {
  		if (waitqueue_active(&rl->wait[sync]))
  			wake_up(&rl->wait[sync]);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1040

5b788ce3e   Tejun Heo   block: prepare fo...
1041
  		blk_clear_rl_full(rl, sync);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1042
1043
1044
1045
1046
1047
1048
  	}
  }
  
  /*
   * A request has just been released.  Account for it, update the full and
   * congestion status, wake up any waiters.   Called under q->queue_lock.
   */
e80640213   Christoph Hellwig   block: split out ...
1049
1050
  static void freed_request(struct request_list *rl, bool sync,
  		req_flags_t rq_flags)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1051
  {
5b788ce3e   Tejun Heo   block: prepare fo...
1052
  	struct request_queue *q = rl->q;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1053

8a5ecdd42   Tejun Heo   block: add q->nr_...
1054
  	q->nr_rqs[sync]--;
1faa16d22   Jens Axboe   block: change the...
1055
  	rl->count[sync]--;
e80640213   Christoph Hellwig   block: split out ...
1056
  	if (rq_flags & RQF_ELVPRIV)
8a5ecdd42   Tejun Heo   block: add q->nr_...
1057
  		q->nr_rqs_elvpriv--;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1058

5b788ce3e   Tejun Heo   block: prepare fo...
1059
  	__freed_request(rl, sync);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1060

1faa16d22   Jens Axboe   block: change the...
1061
  	if (unlikely(rl->starved[sync ^ 1]))
5b788ce3e   Tejun Heo   block: prepare fo...
1062
  		__freed_request(rl, sync ^ 1);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1063
  }
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1064
1065
1066
  int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  {
  	struct request_list *rl;
d40f75a06   Tejun Heo   writeback, blkcg:...
1067
  	int on_thresh, off_thresh;
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1068

332ebbf7f   Bart Van Assche   block: Document w...
1069
  	WARN_ON_ONCE(q->mq_ops);
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1070
1071
1072
  	spin_lock_irq(q->queue_lock);
  	q->nr_requests = nr;
  	blk_queue_congestion_threshold(q);
d40f75a06   Tejun Heo   writeback, blkcg:...
1073
1074
  	on_thresh = queue_congestion_on_threshold(q);
  	off_thresh = queue_congestion_off_threshold(q);
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1075

d40f75a06   Tejun Heo   writeback, blkcg:...
1076
1077
1078
1079
1080
  	blk_queue_for_each_rl(rl, q) {
  		if (rl->count[BLK_RW_SYNC] >= on_thresh)
  			blk_set_congested(rl, BLK_RW_SYNC);
  		else if (rl->count[BLK_RW_SYNC] < off_thresh)
  			blk_clear_congested(rl, BLK_RW_SYNC);
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1081

d40f75a06   Tejun Heo   writeback, blkcg:...
1082
1083
1084
1085
  		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
  			blk_set_congested(rl, BLK_RW_ASYNC);
  		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
  			blk_clear_congested(rl, BLK_RW_ASYNC);
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1086

e3a2b3f93   Jens Axboe   blk-mq: allow cha...
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
  		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
  			blk_set_rl_full(rl, BLK_RW_SYNC);
  		} else {
  			blk_clear_rl_full(rl, BLK_RW_SYNC);
  			wake_up(&rl->wait[BLK_RW_SYNC]);
  		}
  
  		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
  			blk_set_rl_full(rl, BLK_RW_ASYNC);
  		} else {
  			blk_clear_rl_full(rl, BLK_RW_ASYNC);
  			wake_up(&rl->wait[BLK_RW_ASYNC]);
  		}
  	}
  
  	spin_unlock_irq(q->queue_lock);
  	return 0;
  }
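
  /*
   * For reference, this legacy (non-mq) path is what a write to the
   * queue's sysfs nr_requests attribute ends up calling, e.g.
   * (illustrative):
   *
   *	echo 256 > /sys/block/sda/queue/nr_requests
   *
   * Shrinking the limit below the current count marks the request lists
   * full until enough in-flight requests complete.
   */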
da8303c63   Tejun Heo   block: make get_r...
1105
  /**
a06e05e6a   Tejun Heo   block: refactor g...
1106
   * __get_request - get a free request
5b788ce3e   Tejun Heo   block: prepare fo...
1107
   * @rl: request list to allocate from
ef295ecf0   Christoph Hellwig   block: better op ...
1108
   * @op: operation and flags
da8303c63   Tejun Heo   block: make get_r...
1109
1110
1111
1112
1113
1114
   * @bio: bio to allocate request for (can be %NULL)
   * @gfp_mask: allocation mask
   *
   * Get a free request from @rl->q.  This function may fail under memory
   * pressure or if @q is dead.
   *
da3dae54e   Masanari Iida   Documentation: Do...
1115
   * Must be called with @q->queue_lock held and,
a492f0754   Joe Lawrence   block,scsi: fixup...
1116
1117
   * Returns ERR_PTR on failure, with @q->queue_lock held.
   * Returns request pointer on success, with @q->queue_lock *not held*.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1118
   */
ef295ecf0   Christoph Hellwig   block: better op ...
1119
1120
  static struct request *__get_request(struct request_list *rl, unsigned int op,
  		struct bio *bio, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1121
  {
5b788ce3e   Tejun Heo   block: prepare fo...
1122
  	struct request_queue *q = rl->q;
b679281a6   Tejun Heo   block: restructur...
1123
  	struct request *rq;
7f4b35d15   Tejun Heo   block: allocate i...
1124
1125
  	struct elevator_type *et = q->elevator->type;
  	struct io_context *ioc = rq_ioc(bio);
f1f8cc946   Tejun Heo   block, cfq: move ...
1126
  	struct io_cq *icq = NULL;
ef295ecf0   Christoph Hellwig   block: better op ...
1127
  	const bool is_sync = op_is_sync(op);
75eb6c372   Tejun Heo   block: pass aroun...
1128
  	int may_queue;
e80640213   Christoph Hellwig   block: split out ...
1129
  	req_flags_t rq_flags = RQF_ALLOCED;
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1130

2fff8a924   Bart Van Assche   block: Check lock...
1131
  	lockdep_assert_held(q->queue_lock);
3f3299d5c   Bart Van Assche   block: Rename que...
1132
  	if (unlikely(blk_queue_dying(q)))
a492f0754   Joe Lawrence   block,scsi: fixup...
1133
  		return ERR_PTR(-ENODEV);
da8303c63   Tejun Heo   block: make get_r...
1134

ef295ecf0   Christoph Hellwig   block: better op ...
1135
  	may_queue = elv_may_queue(q, op);
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1136
1137
  	if (may_queue == ELV_MQUEUE_NO)
  		goto rq_starved;
1faa16d22   Jens Axboe   block: change the...
1138
1139
  	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
  		if (rl->count[is_sync]+1 >= q->nr_requests) {
f2dbd76a0   Tejun Heo   block, cfq: repla...
1140
  			/*
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1141
1142
1143
1144
1145
  			 * The queue will fill after this allocation, so set
  			 * it as full, and mark this process as "batching".
  			 * This process will be allowed to complete a batch of
  			 * requests; others will be blocked.
  			 */
5b788ce3e   Tejun Heo   block: prepare fo...
1146
  			if (!blk_rl_full(rl, is_sync)) {
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1147
  				ioc_set_batching(q, ioc);
5b788ce3e   Tejun Heo   block: prepare fo...
1148
  				blk_set_rl_full(rl, is_sync);
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1149
1150
1151
1152
1153
1154
1155
1156
  			} else {
  				if (may_queue != ELV_MQUEUE_MUST
  						&& !ioc_batching(q, ioc)) {
  					/*
  					 * The queue is full and the allocating
  					 * process is not a "batcher", and not
  					 * exempted by the IO scheduler
  					 */
a492f0754   Joe Lawrence   block,scsi: fixup...
1157
  					return ERR_PTR(-ENOMEM);
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1158
1159
  				}
  			}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1160
  		}
d40f75a06   Tejun Heo   writeback, blkcg:...
1161
  		blk_set_congested(rl, is_sync);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1162
  	}
082cf69eb   Jens Axboe   [PATCH] ll_rw_blk...
1163
1164
1165
1166
1167
  	/*
  	 * Only allow batching queuers to allocate up to 50% over the defined
  	 * limit of requests, otherwise we could have thousands of requests
  	 * allocated with any setting of ->nr_requests
  	 */
1faa16d22   Jens Axboe   block: change the...
1168
  	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
a492f0754   Joe Lawrence   block,scsi: fixup...
1169
  		return ERR_PTR(-ENOMEM);
fd782a4a9   Hugh Dickins   [PATCH] Fix get_r...
1170

8a5ecdd42   Tejun Heo   block: add q->nr_...
1171
  	q->nr_rqs[is_sync]++;
1faa16d22   Jens Axboe   block: change the...
1172
1173
  	rl->count[is_sync]++;
  	rl->starved[is_sync] = 0;
cb98fc8bb   Tejun Heo   [BLOCK] Reimpleme...
1174

f1f8cc946   Tejun Heo   block, cfq: move ...
1175
1176
  	/*
  	 * Decide whether the new request will be managed by elevator.  If
e80640213   Christoph Hellwig   block: split out ...
1177
  	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
f1f8cc946   Tejun Heo   block, cfq: move ...
1178
1179
1180
1181
  	 * prevent the current elevator from being destroyed until the new
  	 * request is freed.  This guarantees icq's won't be destroyed and
  	 * makes creating new ones safe.
  	 *
e6f7f93d5   Christoph Hellwig   block: fix elevat...
1182
1183
1184
  	 * Flush requests do not use the elevator so skip initialization.
  	 * This allows a request to share the flush and elevator data.
  	 *
f1f8cc946   Tejun Heo   block, cfq: move ...
1185
1186
1187
  	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
  	 * it will be created after releasing queue_lock.
  	 */
e6f7f93d5   Christoph Hellwig   block: fix elevat...
1188
  	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
e80640213   Christoph Hellwig   block: split out ...
1189
  		rq_flags |= RQF_ELVPRIV;
8a5ecdd42   Tejun Heo   block: add q->nr_...
1190
  		q->nr_rqs_elvpriv++;
f1f8cc946   Tejun Heo   block, cfq: move ...
1191
1192
  		if (et->icq_cache && ioc)
  			icq = ioc_lookup_icq(ioc, q);
9d5a4e946   Mike Snitzer   block: skip eleva...
1193
  	}
cb98fc8bb   Tejun Heo   [BLOCK] Reimpleme...
1194

f253b86b4   Jens Axboe   Revert "block: fi...
1195
  	if (blk_queue_io_stat(q))
e80640213   Christoph Hellwig   block: split out ...
1196
  		rq_flags |= RQF_IO_STAT;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1197
  	spin_unlock_irq(q->queue_lock);
29e2b09ab   Tejun Heo   block: collapse b...
1198
  	/* allocate and init request */
5b788ce3e   Tejun Heo   block: prepare fo...
1199
  	rq = mempool_alloc(rl->rq_pool, gfp_mask);
29e2b09ab   Tejun Heo   block: collapse b...
1200
  	if (!rq)
b679281a6   Tejun Heo   block: restructur...
1201
  		goto fail_alloc;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1202

29e2b09ab   Tejun Heo   block: collapse b...
1203
  	blk_rq_init(q, rq);
a051661ca   Tejun Heo   blkcg: implement ...
1204
  	blk_rq_set_rl(rq, rl);
ef295ecf0   Christoph Hellwig   block: better op ...
1205
  	rq->cmd_flags = op;
e80640213   Christoph Hellwig   block: split out ...
1206
  	rq->rq_flags = rq_flags;
29e2b09ab   Tejun Heo   block: collapse b...
1207

aaf7c6806   Tejun Heo   block: fix elvpri...
1208
  	/* init elvpriv */
e80640213   Christoph Hellwig   block: split out ...
1209
  	if (rq_flags & RQF_ELVPRIV) {
aaf7c6806   Tejun Heo   block: fix elvpri...
1210
  		if (unlikely(et->icq_cache && !icq)) {
7f4b35d15   Tejun Heo   block: allocate i...
1211
1212
  			if (ioc)
  				icq = ioc_create_icq(ioc, q, gfp_mask);
aaf7c6806   Tejun Heo   block: fix elvpri...
1213
1214
  			if (!icq)
  				goto fail_elvpriv;
29e2b09ab   Tejun Heo   block: collapse b...
1215
  		}
aaf7c6806   Tejun Heo   block: fix elvpri...
1216
1217
1218
1219
1220
1221
  
  		rq->elv.icq = icq;
  		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
  			goto fail_elvpriv;
  
  		/* @rq->elv.icq holds io_context until @rq is freed */
29e2b09ab   Tejun Heo   block: collapse b...
1222
1223
1224
  		if (icq)
  			get_io_context(icq->ioc);
  	}
aaf7c6806   Tejun Heo   block: fix elvpri...
1225
  out:
88ee5ef15   Jens Axboe   [BLOCK] ll_rw_blk...
1226
1227
1228
1229
1230
1231
  	/*
  	 * ioc may be NULL here, and ioc_batching will be false. That's
  	 * OK; if the queue is under the request limit then requests need
  	 * not count toward the nr_batch_requests limit. There will always
  	 * be some limit enforced by BLK_BATCH_TIME.
  	 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1232
1233
  	if (ioc_batching(q, ioc))
  		ioc->nr_batch_requests--;
6728cb0e6   Jens Axboe   block: make core ...
1234

e6a40b096   Mike Christie   block: prepare re...
1235
  	trace_block_getrq(q, bio, op);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1236
  	return rq;
b679281a6   Tejun Heo   block: restructur...
1237

aaf7c6806   Tejun Heo   block: fix elvpri...
1238
1239
1240
1241
1242
1243
1244
  fail_elvpriv:
  	/*
  	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
  	 * and may fail indefinitely under memory pressure and thus
  	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
  	 * disturb iosched and blkcg but weird is better than dead.
  	 */
7b2b10e0e   Robert Elliott   block: include fu...
1245
1246
  	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
dc3b17cc8   Jan Kara   block: Use pointe...
1247
  			   __func__, dev_name(q->backing_dev_info->dev));
aaf7c6806   Tejun Heo   block: fix elvpri...
1248

e80640213   Christoph Hellwig   block: split out ...
1249
  	rq->rq_flags &= ~RQF_ELVPRIV;
aaf7c6806   Tejun Heo   block: fix elvpri...
1250
1251
1252
  	rq->elv.icq = NULL;
  
  	spin_lock_irq(q->queue_lock);
8a5ecdd42   Tejun Heo   block: add q->nr_...
1253
  	q->nr_rqs_elvpriv--;
aaf7c6806   Tejun Heo   block: fix elvpri...
1254
1255
  	spin_unlock_irq(q->queue_lock);
  	goto out;
b679281a6   Tejun Heo   block: restructur...
1256
1257
1258
1259
1260
1261
1262
1263
1264
  fail_alloc:
  	/*
  	 * Allocation failed presumably due to memory. Undo anything we
  	 * might have messed up.
  	 *
  	 * The allocating task should really be put at the front of the wait
  	 * queue, but this is pretty rare.
  	 */
  	spin_lock_irq(q->queue_lock);
e80640213   Christoph Hellwig   block: split out ...
1265
  	freed_request(rl, is_sync, rq_flags);
b679281a6   Tejun Heo   block: restructur...
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
  
  	/*
  	 * In the very unlikely event that allocation failed and no
  	 * requests for this direction were pending, mark us starved so that
  	 * freeing of a request in the other direction will notice
  	 * us. Another possible fix would be to split the rq mempool into
  	 * READ and WRITE.
  	 */
  rq_starved:
  	if (unlikely(rl->count[is_sync] == 0))
  		rl->starved[is_sync] = 1;
a492f0754   Joe Lawrence   block,scsi: fixup...
1277
  	return ERR_PTR(-ENOMEM);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1278
  }
da8303c63   Tejun Heo   block: make get_r...
1279
  /**
a06e05e6a   Tejun Heo   block: refactor g...
1280
   * get_request - get a free request
da8303c63   Tejun Heo   block: make get_r...
1281
   * @q: request_queue to allocate request from
ef295ecf0   Christoph Hellwig   block: better op ...
1282
   * @op: operation and flags
da8303c63   Tejun Heo   block: make get_r...
1283
   * @bio: bio to allocate request for (can be %NULL)
a06e05e6a   Tejun Heo   block: refactor g...
1284
   * @gfp_mask: allocation mask
da8303c63   Tejun Heo   block: make get_r...
1285
   *
d0164adc8   Mel Gorman   mm, page_alloc: d...
1286
1287
   * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
   * this function keeps retrying under memory pressure and fails iff @q is dead.
d6344532a   Nick Piggin   [PATCH] blk: redu...
1288
   *
da3dae54e   Masanari Iida   Documentation: Do...
1289
   * Must be called with @q->queue_lock held and,
a492f0754   Joe Lawrence   block,scsi: fixup...
1290
1291
   * Returns ERR_PTR on failure, with @q->queue_lock held.
   * Returns request pointer on success, with @q->queue_lock *not held*.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1292
   */
ef295ecf0   Christoph Hellwig   block: better op ...
1293
1294
  static struct request *get_request(struct request_queue *q, unsigned int op,
  		struct bio *bio, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1295
  {
ef295ecf0   Christoph Hellwig   block: better op ...
1296
  	const bool is_sync = op_is_sync(op);
a06e05e6a   Tejun Heo   block: refactor g...
1297
  	DEFINE_WAIT(wait);
a051661ca   Tejun Heo   blkcg: implement ...
1298
  	struct request_list *rl;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1299
  	struct request *rq;
a051661ca   Tejun Heo   blkcg: implement ...
1300

2fff8a924   Bart Van Assche   block: Check lock...
1301
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
1302
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...
1303

a051661ca   Tejun Heo   blkcg: implement ...
1304
  	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
a06e05e6a   Tejun Heo   block: refactor g...
1305
  retry:
ef295ecf0   Christoph Hellwig   block: better op ...
1306
  	rq = __get_request(rl, op, bio, gfp_mask);
a492f0754   Joe Lawrence   block,scsi: fixup...
1307
  	if (!IS_ERR(rq))
a06e05e6a   Tejun Heo   block: refactor g...
1308
  		return rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1309

03a07c92a   Goldwyn Rodrigues   block: return on ...
1310
1311
1312
1313
  	if (op & REQ_NOWAIT) {
  		blk_put_rl(rl);
  		return ERR_PTR(-EAGAIN);
  	}
d0164adc8   Mel Gorman   mm, page_alloc: d...
1314
  	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
a051661ca   Tejun Heo   blkcg: implement ...
1315
  		blk_put_rl(rl);
a492f0754   Joe Lawrence   block,scsi: fixup...
1316
  		return rq;
a051661ca   Tejun Heo   blkcg: implement ...
1317
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1318

a06e05e6a   Tejun Heo   block: refactor g...
1319
1320
1321
  	/* wait on @rl and retry */
  	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
  				  TASK_UNINTERRUPTIBLE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1322

e6a40b096   Mike Christie   block: prepare re...
1323
  	trace_block_sleeprq(q, bio, op);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1324

a06e05e6a   Tejun Heo   block: refactor g...
1325
1326
  	spin_unlock_irq(q->queue_lock);
  	io_schedule();
d6344532a   Nick Piggin   [PATCH] blk: redu...
1327

a06e05e6a   Tejun Heo   block: refactor g...
1328
1329
1330
1331
1332
  	/*
  	 * After sleeping, we become a "batching" process and will be able
  	 * to allocate at least one request, and up to a big batch of them
  	 * for a small period of time.  See ioc_batching, ioc_set_batching.
  	 */
a06e05e6a   Tejun Heo   block: refactor g...
1333
  	ioc_set_batching(q, current->io_context);
05caf8dbc   Zhang, Yanmin   block: Move the s...
1334

a06e05e6a   Tejun Heo   block: refactor g...
1335
1336
  	spin_lock_irq(q->queue_lock);
  	finish_wait(&rl->wait[is_sync], &wait);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1337

a06e05e6a   Tejun Heo   block: refactor g...
1338
  	goto retry;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1339
  }
cd6ce1482   Bart Van Assche   block: Make reque...
1340
1341
  static struct request *blk_old_get_request(struct request_queue *q,
  					   unsigned int op, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1342
1343
  {
  	struct request *rq;
332ebbf7f   Bart Van Assche   block: Document w...
1344
  	WARN_ON_ONCE(q->mq_ops);
7f4b35d15   Tejun Heo   block: allocate i...
1345
1346
  	/* create ioc upfront */
  	create_io_context(gfp_mask, q->node);
d6344532a   Nick Piggin   [PATCH] blk: redu...
1347
  	spin_lock_irq(q->queue_lock);
cd6ce1482   Bart Van Assche   block: Make reque...
1348
  	rq = get_request(q, op, NULL, gfp_mask);
0c4de0f33   Christoph Hellwig   block: ensure bio...
1349
  	if (IS_ERR(rq)) {
da8303c63   Tejun Heo   block: make get_r...
1350
  		spin_unlock_irq(q->queue_lock);
0c4de0f33   Christoph Hellwig   block: ensure bio...
1351
1352
  		return rq;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1353

0c4de0f33   Christoph Hellwig   block: ensure bio...
1354
1355
1356
1357
  	/* q->queue_lock is unlocked at this point */
  	rq->__data_len = 0;
  	rq->__sector = (sector_t) -1;
  	rq->bio = rq->biotail = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1358
1359
  	return rq;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
1360

cd6ce1482   Bart Van Assche   block: Make reque...
1361
1362
  struct request *blk_get_request(struct request_queue *q, unsigned int op,
  				gfp_t gfp_mask)
320ae51fe   Jens Axboe   blk-mq: new multi...
1363
  {
d280bab30   Bart Van Assche   block: Introduce ...
1364
1365
1366
1367
  	struct request *req;
  
  	if (q->mq_ops) {
  		req = blk_mq_alloc_request(q, op,
6f3b0e8bc   Christoph Hellwig   blk-mq: add a fla...
1368
1369
  			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
  				0 : BLK_MQ_REQ_NOWAIT);
d280bab30   Bart Van Assche   block: Introduce ...
1370
1371
1372
1373
1374
1375
1376
1377
1378
  		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
  			q->mq_ops->initialize_rq_fn(req);
  	} else {
  		req = blk_old_get_request(q, op, gfp_mask);
  		if (!IS_ERR(req) && q->initialize_rq_fn)
  			q->initialize_rq_fn(req);
  	}
  
  	return req;
320ae51fe   Jens Axboe   blk-mq: new multi...
1379
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
  EXPORT_SYMBOL(blk_get_request);
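
  /*
   * A hedged usage sketch (modelled on common callers, not taken from this
   * file): allocating a driver-private request, executing it synchronously
   * and releasing it:
   *
   *	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
   *	if (IS_ERR(rq))
   *		return PTR_ERR(rq);
   *	blk_execute_rq(q, NULL, rq, 0);
   *	blk_put_request(rq);
   */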
  
  /**
   * blk_requeue_request - put a request back on queue
   * @q:		request queue where request should be inserted
   * @rq:		request to be inserted
   *
   * Description:
   *    Drivers often keep queueing requests until the hardware cannot accept
   *    more; when that condition happens we need to put the request back
   *    on the queue. Must be called with queue lock held.
   */
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1392
  void blk_requeue_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1393
  {
2fff8a924   Bart Van Assche   block: Check lock...
1394
  	lockdep_assert_held(q->queue_lock);
332ebbf7f   Bart Van Assche   block: Document w...
1395
  	WARN_ON_ONCE(q->mq_ops);
2fff8a924   Bart Van Assche   block: Check lock...
1396

242f9dcb8   Jens Axboe   block: unify requ...
1397
1398
  	blk_delete_timer(rq);
  	blk_clear_rq_complete(rq);
5f3ea37c7   Arnaldo Carvalho de Melo   blktrace: port to...
1399
  	trace_block_rq_requeue(q, rq);
87760e5ee   Jens Axboe   block: hook up wr...
1400
  	wbt_requeue(q->rq_wb, &rq->issue_stat);
2056a782f   Jens Axboe   [PATCH] Block que...
1401

e80640213   Christoph Hellwig   block: split out ...
1402
  	if (rq->rq_flags & RQF_QUEUED)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1403
  		blk_queue_end_tag(q, rq);
ba396a6c1   James Bottomley   block: fix oops w...
1404
  	BUG_ON(blk_queued_rq(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1405
1406
  	elv_requeue_request(q, rq);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1407
  EXPORT_SYMBOL(blk_requeue_request);
73c101011   Jens Axboe   block: initial pa...
1408
1409
1410
  static void add_acct_request(struct request_queue *q, struct request *rq,
  			     int where)
  {
320ae51fe   Jens Axboe   blk-mq: new multi...
1411
  	blk_account_io_start(rq, true);
7eaceacca   Jens Axboe   block: remove per...
1412
  	__elv_add_request(q, rq, where);
73c101011   Jens Axboe   block: initial pa...
1413
  }
d62e26b3f   Jens Axboe   block: pass in qu...
1414
  static void part_round_stats_single(struct request_queue *q, int cpu,
b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1415
1416
  				    struct hd_struct *part, unsigned long now,
  				    unsigned int inflight)
074a7aca7   Tejun Heo   block: move stats...
1417
  {
b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1418
  	if (inflight) {
074a7aca7   Tejun Heo   block: move stats...
1419
  		__part_stat_add(cpu, part, time_in_queue,
b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1420
  				inflight * (now - part->stamp));
074a7aca7   Tejun Heo   block: move stats...
1421
1422
1423
1424
1425
1426
  		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
  	}
  	part->stamp = now;
  }
  
  /**
496aa8a98   Randy Dunlap   block: fix curren...
1427
   * part_round_stats() - Round off the performance stats on a struct disk_stats.
d62e26b3f   Jens Axboe   block: pass in qu...
1428
   * @q: target block queue
496aa8a98   Randy Dunlap   block: fix curren...
1429
1430
   * @cpu: cpu number for stats access
   * @part: target partition
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
   *
   * The average IO queue length and utilisation statistics are maintained
   * by observing the current state of the queue length and the amount of
   * time it has been in this state for.
   *
   * Normally, that accounting is done on IO completion, but that can result
   * in more than a second's worth of IO being accounted for within any one
   * second, leading to >100% utilisation.  To deal with that, we call this
   * function to do a round-off before returning the results when reading
   * /proc/diskstats.  This accounts immediately for all queue usage up to
   * the current jiffies and restarts the counters again.
   */
d62e26b3f   Jens Axboe   block: pass in qu...
1443
  void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
6f2576af5   Jerome Marchand   Enhanced partitio...
1444
  {
b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1445
  	struct hd_struct *part2 = NULL;
6f2576af5   Jerome Marchand   Enhanced partitio...
1446
  	unsigned long now = jiffies;
b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
  	unsigned int inflight[2];
  	int stats = 0;
  
  	if (part->stamp != now)
  		stats |= 1;
  
  	if (part->partno) {
  		part2 = &part_to_disk(part)->part0;
  		if (part2->stamp != now)
  			stats |= 2;
  	}
  
  	if (!stats)
  		return;
  
  	part_in_flight(q, part, inflight);
6f2576af5   Jerome Marchand   Enhanced partitio...
1463

b8d62b3a9   Jens Axboe   blk-mq: enable ch...
1464
1465
1466
1467
  	if (stats & 2)
  		part_round_stats_single(q, cpu, part2, now, inflight[1]);
  	if (stats & 1)
  		part_round_stats_single(q, cpu, part, now, inflight[0]);
6f2576af5   Jerome Marchand   Enhanced partitio...
1468
  }
074a7aca7   Tejun Heo   block: move stats...
1469
  EXPORT_SYMBOL_GPL(part_round_stats);
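
  /*
   * Worked example of the rounding above (numbers illustrative): with three
   * requests in flight and 10 jiffies elapsed since part->stamp,
   * time_in_queue grows by 3 * 10 jiffies while io_ticks grows by 10, so a
   * mid-interval read of /proc/diskstats still sees <= 100% utilisation.
   */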
6f2576af5   Jerome Marchand   Enhanced partitio...
1470

47fafbc70   Rafael J. Wysocki   block / PM: Repla...
1471
  #ifdef CONFIG_PM
c8158819d   Lin Ming   block: implement ...
1472
1473
  static void blk_pm_put_request(struct request *rq)
  {
e80640213   Christoph Hellwig   block: split out ...
1474
  	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
c8158819d   Lin Ming   block: implement ...
1475
1476
1477
1478
1479
  		pm_runtime_mark_last_busy(rq->q->dev);
  }
  #else
  static inline void blk_pm_put_request(struct request *rq) {}
  #endif
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1480
  void __blk_put_request(struct request_queue *q, struct request *req)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1481
  {
e80640213   Christoph Hellwig   block: split out ...
1482
  	req_flags_t rq_flags = req->rq_flags;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1483
1484
  	if (unlikely(!q))
  		return;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1485

6f5ba581c   Christoph Hellwig   blk-mq: divert __...
1486
1487
1488
1489
  	if (q->mq_ops) {
  		blk_mq_free_request(req);
  		return;
  	}
2fff8a924   Bart Van Assche   block: Check lock...
1490
  	lockdep_assert_held(q->queue_lock);
c8158819d   Lin Ming   block: implement ...
1491
  	blk_pm_put_request(req);
8922e16cf   Tejun Heo   [PATCH] 01/05 Imp...
1492
  	elv_completed_request(q, req);
1cd96c242   Boaz Harrosh   block: WARN in __...
1493
1494
  	/* this is a bio leak */
  	WARN_ON(req->bio != NULL);
87760e5ee   Jens Axboe   block: hook up wr...
1495
  	wbt_done(q->rq_wb, &req->issue_stat);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1496
1497
1498
1499
  	/*
  	 * The request may not have originated from ll_rw_blk. If not,
  	 * it didn't come out of our reserved rq pools.
  	 */
e80640213   Christoph Hellwig   block: split out ...
1500
  	if (rq_flags & RQF_ALLOCED) {
a051661ca   Tejun Heo   blkcg: implement ...
1501
  		struct request_list *rl = blk_rq_rl(req);
ef295ecf0   Christoph Hellwig   block: better op ...
1502
  		bool sync = op_is_sync(req->cmd_flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1503

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1504
  		BUG_ON(!list_empty(&req->queuelist));
360f92c24   Jens Axboe   block: fix regres...
1505
  		BUG_ON(ELV_ON_HASH(req));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1506

a051661ca   Tejun Heo   blkcg: implement ...
1507
  		blk_free_request(rl, req);
e80640213   Christoph Hellwig   block: split out ...
1508
  		freed_request(rl, sync, rq_flags);
a051661ca   Tejun Heo   blkcg: implement ...
1509
  		blk_put_rl(rl);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1510
1511
  	}
  }
6e39b69e7   Mike Christie   [SCSI] export blk...
1512
  EXPORT_SYMBOL_GPL(__blk_put_request);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1513
1514
  void blk_put_request(struct request *req)
  {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1515
  	struct request_queue *q = req->q;
8922e16cf   Tejun Heo   [PATCH] 01/05 Imp...
1516

320ae51fe   Jens Axboe   blk-mq: new multi...
1517
1518
1519
1520
1521
1522
1523
1524
1525
  	if (q->mq_ops)
  		blk_mq_free_request(req);
  	else {
  		unsigned long flags;
  
  		spin_lock_irqsave(q->queue_lock, flags);
  		__blk_put_request(q, req);
  		spin_unlock_irqrestore(q->queue_lock, flags);
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1526
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1527
  EXPORT_SYMBOL(blk_put_request);
320ae51fe   Jens Axboe   blk-mq: new multi...
1528
1529
  bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
  			    struct bio *bio)
73c101011   Jens Axboe   block: initial pa...
1530
  {
1eff9d322   Jens Axboe   block: rename bio...
1531
  	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c101011   Jens Axboe   block: initial pa...
1532

73c101011   Jens Axboe   block: initial pa...
1533
1534
  	if (!ll_back_merge_fn(q, req, bio))
  		return false;
8c1cf6bb0   Tejun Heo   block: add @req t...
1535
  	trace_block_bio_backmerge(q, req, bio);
73c101011   Jens Axboe   block: initial pa...
1536
1537
1538
1539
1540
1541
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
  
  	req->biotail->bi_next = bio;
  	req->biotail = bio;
4f024f379   Kent Overstreet   block: Abstract o...
1542
  	req->__data_len += bio->bi_iter.bi_size;
73c101011   Jens Axboe   block: initial pa...
1543
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
320ae51fe   Jens Axboe   blk-mq: new multi...
1544
  	blk_account_io_start(req, false);
73c101011   Jens Axboe   block: initial pa...
1545
1546
  	return true;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
1547
1548
  bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  			     struct bio *bio)
73c101011   Jens Axboe   block: initial pa...
1549
  {
1eff9d322   Jens Axboe   block: rename bio...
1550
  	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c101011   Jens Axboe   block: initial pa...
1551

73c101011   Jens Axboe   block: initial pa...
1552
1553
  	if (!ll_front_merge_fn(q, req, bio))
  		return false;
8c1cf6bb0   Tejun Heo   block: add @req t...
1554
  	trace_block_bio_frontmerge(q, req, bio);
73c101011   Jens Axboe   block: initial pa...
1555
1556
1557
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
73c101011   Jens Axboe   block: initial pa...
1558
1559
  	bio->bi_next = req->bio;
  	req->bio = bio;
4f024f379   Kent Overstreet   block: Abstract o...
1560
1561
  	req->__sector = bio->bi_iter.bi_sector;
  	req->__data_len += bio->bi_iter.bi_size;
73c101011   Jens Axboe   block: initial pa...
1562
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
320ae51fe   Jens Axboe   blk-mq: new multi...
1563
  	blk_account_io_start(req, false);
73c101011   Jens Axboe   block: initial pa...
1564
1565
  	return true;
  }
1e739730c   Christoph Hellwig   block: optionally...
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
  bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
  		struct bio *bio)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	req->biotail->bi_next = bio;
  	req->biotail = bio;
  	req->__data_len += bio->bi_iter.bi_size;
  	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
  	req->nr_phys_segments = segments + 1;
  
  	blk_account_io_start(req, false);
  	return true;
  no_merge:
  	req_set_nomerge(q, req);
  	return false;
  }
bd87b5898   Tejun Heo   block: drop @tsk ...
1589
  /**
320ae51fe   Jens Axboe   blk-mq: new multi...
1590
   * blk_attempt_plug_merge - try to merge with %current's plugged list
bd87b5898   Tejun Heo   block: drop @tsk ...
1591
1592
1593
   * @q: request_queue new bio is being queued at
   * @bio: new bio being queued
   * @request_count: out parameter for number of traversed plugged requests
ccc2600b8   Randy Dunlap   block: fix blk-co...
1594
1595
1596
   * @same_queue_rq: pointer to &struct request that gets filled in when
   * another request associated with @q is found on the plug list
   * (optional, may be %NULL)
bd87b5898   Tejun Heo   block: drop @tsk ...
1597
1598
1599
1600
1601
   *
   * Determine whether @bio being queued on @q can be merged with a request
   * on %current's plugged list.  Returns %true if merge was successful,
   * otherwise %false.
   *
07c2bd373   Tejun Heo   block: don't call...
1602
1603
1604
1605
1606
1607
   * Plugging coalesces IOs from the same issuer for the same purpose without
   * going through @q->queue_lock.  As such it's more of an issuing mechanism
   * than scheduling, and the request, while it may have elvpriv data, is not
   * added to the elevator at this point.  In addition, we don't have
   * reliable access to the elevator outside queue lock.  Only check basic
   * merging parameters without querying the elevator.
da41a589f   Robert Elliott   blk-mq: Micro-opt...
1608
1609
   *
   * Caller must ensure !blk_queue_nomerges(q) beforehand.
73c101011   Jens Axboe   block: initial pa...
1610
   */
320ae51fe   Jens Axboe   blk-mq: new multi...
1611
  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
5b3f341f0   Shaohua Li   blk-mq: make plug...
1612
1613
  			    unsigned int *request_count,
  			    struct request **same_queue_rq)
73c101011   Jens Axboe   block: initial pa...
1614
1615
1616
  {
  	struct blk_plug *plug;
  	struct request *rq;
92f399c72   Shaohua Li   blk-mq: mq plug l...
1617
  	struct list_head *plug_list;
73c101011   Jens Axboe   block: initial pa...
1618

bd87b5898   Tejun Heo   block: drop @tsk ...
1619
  	plug = current->plug;
73c101011   Jens Axboe   block: initial pa...
1620
  	if (!plug)
34fe7c054   Christoph Hellwig   block: enumify EL...
1621
  		return false;
56ebdaf2f   Shaohua Li   block: simplify f...
1622
  	*request_count = 0;
73c101011   Jens Axboe   block: initial pa...
1623

92f399c72   Shaohua Li   blk-mq: mq plug l...
1624
1625
1626
1627
1628
1629
  	if (q->mq_ops)
  		plug_list = &plug->mq_list;
  	else
  		plug_list = &plug->list;
  
  	list_for_each_entry_reverse(rq, plug_list, queuelist) {
34fe7c054   Christoph Hellwig   block: enumify EL...
1630
  		bool merged = false;
73c101011   Jens Axboe   block: initial pa...
1631

5b3f341f0   Shaohua Li   blk-mq: make plug...
1632
  		if (rq->q == q) {
1b2e19f17   Shaohua Li   block: make auto ...
1633
  			(*request_count)++;
5b3f341f0   Shaohua Li   blk-mq: make plug...
1634
1635
1636
1637
1638
1639
1640
1641
  			/*
  			 * Only the blk-mq multiple hardware queues case checks
  			 * rqs on the same queue; there should be only one such
  			 * rq in a queue.
  			 */
  			if (same_queue_rq)
  				*same_queue_rq = rq;
  		}
56ebdaf2f   Shaohua Li   block: simplify f...
1642

07c2bd373   Tejun Heo   block: don't call...
1643
  		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
73c101011   Jens Axboe   block: initial pa...
1644
  			continue;
34fe7c054   Christoph Hellwig   block: enumify EL...
1645
1646
1647
1648
1649
1650
1651
  		switch (blk_try_merge(rq, bio)) {
  		case ELEVATOR_BACK_MERGE:
  			merged = bio_attempt_back_merge(q, rq, bio);
  			break;
  		case ELEVATOR_FRONT_MERGE:
  			merged = bio_attempt_front_merge(q, rq, bio);
  			break;
1e739730c   Christoph Hellwig   block: optionally...
1652
1653
1654
  		case ELEVATOR_DISCARD_MERGE:
  			merged = bio_attempt_discard_merge(q, rq, bio);
  			break;
34fe7c054   Christoph Hellwig   block: enumify EL...
1655
1656
  		default:
  			break;
73c101011   Jens Axboe   block: initial pa...
1657
  		}
34fe7c054   Christoph Hellwig   block: enumify EL...
1658
1659
1660
  
  		if (merged)
  			return true;
73c101011   Jens Axboe   block: initial pa...
1661
  	}
34fe7c054   Christoph Hellwig   block: enumify EL...
1662
1663
  
  	return false;
73c101011   Jens Axboe   block: initial pa...
1664
  }
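
  /*
   * For context, a sketch of the plugging pattern this merge path relies
   * on (illustrative, not part of this file): the submitter batches bios
   * behind a plug so they can be merged here before taking the queue lock:
   *
   *	struct blk_plug plug;
   *
   *	blk_start_plug(&plug);
   *	submit_bio(bio1);
   *	submit_bio(bio2);	(may merge with bio1 on the plug list)
   *	blk_finish_plug(&plug);
   */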
0809e3ac6   Jeff Moyer   block: fix plug l...
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
  unsigned int blk_plug_queued_count(struct request_queue *q)
  {
  	struct blk_plug *plug;
  	struct request *rq;
  	struct list_head *plug_list;
  	unsigned int ret = 0;
  
  	plug = current->plug;
  	if (!plug)
  		goto out;
  
  	if (q->mq_ops)
  		plug_list = &plug->mq_list;
  	else
  		plug_list = &plug->list;
  
  	list_for_each_entry(rq, plug_list, queuelist) {
  		if (rq->q == q)
  			ret++;
  	}
  out:
  	return ret;
  }
da8d7f079   Bart Van Assche   block: Export blk...
1688
  void blk_init_request_from_bio(struct request *req, struct bio *bio)
52d9e6753   Tejun Heo   [BLOCK] ll_rw_blk...
1689
  {
0be0dee64   Bart Van Assche   block: Inline blk...
1690
  	struct io_context *ioc = rq_ioc(bio);
1eff9d322   Jens Axboe   block: rename bio...
1691
  	if (bio->bi_opf & REQ_RAHEAD)
a82afdfcb   Tejun Heo   block: use the sa...
1692
  		req->cmd_flags |= REQ_FAILFAST_MASK;
b31dc66a5   Jens Axboe   [PATCH] Kill PF_S...
1693

4f024f379   Kent Overstreet   block: Abstract o...
1694
  	req->__sector = bio->bi_iter.bi_sector;
5dc8b362a   Adam Manzanares   block: Add iocont...
1695
1696
  	if (ioprio_valid(bio_prio(bio)))
  		req->ioprio = bio_prio(bio);
0be0dee64   Bart Van Assche   block: Inline blk...
1697
1698
1699
1700
  	else if (ioc)
  		req->ioprio = ioc->ioprio;
  	else
  		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
cb6934f8e   Jens Axboe   block: add suppor...
1701
  	req->write_hint = bio->bi_write_hint;
bc1c56fde   NeilBrown   Share code betwee...
1702
  	blk_rq_bio_prep(req->q, req, bio);
52d9e6753   Tejun Heo   [BLOCK] ll_rw_blk...
1703
  }
da8d7f079   Bart Van Assche   block: Export blk...
1704
  EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
52d9e6753   Tejun Heo   [BLOCK] ll_rw_blk...
1705

dece16353   Jens Axboe   block: change ->m...
1706
  static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1707
  {
73c101011   Jens Axboe   block: initial pa...
1708
  	struct blk_plug *plug;
34fe7c054   Christoph Hellwig   block: enumify EL...
1709
  	int where = ELEVATOR_INSERT_SORT;
e4d750c97   Jens Axboe   block: free merge...
1710
  	struct request *req, *free;
56ebdaf2f   Shaohua Li   block: simplify f...
1711
  	unsigned int request_count = 0;
87760e5ee   Jens Axboe   block: hook up wr...
1712
  	unsigned int wb_acct;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1713

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1714
1715
1716
1717
1718
1719
  	/*
  	 * The low level driver can indicate that it wants pages above a
  	 * certain limit bounced to low memory (i.e. for highmem, or even
  	 * ISA DMA in theory).
  	 */
  	blk_queue_bounce(q, &bio);
af67c31fb   NeilBrown   blk: remove bio_s...
1720
  	blk_queue_split(q, &bio);
23688bf4f   Junichi Nomura   block: ensure to ...
1721

e23947bd7   Dmitry Monakhov   bio-integrity: fo...
1722
  	if (!bio_integrity_prep(bio))
dece16353   Jens Axboe   block: change ->m...
1723
  		return BLK_QC_T_NONE;
ffecfd1a7   Darrick J. Wong   block: optionally...
1724

f73f44eb0   Christoph Hellwig   block: add a op_i...
1725
  	if (op_is_flush(bio->bi_opf)) {
73c101011   Jens Axboe   block: initial pa...
1726
  		spin_lock_irq(q->queue_lock);
ae1b15396   Tejun Heo   block: reimplemen...
1727
  		where = ELEVATOR_INSERT_FLUSH;
28e7d1845   Tejun Heo   block: drop barri...
1728
1729
  		goto get_rq;
  	}
73c101011   Jens Axboe   block: initial pa...
1730
1731
1732
1733
  	/*
  	 * Check if we can merge with the plugged list before grabbing
  	 * any locks.
  	 */
0809e3ac6   Jeff Moyer   block: fix plug l...
1734
1735
  	if (!blk_queue_nomerges(q)) {
  		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
dece16353   Jens Axboe   block: change ->m...
1736
  			return BLK_QC_T_NONE;
0809e3ac6   Jeff Moyer   block: fix plug l...
1737
1738
  	} else
  		request_count = blk_plug_queued_count(q);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1739

73c101011   Jens Axboe   block: initial pa...
1740
  	spin_lock_irq(q->queue_lock);
2056a782f   Jens Axboe   [PATCH] Block que...
1741

34fe7c054   Christoph Hellwig   block: enumify EL...
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
  	switch (elv_merge(q, &req, bio)) {
  	case ELEVATOR_BACK_MERGE:
  		if (!bio_attempt_back_merge(q, req, bio))
  			break;
  		elv_bio_merged(q, req, bio);
  		free = attempt_back_merge(q, req);
  		if (free)
  			__blk_put_request(q, free);
  		else
  			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
  		goto out_unlock;
  	case ELEVATOR_FRONT_MERGE:
  		if (!bio_attempt_front_merge(q, req, bio))
  			break;
  		elv_bio_merged(q, req, bio);
  		free = attempt_front_merge(q, req);
  		if (free)
  			__blk_put_request(q, free);
  		else
  			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
  		goto out_unlock;
  	default:
  		break;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1765
  	}
450991bc1   Nick Piggin   [PATCH] blk: __ma...
1766
  get_rq:
87760e5ee   Jens Axboe   block: hook up wr...
1767
  	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1768
  	/*
450991bc1   Nick Piggin   [PATCH] blk: __ma...
1769
  	 * Grab a free request. This might sleep but cannot fail.
d6344532a   Nick Piggin   [PATCH] blk: redu...
1770
  	 * Returns with the queue unlocked.
450991bc1   Nick Piggin   [PATCH] blk: __ma...
1771
  	 */
ef295ecf0   Christoph Hellwig   block: better op ...
1772
  	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
a492f0754   Joe Lawrence   block,scsi: fixup...
1773
  	if (IS_ERR(req)) {
87760e5ee   Jens Axboe   block: hook up wr...
1774
  		__wbt_done(q->rq_wb, wb_acct);
4e4cbee93   Christoph Hellwig   block: switch bio...
1775
1776
1777
1778
  		if (PTR_ERR(req) == -ENOMEM)
  			bio->bi_status = BLK_STS_RESOURCE;
  		else
  			bio->bi_status = BLK_STS_IOERR;
4246a0b63   Christoph Hellwig   block: add a bi_e...
1779
  		bio_endio(bio);
da8303c63   Tejun Heo   block: make get_r...
1780
1781
  		goto out_unlock;
  	}
d6344532a   Nick Piggin   [PATCH] blk: redu...
1782

87760e5ee   Jens Axboe   block: hook up wr...
1783
  	wbt_track(&req->issue_stat, wb_acct);
450991bc1   Nick Piggin   [PATCH] blk: __ma...
1784
1785
1786
1787
1788
  	/*
  	 * After dropping the lock and possibly sleeping here, our request
  	 * may now be mergeable after it had proven unmergeable (above).
  	 * We don't worry about that case for efficiency. It won't happen
  	 * often, and the elevators are able to handle it.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1789
  	 */
da8d7f079   Bart Van Assche   block: Export blk...
1790
  	blk_init_request_from_bio(req, bio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1791

9562ad9ab   Tao Ma   block: Remove the...
1792
  	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
11ccf116d   Jens Axboe   block: fix warnin...
1793
  		req->cpu = raw_smp_processor_id();
73c101011   Jens Axboe   block: initial pa...
1794
1795
  
  	plug = current->plug;
721a9602e   Jens Axboe   block: kill off R...
1796
  	if (plug) {
dc6d36c97   Jens Axboe   block: readd plug...
1797
1798
  		/*
  		 * If this is the first request added after a plug, fire
7aef2e780   Jianpeng Ma   block: trace all ...
1799
  		 * off a plug trace.
0a6219a95   Ming Lei   block: deal with ...
1800
1801
1802
  		 *
  		 * @request_count may become stale because of schedule
  		 * out, so check plug list again.
dc6d36c97   Jens Axboe   block: readd plug...
1803
  		 */
0a6219a95   Ming Lei   block: deal with ...
1804
  		if (!request_count || list_empty(&plug->list))
dc6d36c97   Jens Axboe   block: readd plug...
1805
  			trace_block_plug(q);
3540d5e89   Shaohua Li   block: avoid unne...
1806
  		else {
50d24c344   Shaohua Li   block: immediatel...
1807
1808
1809
  			struct request *last = list_entry_rq(plug->list.prev);
  			if (request_count >= BLK_MAX_REQUEST_COUNT ||
  			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
3540d5e89   Shaohua Li   block: avoid unne...
1810
  				blk_flush_plug_list(plug, false);
019ceb7d5   Shaohua Li   block: add missed...
1811
1812
  				trace_block_plug(q);
  			}
73c101011   Jens Axboe   block: initial pa...
1813
  		}
73c101011   Jens Axboe   block: initial pa...
1814
  		list_add_tail(&req->queuelist, &plug->list);
320ae51fe   Jens Axboe   blk-mq: new multi...
1815
  		blk_account_io_start(req, true);
73c101011   Jens Axboe   block: initial pa...
1816
1817
1818
  	} else {
  		spin_lock_irq(q->queue_lock);
  		add_acct_request(q, req, where);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
1819
  		__blk_run_queue(q);
73c101011   Jens Axboe   block: initial pa...
1820
1821
1822
  out_unlock:
  		spin_unlock_irq(q->queue_lock);
  	}
dece16353   Jens Axboe   block: change ->m...
1823
1824
  
  	return BLK_QC_T_NONE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1825
  }
c3a8759ec   Christoph Hellwig   block: bio_check_...
1826
  static void handle_bad_sector(struct bio *bio, sector_t maxsector)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1827
1828
1829
1830
1831
  {
  	char b[BDEVNAME_SIZE];
  
  	printk(KERN_INFO "attempt to access beyond end of device
  ");
6296b9604   Mike Christie   block, drivers, f...
1832
1833
  	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu
  ",
74d46992e   Christoph Hellwig   block: replace bi...
1834
  			bio_devname(bio, b), bio->bi_opf,
f73a1c7d1   Kent Overstreet   block: Add bio_en...
1835
  			(unsigned long long)bio_end_sector(bio),
c3a8759ec   Christoph Hellwig   block: bio_check_...
1836
  			(long long)maxsector);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1837
  }
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1838
1839
1840
1841
1842
1843
1844
1845
1846
  #ifdef CONFIG_FAIL_MAKE_REQUEST
  
  static DECLARE_FAULT_ATTR(fail_make_request);
  
  static int __init setup_fail_make_request(char *str)
  {
  	return setup_fault_attr(&fail_make_request, str);
  }
  __setup("fail_make_request=", setup_fail_make_request);
b2c9cd379   Akinobu Mita   fail_make_request...
1847
  static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1848
  {
b2c9cd379   Akinobu Mita   fail_make_request...
1849
  	return part->make_it_fail && should_fail(&fail_make_request, bytes);
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1850
1851
1852
1853
  }
  
  static int __init fail_make_request_debugfs(void)
  {
dd48c085c   Akinobu Mita   fault-injection: ...
1854
1855
  	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
  						NULL, &fail_make_request);
21f9fcd81   Duan Jiong   block: replace IS...
1856
  	return PTR_ERR_OR_ZERO(dir);
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1857
1858
1859
1860
1861
  }
  
  late_initcall(fail_make_request_debugfs);
  
  #else /* CONFIG_FAIL_MAKE_REQUEST */
b2c9cd379   Akinobu Mita   fail_make_request...
1862
1863
  static inline bool should_fail_request(struct hd_struct *part,
  					unsigned int bytes)
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1864
  {
b2c9cd379   Akinobu Mita   fail_make_request...
1865
  	return false;
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1866
1867
1868
  }
  
  #endif /* CONFIG_FAIL_MAKE_REQUEST */
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
  static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
  {
  	if (part->policy && op_is_write(bio_op(bio))) {
  		char b[BDEVNAME_SIZE];
  
  		printk(KERN_ERR
  		       "generic_make_request: Trying to write "
  			"to read-only block-device %s (partno %d)
  ",
  			bio_devname(bio, b), part->partno);
  		return true;
  	}
  
  	return false;
  }
c07e2b412   Jens Axboe   block: factor our...
1884
  /*
c3a8759ec   Christoph Hellwig   block: bio_check_...
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
   * Check whether this bio extends beyond the end of the device or partition.
   * This may well happen - the kernel calls bread() without checking the size of
   * the device, e.g., when mounting a file system.
   */
  static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
  {
  	unsigned int nr_sectors = bio_sectors(bio);
  
  	if (nr_sectors && maxsector &&
  	    (nr_sectors > maxsector ||
  	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
  		handle_bad_sector(bio, maxsector);
  		return -EIO;
  	}
  	return 0;
  }
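
  /*
   * Worked example (illustrative numbers): on a 1000-sector device, a
   * 16-sector bio starting at sector 990 fails the check above, since
   * 990 > 1000 - 16 means the bio would run past the end of the device.
   */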
  
  /*
74d46992e   Christoph Hellwig   block: replace bi...
1903
1904
1905
1906
1907
   * Remap block n of partition p to block n+start(p) of the disk.
   */
  static inline int blk_partition_remap(struct bio *bio)
  {
  	struct hd_struct *p;
c3a8759ec   Christoph Hellwig   block: bio_check_...
1908
  	int ret = -EIO;
74d46992e   Christoph Hellwig   block: replace bi...
1909

b4fb3baf3   Ilya Dryomov   block: fail op_is...
1910
1911
  	rcu_read_lock();
  	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
c3a8759ec   Christoph Hellwig   block: bio_check_...
1912
1913
1914
1915
1916
  	if (unlikely(!p))
  		goto out;
  	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
  		goto out;
  	if (unlikely(bio_check_ro(bio, p)))
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1917
  		goto out;
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1918

74d46992e   Christoph Hellwig   block: replace bi...
1919
1920
1921
1922
  	/*
  	 * Zone reset does not include bi_size so bio_sectors() is always 0.
  	 * Include a test for the reset op code and perform the remap if needed.
  	 */
c3a8759ec   Christoph Hellwig   block: bio_check_...
1923
1924
1925
1926
1927
1928
1929
1930
1931
  	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
  		if (bio_check_eod(bio, part_nr_sects_read(p)))
  			goto out;
  		bio->bi_iter.bi_sector += p->start_sect;
  		bio->bi_partno = 0;
  		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
  				      bio->bi_iter.bi_sector - p->start_sect);
  	}
  	ret = 0;
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1932
1933
  out:
  	rcu_read_unlock();
74d46992e   Christoph Hellwig   block: replace bi...
1934
1935
  	return ret;
  }
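
  /*
   * Example of the remap arithmetic (hypothetical layout): for a partition
   * starting at sector 2048, a bio addressed to sector 100 of the
   * partition leaves here with bi_sector == 2148 and bi_partno == 0,
   * i.e. expressed in whole-disk terms.
   */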
27a84d54c   Christoph Hellwig   block: refactor g...
1936
1937
  static noinline_for_stack bool
  generic_make_request_checks(struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1938
  {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1939
  	struct request_queue *q;
5a7bbad27   Christoph Hellwig   block: remove sup...
1940
  	int nr_sectors = bio_sectors(bio);
4e4cbee93   Christoph Hellwig   block: switch bio...
1941
  	blk_status_t status = BLK_STS_IOERR;
5a7bbad27   Christoph Hellwig   block: remove sup...
1942
  	char b[BDEVNAME_SIZE];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1943
1944
  
  	might_sleep();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1945

74d46992e   Christoph Hellwig   block: replace bi...
1946
  	q = bio->bi_disk->queue;
5a7bbad27   Christoph Hellwig   block: remove sup...
1947
1948
1949
1950
1951
  	if (unlikely(!q)) {
  		printk(KERN_ERR
  		       "generic_make_request: Trying to access "
  			"nonexistent block-device %s (%Lu)
  ",
74d46992e   Christoph Hellwig   block: replace bi...
1952
  			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
5a7bbad27   Christoph Hellwig   block: remove sup...
1953
1954
  		goto end_io;
  	}
c17bb4951   Akinobu Mita   [PATCH] fault-inj...
1955

03a07c92a   Goldwyn Rodrigues   block: return on ...
1956
1957
1958
1959
  	/*
  	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
  	 * if the queue is not a request based queue.
  	 */
03a07c92a   Goldwyn Rodrigues   block: return on ...
1960
1961
  	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
  		goto not_supported;
74d46992e   Christoph Hellwig   block: replace bi...
1962
  	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
5a7bbad27   Christoph Hellwig   block: remove sup...
1963
  		goto end_io;
2056a782f   Jens Axboe   [PATCH] Block que...
1964

c3a8759ec   Christoph Hellwig   block: bio_check_...
1965
1966
  	if (bio->bi_partno) {
  		if (unlikely(blk_partition_remap(bio)))
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1967
1968
  			goto end_io;
  	} else {
c3a8759ec   Christoph Hellwig   block: bio_check_...
1969
1970
1971
  		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
  			goto end_io;
  		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
b4fb3baf3   Ilya Dryomov   block: fail op_is...
1972
1973
  			goto end_io;
  	}
2056a782f   Jens Axboe   [PATCH] Block que...
1974

5a7bbad27   Christoph Hellwig   block: remove sup...
1975
1976
1977
1978
1979
  	/*
  	 * Filter flush bio's early so that make_request based
  	 * drivers without flush support don't have to worry
  	 * about them.
  	 */
f3a8ab7d5   Jens Axboe   block: cleanup re...
1980
  	if (op_is_flush(bio->bi_opf) &&
c888a8f95   Jens Axboe   block: kill off q...
1981
  	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1eff9d322   Jens Axboe   block: rename bio...
1982
  		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
5a7bbad27   Christoph Hellwig   block: remove sup...
1983
  		if (!nr_sectors) {
4e4cbee93   Christoph Hellwig   block: switch bio...
1984
  			status = BLK_STS_OK;
51fd77bd9   Jens Axboe   [BLOCK] Don't all...
1985
1986
  			goto end_io;
  		}
5a7bbad27   Christoph Hellwig   block: remove sup...
1987
  	}
5ddfe9691   NeilBrown   [PATCH] md: check...
1988

288dab8a3   Christoph Hellwig   block: add a sepa...
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
  	switch (bio_op(bio)) {
  	case REQ_OP_DISCARD:
  		if (!blk_queue_discard(q))
  			goto not_supported;
  		break;
  	case REQ_OP_SECURE_ERASE:
  		if (!blk_queue_secure_erase(q))
  			goto not_supported;
  		break;
  	case REQ_OP_WRITE_SAME:
74d46992e   Christoph Hellwig   block: replace bi...
1999
  		if (!q->limits.max_write_same_sectors)
288dab8a3   Christoph Hellwig   block: add a sepa...
2000
  			goto not_supported;
58886785d   Nicolai Stange   block: fix uninte...
2001
  		break;
2d253440b   Shaun Tancheff   block: Define zon...
2002
2003
  	case REQ_OP_ZONE_REPORT:
  	case REQ_OP_ZONE_RESET:
74d46992e   Christoph Hellwig   block: replace bi...
2004
  		if (!blk_queue_is_zoned(q))
2d253440b   Shaun Tancheff   block: Define zon...
2005
  			goto not_supported;
288dab8a3   Christoph Hellwig   block: add a sepa...
2006
  		break;
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
2007
  	case REQ_OP_WRITE_ZEROES:
74d46992e   Christoph Hellwig   block: replace bi...
2008
  		if (!q->limits.max_write_zeroes_sectors)
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
2009
2010
  			goto not_supported;
  		break;
288dab8a3   Christoph Hellwig   block: add a sepa...
2011
2012
  	default:
  		break;
5a7bbad27   Christoph Hellwig   block: remove sup...
2013
  	}
01edede41   Minchan Kim   block: trace bio ...
2014

7f4b35d15   Tejun Heo   block: allocate i...
2015
2016
2017
2018
2019
2020
2021
  	/*
  	 * Various block parts want %current->io_context and lazy ioc
  	 * allocation ends up trading a lot of pain for a small amount of
  	 * memory.  Just allocate it upfront.  This may fail and block
  	 * layer knows how to live with it.
  	 */
  	create_io_context(GFP_ATOMIC, q->node);
ae1188963   Tejun Heo   blkcg: consolidat...
2022
2023
  	if (!blkcg_bio_issue_check(q, bio))
  		return false;
27a84d54c   Christoph Hellwig   block: refactor g...
2024

fbbaf700e   NeilBrown   block: trace comp...
2025
2026
2027
2028
2029
2030
2031
  	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
  		trace_block_bio_queue(q, bio);
  		/* Now that enqueuing has been traced, we need to trace
  		 * completion as well.
  		 */
  		bio_set_flag(bio, BIO_TRACE_COMPLETION);
  	}
27a84d54c   Christoph Hellwig   block: refactor g...
2032
  	return true;
a7384677b   Tejun Heo   block: remove dup...
2033

288dab8a3   Christoph Hellwig   block: add a sepa...
2034
  not_supported:
4e4cbee93   Christoph Hellwig   block: switch bio...
2035
  	status = BLK_STS_NOTSUPP;
a7384677b   Tejun Heo   block: remove dup...
2036
  end_io:
4e4cbee93   Christoph Hellwig   block: switch bio...
2037
  	bio->bi_status = status;
4246a0b63   Christoph Hellwig   block: add a bi_e...
2038
  	bio_endio(bio);
27a84d54c   Christoph Hellwig   block: refactor g...
2039
  	return false;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2040
  }
/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_dev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;

		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = q->make_request_fn(q, bio);

			blk_queue_exit(q);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		} else {
			if (unlikely(!blk_queue_dying(q) &&
					(bio->bi_opf & REQ_NOWAIT)))
				bio_wouldblock_error(bio);
			else
				bio_io_error(bio);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
EXPORT_SYMBOL(generic_make_request);
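
/*
 * Usage sketch (illustrative only, not part of blk-core.c): the typical
 * caller-side contract of generic_make_request().  Everything here apart
 * from the block-layer calls is hypothetical.
 */
static void example_submit_read(struct block_device *bdev, struct page *page,
				sector_t sector, bio_end_io_t *done)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);			/* sets bi_disk/bi_partno */
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = done;			/* completion is asynchronous */
	generic_make_request(bio);		/* returns no I/O status */
}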
  
  /**
   * direct_make_request - hand a buffer directly to its device driver for I/O
   * @bio:  The bio describing the location in memory and on the device.
   *
   * This function behaves like generic_make_request(), but does not protect
   * against recursion.  Must only be used if the called driver is known
   * to not call generic_make_request (or direct_make_request) again from
   * its make_request function.  (Calling direct_make_request again from
   * a workqueue is perfectly fine as that doesn't recurse).
   */
  blk_qc_t direct_make_request(struct bio *bio)
  {
  	struct request_queue *q = bio->bi_disk->queue;
  	bool nowait = bio->bi_opf & REQ_NOWAIT;
  	blk_qc_t ret;
  
  	if (!generic_make_request_checks(bio))
  		return BLK_QC_T_NONE;
  
  	if (unlikely(blk_queue_enter(q, nowait))) {
  		if (nowait && !blk_queue_dying(q))
  			bio->bi_status = BLK_STS_AGAIN;
  		else
  			bio->bi_status = BLK_STS_IOERR;
  		bio_endio(bio);
  		return BLK_QC_T_NONE;
  	}
  
  	ret = q->make_request_fn(q, bio);
  	blk_queue_exit(q);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(direct_make_request);
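
/*
 * Usage sketch (illustrative): how a stacking driver in the style of NVMe
 * multipath might retarget a bio to a known-non-recursing bottom device.
 * The helper name is hypothetical.
 */
static blk_qc_t example_steer_bio(struct bio *bio, struct gendisk *bottom)
{
	bio->bi_disk = bottom;		/* retarget to the whole bottom disk */
	bio->bi_partno = 0;
	return direct_make_request(bio);
}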
  
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be set up and ready for I/O.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
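
/*
 * Usage sketch (illustrative): a synchronous write built on top of
 * submit_bio().  submit_bio_wait() (block/bio.c) installs its own
 * bi_end_io and sleeps until the bio completes.
 */
static int example_sync_write(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int err;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	bio_add_page(bio, page, PAGE_SIZE, 0);
	err = submit_bio_wait(bio);	/* submits via submit_bio() and waits */
	bio_put(bio);
	return err;
}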
  bool blk_poll(struct request_queue *q, blk_qc_t cookie)
  {
  	if (!q->poll_fn || !blk_qc_t_valid(cookie))
  		return false;
  
  	if (current->plug)
  		blk_flush_plug_list(current->plug, false);
  	return q->poll_fn(q, cookie);
  }
  EXPORT_SYMBOL_GPL(blk_poll);
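
/*
 * Usage sketch (illustrative): spinning on a completion cookie, as the
 * synchronous direct-I/O path does.  The done flag and how bi_end_io
 * sets it are hypothetical.
 */
static void example_poll_bio(struct request_queue *q, struct bio *bio,
			     bool *done)
{
	blk_qc_t cookie = submit_bio(bio);

	while (!READ_ONCE(*done))	/* bi_end_io is assumed to set *done */
		blk_poll(q, cookie);
}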

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitations of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
  
  /**
   * blk_insert_cloned_request - Helper for stacking drivers to submit a request
   * @q:  the queue to submit the request
   * @rq: the request being queued
   */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (q->mq_ops) {
		if (blk_queue_io_stat(q))
			blk_account_io_start(rq, true);
		/*
		 * Since we have a scheduler attached on the top device,
		 * bypass a potential scheduler on the bottom device for
		 * insert.
		 */
		blk_mq_request_bypass_insert(rq);
		return BLK_STS_OK;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dying(q))) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return BLK_STS_IOERR;
	}

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	if (op_is_flush(rq->cmd_flags))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
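
/*
 * Usage sketch (illustrative): dispatch in the style of request-based dm.
 * The clone is assumed to have been prepared by the stacking driver with
 * a valid end_io callback; on failure it is completed with the error.
 */
static void example_dispatch_clone(struct request_queue *bottom_q,
				   struct request *clone)
{
	blk_status_t ret = blk_insert_cloned_request(bottom_q, clone);

	if (ret != BLK_STS_OK)
		clone->end_io(clone, ret);
}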

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
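
/*
 * Usage sketch (illustrative): fail only the leading failfast portion of
 * a mixed-merge request and leave the rest pending for retry, the pattern
 * scsi_io_completion() relies on.  The wrapper itself is hypothetical.
 */
static bool example_fail_failfast_part(struct request *rq, blk_status_t error)
{
	/* returns %true if bytes are still pending after the partial fail */
	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
}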

void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}

#ifdef CONFIG_PM
/*
 * Don't process normal requests when queue is suspended
 * or in the process of suspending/resuming
 */
static struct request *blk_pm_peek_request(struct request_queue *q,
					   struct request *rq)
{
	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
		return NULL;
	else
		return rq;
}
#else
static inline struct request *blk_pm_peek_request(struct request_queue *q,
						  struct request *rq)
{
	return rq;
}
#endif

void blk_account_io_start(struct request *rq, bool new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(rq->q, cpu, part);
		part_inc_in_flight(rq->q, part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while ((rq = __elv_next_request(q)) != NULL) {

		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->rq_flags & RQF_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->rq_flags & RQF_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->rq_flags |= RQF_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->rq_flags & RQF_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  RQF_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->rq_flags & RQF_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
			rq->rq_flags |= RQF_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
					BLK_STS_TARGET : BLK_STS_IOERR);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_peek_request);

static void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.
 */
void blk_start_request(struct request *req)
{
	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(req->q->mq_ops);

	blk_dequeue_request(req);
	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
		blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
		req->rq_flags |= RQF_STATS;
		wbt_issue(req->q->rq_wb, &req->issue_stat);
	}

	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
	blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
  
  /**
   * blk_fetch_request - fetch a request from a request queue
   * @q: request queue to fetch a request from
   *
   * Description:
   *     Return the request at the top of @q.  The request is started on
   *     return and LLD can start processing it immediately.
   *
   * Return:
   *     Pointer to the request at the top of @q if available.  Null
   *     otherwise.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rq = blk_peek_request(q);
	if (rq)
		blk_start_request(rq);
	return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
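
/*
 * Usage sketch (illustrative): the classic single-queue dispatch loop.
 * A legacy ->request_fn() is entered with q->queue_lock held, so
 * blk_fetch_request() and __blk_end_request_all() may be called directly.
 * example_xfer() stands in for a hypothetical hardware transfer.
 */
static blk_status_t example_xfer(struct request *rq);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(rq, example_xfer(rq));
}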
  /*
   * Steal bios from a request and add them to a bio list.
   * The request must not have been partially completed before.
   */
  void blk_steal_bios(struct bio_list *list, struct request *rq)
  {
  	if (rq->bio) {
  		if (list->tail)
  			list->tail->bi_next = rq->bio;
  		else
  			list->head = rq->bio;
  		list->tail = rq->biotail;
  
  		rq->bio = NULL;
  		rq->biotail = NULL;
  	}
  
  	rq->__data_len = 0;
  }
  EXPORT_SYMBOL_GPL(blk_steal_bios);
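
/*
 * Usage sketch (illustrative): failover in the style of nvme multipath.
 * Detaching the bios lets the request complete without failing them;
 * the caller is then free to resubmit the stolen bios elsewhere.
 */
static void example_failover(struct request *rq, struct bio_list *requeue)
{
	blk_steal_bios(requeue, rq);	/* rq->bio is NULL, __data_len is 0 */
	blk_mq_end_request(rq, BLK_STS_OK);
}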

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
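
/*
 * Usage sketch (illustrative): partial-completion handling in a request
 * stacking driver.  done_bytes would come from the bottom device's
 * completion; the queue lock is assumed held for blk_finish_request().
 */
static void example_partial_done(struct request *req, unsigned int done_bytes)
{
	if (blk_update_request(req, BLK_STS_OK, done_bytes))
		return;		/* leftover remains; caller resubmits it */

	blk_finish_request(req, BLK_STS_OK);	/* no leftover: finish @req */
}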

static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req:	the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
 */
void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->rq_flags &= ~RQF_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);

void blk_finish_request(struct request *req, blk_status_t error)
{
	struct request_queue *q = req->q;

	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (req->rq_flags & RQF_STATS)
		blk_stat_add(req);

	if (req->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
		laptop_io_completion(req->q->backing_dev_info);

	blk_delete_timer(req);
	if (req->rq_flags & RQF_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io) {
		wbt_done(req->q->rq_wb, &req->issue_stat);
		req->end_io(req, error);
	} else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(q, req);
	}
}
EXPORT_SYMBOL(blk_finish_request);

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that support bidi can safely call this member for any
 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	WARN_ON_ONCE(rq->q->mq_ops);
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: block status code
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error:    block status code
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);
32fab448e   Kiyoshi Ueda   block: add reques...
2968
2969
  
  /**
b1f744937   FUJITA Tomonori   block: move compl...
2970
2971
   * __blk_end_request_cur - Helper function to finish the current request chunk.
   * @rq: the request to finish the current chunk for
2a842acab   Christoph Hellwig   block: introduce ...
2972
   * @error:    block status code
e19a3ab05   Kiyoshi Ueda   blk_end_request: ...
2973
2974
   *
   * Description:
b1f744937   FUJITA Tomonori   block: move compl...
2975
2976
   *     Complete the current consecutively mapped chunk from @rq.  Must
   *     be called with queue lock held.
e19a3ab05   Kiyoshi Ueda   blk_end_request: ...
2977
2978
   *
   * Return:
b1f744937   FUJITA Tomonori   block: move compl...
2979
2980
2981
   *     %false - we are done with this request
   *     %true  - still buffers pending for this request
   */
2a842acab   Christoph Hellwig   block: introduce ...
2982
  bool __blk_end_request_cur(struct request *rq, blk_status_t error)
e19a3ab05   Kiyoshi Ueda   blk_end_request: ...
2983
  {
b1f744937   FUJITA Tomonori   block: move compl...
2984
  	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
e19a3ab05   Kiyoshi Ueda   blk_end_request: ...
2985
  }
56ad1740d   Jens Axboe   block: make the e...
2986
  EXPORT_SYMBOL(__blk_end_request_cur);
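
  /*
   * Usage sketch (editor's addition, not part of the original source):
   * how a hypothetical legacy single-queue driver's ->request_fn might
   * use the completion helpers above. mydrv_xfer_chunk() stands in for
   * the driver's real data transfer; blk_fetch_request() and
   * __blk_end_request_cur() are real block layer APIs. ->request_fn is
   * invoked with the queue lock held, which is exactly what
   * __blk_end_request_cur() requires.
   */
  static blk_status_t mydrv_xfer_chunk(struct request *rq)
  {
  	/* Hypothetical: transfer blk_rq_cur_bytes(rq) of data here. */
  	return BLK_STS_OK;
  }

  static void mydrv_request_fn(struct request_queue *q)
  {
  	struct request *rq;

  	while ((rq = blk_fetch_request(q)) != NULL) {
  		blk_status_t sts = mydrv_xfer_chunk(rq);

  		/* Returns true while chunks of @rq are still pending. */
  		while (__blk_end_request_cur(rq, sts))
  			sts = mydrv_xfer_chunk(rq);
  	}
  }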
e19a3ab05   Kiyoshi Ueda   blk_end_request: ...
2987

86db1e297   Jens Axboe   block: continue l...
2988
2989
  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
  		     struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2990
  {
b4f42e283   Jens Axboe   block: remove str...
2991
  	if (bio_has_data(bio))
fb2dce862   David Woodhouse   Add 'discard' req...
2992
  		rq->nr_phys_segments = bio_phys_segments(q, bio);
73027d80d   Jens Axboe   blk-mq: fix disca...
2993
2994
  	else if (bio_op(bio) == REQ_OP_DISCARD)
  		rq->nr_phys_segments = 1;
b4f42e283   Jens Axboe   block: remove str...
2995

4f024f379   Kent Overstreet   block: Abstract o...
2996
  	rq->__data_len = bio->bi_iter.bi_size;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2997
  	rq->bio = rq->biotail = bio;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2998

74d46992e   Christoph Hellwig   block: replace bi...
2999
3000
  	if (bio->bi_disk)
  		rq->rq_disk = bio->bi_disk;
66846572b   NeilBrown   Stop exporting bl...
3001
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3002

2d4dc890b   Ilya Loginov   block: add helper...
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
  #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
  /**
   * rq_flush_dcache_pages - Helper function to flush all pages in a request
   * @rq: the request to be flushed
   *
   * Description:
   *     Flush all pages in @rq.
   */
  void rq_flush_dcache_pages(struct request *rq)
  {
  	struct req_iterator iter;
7988613b0   Kent Overstreet   block: Convert bi...
3014
  	struct bio_vec bvec;
2d4dc890b   Ilya Loginov   block: add helper...
3015
3016
  
  	rq_for_each_segment(bvec, rq, iter)
7988613b0   Kent Overstreet   block: Convert bi...
3017
  		flush_dcache_page(bvec.bv_page);
2d4dc890b   Ilya Loginov   block: add helper...
3018
3019
3020
  }
  EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  #endif
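
  /*
   * Usage sketch (editor's addition): a PIO-style driver that filled the
   * request's pages through a kernel mapping would flush before
   * completing a read, so that user-space mappings see the new data on
   * architectures with aliasing caches:
   *
   *	rq_flush_dcache_pages(rq);
   *	__blk_end_request_all(rq, BLK_STS_OK);
   */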
ef9e3facd   Kiyoshi Ueda   block: add lld bu...
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
  /**
   * blk_lld_busy - Check if underlying low-level drivers of a device are busy
   * @q : the queue of the device being checked
   *
   * Description:
   *    Check if underlying low-level drivers of a device are busy.
   *    If a driver wants to export its busy state, it must first set its own
   *    exporting function using blk_queue_lld_busy().
   *
   *    Basically, this function is used only by request stacking drivers
   *    to stop dispatching requests to underlying devices when underlying
   *    devices are busy.  This behavior helps I/O merging on the queue
   *    of the request stacking driver and prevents I/O throughput regressions
   *    under bursty I/O load.
   *
   * Return:
   *    0 - Not busy (The request stacking driver should dispatch the request)
   *    1 - Busy (The request stacking driver should stop dispatching requests)
   */
  int blk_lld_busy(struct request_queue *q)
  {
  	if (q->lld_busy_fn)
  		return q->lld_busy_fn(q);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_lld_busy);
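
  /*
   * Sketch (editor's addition): the two halves of the lld_busy contract.
   * A hypothetical low-level driver exports its busy state via
   * blk_queue_lld_busy(), and a request stacking driver consults
   * blk_lld_busy() on the underlying queue before dispatching.
   */
  struct mydrv_dev {
  	bool busy;			/* hypothetical busy flag */
  };

  static int mydrv_lld_busy(struct request_queue *q)
  {
  	struct mydrv_dev *dev = q->queuedata;

  	return dev->busy;
  }

  /* At probe time: blk_queue_lld_busy(q, mydrv_lld_busy); */

  static bool mystk_should_dispatch(struct request_queue *lower_q)
  {
  	return !blk_lld_busy(lower_q);
  }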
78d8e58a0   Mike Snitzer   Revert "block, dm...
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
  /**
   * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
   * @rq: the clone request to be cleaned up
   *
   * Description:
   *     Free all bios in @rq for a cloned request.
   */
  void blk_rq_unprep_clone(struct request *rq)
  {
  	struct bio *bio;
  
  	while ((bio = rq->bio) != NULL) {
  		rq->bio = bio->bi_next;
  
  		bio_put(bio);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
  
  /*
   * Copy attributes of the original request to the clone request.
   * The actual data parts (e.g. ->cmd, ->sense) are not copied.
   */
  static void __blk_rq_prep_clone(struct request *dst, struct request *src)
b0fd271d5   Kiyoshi Ueda   block: add reques...
3072
3073
  {
  	dst->cpu = src->cpu;
b0fd271d5   Kiyoshi Ueda   block: add reques...
3074
3075
  	dst->__sector = blk_rq_pos(src);
  	dst->__data_len = blk_rq_bytes(src);
251141340   Bart Van Assche   block: Fix clonin...
3076
3077
3078
3079
  	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
  		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
  		dst->special_vec = src->special_vec;
  	}
b0fd271d5   Kiyoshi Ueda   block: add reques...
3080
3081
3082
  	dst->nr_phys_segments = src->nr_phys_segments;
  	dst->ioprio = src->ioprio;
  	dst->extra_len = src->extra_len;
78d8e58a0   Mike Snitzer   Revert "block, dm...
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
  }
  
  /**
   * blk_rq_prep_clone - Helper function to setup clone request
   * @rq: the request to be setup
   * @rq_src: original request to be cloned
   * @bs: bio_set that bios for clone are allocated from
   * @gfp_mask: memory allocation mask for bio
   * @bio_ctr: setup function to be called for each clone bio.
   *           Returns %0 for success, non %0 for failure.
   * @data: private data to be passed to @bio_ctr
   *
   * Description:
   *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
   *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
   *     are not copied, and copying such parts is the caller's responsibility.
   *     Also, the pages which the original bios point to are not copied,
   *     and the cloned bios just point to the same pages.
   *     So cloned bios must be completed before original bios, which means
   *     the caller must complete @rq before @rq_src.
   */
  int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
  		      struct bio_set *bs, gfp_t gfp_mask,
  		      int (*bio_ctr)(struct bio *, struct bio *, void *),
  		      void *data)
  {
  	struct bio *bio, *bio_src;
  
  	if (!bs)
  		bs = fs_bio_set;
  
  	__rq_for_each_bio(bio_src, rq_src) {
  		bio = bio_clone_fast(bio_src, gfp_mask, bs);
  		if (!bio)
  			goto free_and_out;
  
  		if (bio_ctr && bio_ctr(bio, bio_src, data))
  			goto free_and_out;
  
  		if (rq->bio) {
  			rq->biotail->bi_next = bio;
  			rq->biotail = bio;
  		} else
  			rq->bio = rq->biotail = bio;
  	}
  
  	__blk_rq_prep_clone(rq, rq_src);
  
  	return 0;
  
  free_and_out:
  	if (bio)
  		bio_put(bio);
  	blk_rq_unprep_clone(rq);
  
  	return -ENOMEM;
b0fd271d5   Kiyoshi Ueda   block: add reques...
3139
3140
  }
  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
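
  /*
   * Sketch (editor's addition): preparing and tearing down a clone the
   * way a request stacking driver might, loosely modeled on
   * device-mapper multipath. All mystk_* names are hypothetical; passing
   * a NULL @bio_ctr to blk_rq_prep_clone() is allowed and skips the
   * per-bio setup callback.
   */
  static void mystk_end_clone_io(struct request *clone, blk_status_t error)
  {
  	struct request *orig = clone->end_io_data;

  	blk_rq_unprep_clone(clone);
  	/* ... complete @orig with @error and free @clone here ... */
  }

  static int mystk_setup_clone(struct request *clone, struct request *orig,
  			     struct bio_set *bs, gfp_t gfp_mask)
  {
  	int ret;

  	ret = blk_rq_prep_clone(clone, orig, bs, gfp_mask, NULL, NULL);
  	if (ret)
  		return ret;

  	clone->end_io = mystk_end_clone_io;
  	clone->end_io_data = orig;
  	return 0;
  }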
59c3d45e4   Jens Axboe   block: remove 'q'...
3141
  int kblockd_schedule_work(struct work_struct *work)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3142
3143
3144
  {
  	return queue_work(kblockd_workqueue, work);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3145
  EXPORT_SYMBOL(kblockd_schedule_work);
ee63cfa7f   Jens Axboe   block: add kblock...
3146
3147
3148
3149
3150
  int kblockd_schedule_work_on(int cpu, struct work_struct *work)
  {
  	return queue_work_on(cpu, kblockd_workqueue, work);
  }
  EXPORT_SYMBOL(kblockd_schedule_work_on);
818cd1cba   Jens Axboe   block: add kblock...
3151
3152
3153
3154
3155
3156
  int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
  				unsigned long delay)
  {
  	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
59c3d45e4   Jens Axboe   block: remove 'q'...
3157
3158
  int kblockd_schedule_delayed_work(struct delayed_work *dwork,
  				  unsigned long delay)
e43473b7f   Vivek Goyal   blkio: Core imple...
3159
3160
3161
3162
  {
  	return queue_delayed_work(kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work);
8ab14595b   Jens Axboe   block: add kblock...
3163
3164
3165
3166
3167
3168
  int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
  				     unsigned long delay)
  {
  	return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
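
  /*
   * Sketch (editor's addition): deferring block layer work to kblockd.
   * The work item and handler are hypothetical. kblockd is preferred
   * over the system workqueue for queue running because it is allocated
   * with WQ_MEM_RECLAIM and WQ_HIGHPRI (see blk_dev_init() below), so
   * I/O progress cannot be starved under memory reclaim.
   */
  static void mydrv_requeue_handler(struct work_struct *work)
  {
  	/* ... requeue failed requests and re-run the queue ... */
  }

  static DECLARE_WORK(mydrv_requeue_work, mydrv_requeue_handler);

  static void mydrv_kick_requeue(void)
  {
  	kblockd_schedule_work(&mydrv_requeue_work);
  }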
75df71362   Suresh Jayaraman   block: document b...
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
  /**
   * blk_start_plug - initialize blk_plug and track it inside the task_struct
   * @plug:	The &struct blk_plug that needs to be initialized
   *
   * Description:
   *   Tracking blk_plug inside the task_struct will help with auto-flushing the
   *   pending I/O should the task end up blocking between blk_start_plug() and
   *   blk_finish_plug(). This is important from a performance perspective, but
   *   also ensures that we don't deadlock. For instance, if the task is blocking
   *   for a memory allocation, memory reclaim could end up wanting to free a
   *   page belonging to that request that is currently residing in our private
   *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
   *   this kind of deadlock.
   */
73c101011   Jens Axboe   block: initial pa...
3183
3184
3185
  void blk_start_plug(struct blk_plug *plug)
  {
  	struct task_struct *tsk = current;
dd6cf3e18   Shaohua Li   blk: clean up plug
3186
3187
3188
3189
3190
  	/*
  	 * If this is a nested plug, don't actually assign it.
  	 */
  	if (tsk->plug)
  		return;
73c101011   Jens Axboe   block: initial pa...
3191
  	INIT_LIST_HEAD(&plug->list);
320ae51fe   Jens Axboe   blk-mq: new multi...
3192
  	INIT_LIST_HEAD(&plug->mq_list);
048c9374a   NeilBrown   block: Enhance ne...
3193
  	INIT_LIST_HEAD(&plug->cb_list);
73c101011   Jens Axboe   block: initial pa...
3194
  	/*
dd6cf3e18   Shaohua Li   blk: clean up plug
3195
3196
  	 * Store ordering should not be needed here, since a potential
  	 * preempt will imply a full memory barrier
73c101011   Jens Axboe   block: initial pa...
3197
  	 */
dd6cf3e18   Shaohua Li   blk: clean up plug
3198
  	tsk->plug = plug;
73c101011   Jens Axboe   block: initial pa...
3199
3200
3201
3202
3203
3204
3205
  }
  EXPORT_SYMBOL(blk_start_plug);
  
  static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  {
  	struct request *rqa = container_of(a, struct request, queuelist);
  	struct request *rqb = container_of(b, struct request, queuelist);
975927b94   Jianpeng Ma   block: Add blk_rq...
3206
3207
  	return !(rqa->q < rqb->q ||
  		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
73c101011   Jens Axboe   block: initial pa...
3208
  }
49cac01e1   Jens Axboe   block: make unplu...
3209
3210
3211
3212
3213
3214
  /*
   * If 'from_schedule' is true, then postpone the dispatch of requests
   * until a safe kblockd context. We do this to avoid accidental, large
   * additional stack usage in driver dispatch, in places where the original
   * plugger did not intend it.
   */
f6603783f   Jens Axboe   block: only force...
3215
  static void queue_unplugged(struct request_queue *q, unsigned int depth,
49cac01e1   Jens Axboe   block: make unplu...
3216
  			    bool from_schedule)
99e22598e   Jens Axboe   block: drop queue...
3217
  	__releases(q->queue_lock)
94b5eb28b   Jens Axboe   block: fixup bloc...
3218
  {
2fff8a924   Bart Van Assche   block: Check lock...
3219
  	lockdep_assert_held(q->queue_lock);
49cac01e1   Jens Axboe   block: make unplu...
3220
  	trace_block_unplug(q, depth, !from_schedule);
99e22598e   Jens Axboe   block: drop queue...
3221

704605711   Bart Van Assche   block: Avoid sche...
3222
  	if (from_schedule)
24ecfbe27   Christoph Hellwig   block: add blk_ru...
3223
  		blk_run_queue_async(q);
704605711   Bart Van Assche   block: Avoid sche...
3224
  	else
24ecfbe27   Christoph Hellwig   block: add blk_ru...
3225
  		__blk_run_queue(q);
704605711   Bart Van Assche   block: Avoid sche...
3226
  	spin_unlock(q->queue_lock);
94b5eb28b   Jens Axboe   block: fixup bloc...
3227
  }
74018dc30   NeilBrown   blk: pass from_sc...
3228
  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
048c9374a   NeilBrown   block: Enhance ne...
3229
3230
  {
  	LIST_HEAD(callbacks);
2a7d5559b   Shaohua Li   block: stack unplug
3231
3232
  	while (!list_empty(&plug->cb_list)) {
  		list_splice_init(&plug->cb_list, &callbacks);
048c9374a   NeilBrown   block: Enhance ne...
3233

2a7d5559b   Shaohua Li   block: stack unplug
3234
3235
  		while (!list_empty(&callbacks)) {
  			struct blk_plug_cb *cb = list_first_entry(&callbacks,
048c9374a   NeilBrown   block: Enhance ne...
3236
3237
  							  struct blk_plug_cb,
  							  list);
2a7d5559b   Shaohua Li   block: stack unplug
3238
  			list_del(&cb->list);
74018dc30   NeilBrown   blk: pass from_sc...
3239
  			cb->callback(cb, from_schedule);
2a7d5559b   Shaohua Li   block: stack unplug
3240
  		}
048c9374a   NeilBrown   block: Enhance ne...
3241
3242
  	}
  }
9cbb17508   NeilBrown   blk: centralize n...
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
  struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
  				      int size)
  {
  	struct blk_plug *plug = current->plug;
  	struct blk_plug_cb *cb;
  
  	if (!plug)
  		return NULL;
  
  	list_for_each_entry(cb, &plug->cb_list, list)
  		if (cb->callback == unplug && cb->data == data)
  			return cb;
  
  	/* Not currently on the callback list */
  	BUG_ON(size < sizeof(*cb));
  	cb = kzalloc(size, GFP_ATOMIC);
  	if (cb) {
  		cb->data = data;
  		cb->callback = unplug;
  		list_add(&cb->list, &plug->cb_list);
  	}
  	return cb;
  }
  EXPORT_SYMBOL(blk_check_plugged);
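
  /*
   * Sketch (editor's addition): how a bio-based driver (md does this for
   * its RAID personalities) can piggyback on the caller's plug via
   * blk_check_plugged(). The names are hypothetical. Embedding the
   * blk_plug_cb lets container_of() recover the outer structure, and the
   * kzalloc() in blk_check_plugged() leaves the bio_list validly empty.
   * The callback owns the allocation and must free it.
   */
  struct mydrv_plug_cb {
  	struct blk_plug_cb cb;
  	struct bio_list pending;
  };

  static void mydrv_unplug(struct blk_plug_cb *cb, bool from_schedule)
  {
  	struct mydrv_plug_cb *mcb = container_of(cb, struct mydrv_plug_cb, cb);

  	/* ... submit every bio on mcb->pending ... */
  	kfree(mcb);
  }

  static bool mydrv_try_plug(void *dev_data, struct bio *bio)
  {
  	struct blk_plug_cb *cb = blk_check_plugged(mydrv_unplug, dev_data,
  						   sizeof(struct mydrv_plug_cb));

  	if (!cb)
  		return false;	/* no plug in progress, submit directly */

  	bio_list_add(&container_of(cb, struct mydrv_plug_cb, cb)->pending, bio);
  	return true;
  }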
49cac01e1   Jens Axboe   block: make unplu...
3267
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
73c101011   Jens Axboe   block: initial pa...
3268
3269
3270
3271
  {
  	struct request_queue *q;
  	unsigned long flags;
  	struct request *rq;
109b81296   NeilBrown   block: splice plu...
3272
  	LIST_HEAD(list);
94b5eb28b   Jens Axboe   block: fixup bloc...
3273
  	unsigned int depth;
73c101011   Jens Axboe   block: initial pa...
3274

74018dc30   NeilBrown   blk: pass from_sc...
3275
  	flush_plug_callbacks(plug, from_schedule);
320ae51fe   Jens Axboe   blk-mq: new multi...
3276
3277
3278
  
  	if (!list_empty(&plug->mq_list))
  		blk_mq_flush_plug_list(plug, from_schedule);
73c101011   Jens Axboe   block: initial pa...
3279
3280
  	if (list_empty(&plug->list))
  		return;
109b81296   NeilBrown   block: splice plu...
3281
  	list_splice_init(&plug->list, &list);
422765c26   Jianpeng Ma   block: Remove sho...
3282
  	list_sort(NULL, &list, plug_rq_cmp);
73c101011   Jens Axboe   block: initial pa...
3283
3284
  
  	q = NULL;
94b5eb28b   Jens Axboe   block: fixup bloc...
3285
  	depth = 0;
188112722   Jens Axboe   block: add commen...
3286
3287
3288
3289
3290
  
  	/*
  	 * Save and disable interrupts here, to avoid doing it for every
  	 * queue lock we have to take.
  	 */
73c101011   Jens Axboe   block: initial pa...
3291
  	local_irq_save(flags);
109b81296   NeilBrown   block: splice plu...
3292
3293
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
73c101011   Jens Axboe   block: initial pa...
3294
  		list_del_init(&rq->queuelist);
73c101011   Jens Axboe   block: initial pa...
3295
3296
  		BUG_ON(!rq->q);
  		if (rq->q != q) {
99e22598e   Jens Axboe   block: drop queue...
3297
3298
3299
3300
  			/*
  			 * This drops the queue lock
  			 */
  			if (q)
49cac01e1   Jens Axboe   block: make unplu...
3301
  				queue_unplugged(q, depth, from_schedule);
73c101011   Jens Axboe   block: initial pa...
3302
  			q = rq->q;
94b5eb28b   Jens Axboe   block: fixup bloc...
3303
  			depth = 0;
73c101011   Jens Axboe   block: initial pa...
3304
3305
  			spin_lock(q->queue_lock);
  		}
8ba61435d   Tejun Heo   block: add missin...
3306
3307
3308
3309
  
  		/*
  		 * Short-circuit if @q is dead
  		 */
3f3299d5c   Bart Van Assche   block: Rename que...
3310
  		if (unlikely(blk_queue_dying(q))) {
2a842acab   Christoph Hellwig   block: introduce ...
3311
  			__blk_end_request_all(rq, BLK_STS_IOERR);
8ba61435d   Tejun Heo   block: add missin...
3312
3313
  			continue;
  		}
73c101011   Jens Axboe   block: initial pa...
3314
3315
3316
  		/*
  		 * rq is already accounted, so use raw insert
  		 */
f73f44eb0   Christoph Hellwig   block: add a op_i...
3317
  		if (op_is_flush(rq->cmd_flags))
401a18e92   Jens Axboe   block: fix bug wi...
3318
3319
3320
  			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
  		else
  			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
94b5eb28b   Jens Axboe   block: fixup bloc...
3321
3322
  
  		depth++;
73c101011   Jens Axboe   block: initial pa...
3323
  	}
99e22598e   Jens Axboe   block: drop queue...
3324
3325
3326
3327
  	/*
  	 * This drops the queue lock
  	 */
  	if (q)
49cac01e1   Jens Axboe   block: make unplu...
3328
  		queue_unplugged(q, depth, from_schedule);
73c101011   Jens Axboe   block: initial pa...
3329

73c101011   Jens Axboe   block: initial pa...
3330
3331
  	local_irq_restore(flags);
  }
73c101011   Jens Axboe   block: initial pa...
3332
3333
3334
  
  void blk_finish_plug(struct blk_plug *plug)
  {
dd6cf3e18   Shaohua Li   blk: clean up plug
3335
3336
  	if (plug != current->plug)
  		return;
f6603783f   Jens Axboe   block: only force...
3337
  	blk_flush_plug_list(plug, false);
73c101011   Jens Axboe   block: initial pa...
3338

dd6cf3e18   Shaohua Li   blk: clean up plug
3339
  	current->plug = NULL;
73c101011   Jens Axboe   block: initial pa...
3340
  }
88b996cd0   Christoph Hellwig   block: cleanup th...
3341
  EXPORT_SYMBOL(blk_finish_plug);
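
  /*
   * Usage sketch (editor's addition): the canonical plugging pattern.
   * I/O submitted between blk_start_plug() and blk_finish_plug() is held
   * in the on-stack plug and dispatched in one batch, giving the
   * elevator a chance to sort and merge. The bios array is hypothetical.
   */
  static void mydrv_submit_batch(struct bio **bios, int nr)
  {
  	struct blk_plug plug;
  	int i;

  	blk_start_plug(&plug);
  	for (i = 0; i < nr; i++)
  		submit_bio(bios[i]);
  	blk_finish_plug(&plug);
  }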
73c101011   Jens Axboe   block: initial pa...
3342

47fafbc70   Rafael J. Wysocki   block / PM: Repla...
3343
  #ifdef CONFIG_PM
6c9546675   Lin Ming   block: add runtim...
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
  /**
   * blk_pm_runtime_init - Block layer runtime PM initialization routine
   * @q: the queue of the device
   * @dev: the device the queue belongs to
   *
   * Description:
   *    Initialize runtime-PM-related fields for @q and start auto suspend for
   *    @dev. Drivers that want to take advantage of request-based runtime PM
   *    should call this function after @dev has been initialized, and its
   *    request queue @q has been allocated, and runtime PM for it cannot yet
   *    happen (either because it is disabled/forbidden or because its
   *    usage_count > 0). In most cases, a driver should call this function
   *    before any I/O has taken place.
   *
   *    This function takes care of setting up autosuspend for the device;
   *    the autosuspend delay is set to -1 to make runtime suspend impossible
   *    until an updated value is set by either the user or the driver. Drivers do
   *    not need to touch other autosuspend settings.
   *
   *    The block layer runtime PM is request based, so it only works for
   *    drivers that use requests as their I/O unit, not for those that use
   *    bios directly.
   */
  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  {
1e2698976   Ming Lei   block: really dis...
3367
3368
3369
  	/* Don't enable runtime PM for blk-mq until it is ready */
  	if (q->mq_ops) {
  		pm_runtime_disable(dev);
765e40b67   Christoph Hellwig   block: disable ru...
3370
  		return;
1e2698976   Ming Lei   block: really dis...
3371
  	}
765e40b67   Christoph Hellwig   block: disable ru...
3372

6c9546675   Lin Ming   block: add runtim...
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
  	q->dev = dev;
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_set_autosuspend_delay(q->dev, -1);
  	pm_runtime_use_autosuspend(q->dev);
  }
  EXPORT_SYMBOL(blk_pm_runtime_init);
  
  /**
   * blk_pre_runtime_suspend - Pre runtime suspend check
   * @q: the queue of the device
   *
   * Description:
   *    This function will check if runtime suspend is allowed for the device
   *    by examining if there are any requests pending in the queue. If there
   *    are requests pending, the device cannot be runtime suspended; otherwise,
   *    the queue's status will be updated to SUSPENDING and the driver can
   *    proceed to suspend the device.
   *
   *    If suspend is not allowed, we mark the device as last busy so that the
   *    runtime PM core will try to autosuspend it again later.
   *
   *    This function should be called near the start of the device's
   *    runtime_suspend callback.
   *
   * Return:
   *    0		- OK to runtime suspend the device
   *    -EBUSY	- Device should not be runtime suspended
   */
  int blk_pre_runtime_suspend(struct request_queue *q)
  {
  	int ret = 0;
4fd41a855   Ken Xue   SCSI: Fix NULL po...
3404
3405
  	if (!q->dev)
  		return ret;
6c9546675   Lin Ming   block: add runtim...
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
  	spin_lock_irq(q->queue_lock);
  	if (q->nr_pending) {
  		ret = -EBUSY;
  		pm_runtime_mark_last_busy(q->dev);
  	} else {
  		q->rpm_status = RPM_SUSPENDING;
  	}
  	spin_unlock_irq(q->queue_lock);
  	return ret;
  }
  EXPORT_SYMBOL(blk_pre_runtime_suspend);
  
  /**
   * blk_post_runtime_suspend - Post runtime suspend processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_suspend function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime_suspend function and mark the device as last busy so
   *    that the PM core will try to autosuspend it at a later time.
   *
   *    This function should be called near the end of the device's
   *    runtime_suspend callback.
   */
  void blk_post_runtime_suspend(struct request_queue *q, int err)
  {
4fd41a855   Ken Xue   SCSI: Fix NULL po...
3433
3434
  	if (!q->dev)
  		return;
6c9546675   Lin Ming   block: add runtim...
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_SUSPENDED;
  	} else {
  		q->rpm_status = RPM_ACTIVE;
  		pm_runtime_mark_last_busy(q->dev);
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_suspend);
  
  /**
   * blk_pre_runtime_resume - Pre runtime resume processing
   * @q: the queue of the device
   *
   * Description:
   *    Update the queue's runtime status to RESUMING in preparation for the
   *    runtime resume of the device.
   *
   *    This function should be called near the start of the device's
   *    runtime_resume callback.
   */
  void blk_pre_runtime_resume(struct request_queue *q)
  {
4fd41a855   Ken Xue   SCSI: Fix NULL po...
3459
3460
  	if (!q->dev)
  		return;
6c9546675   Lin Ming   block: add runtim...
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
  	spin_lock_irq(q->queue_lock);
  	q->rpm_status = RPM_RESUMING;
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_pre_runtime_resume);
  
  /**
   * blk_post_runtime_resume - Post runtime resume processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_resume function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime_resume function. If the device was successfully
   *    resumed, process the requests that were queued while it was resuming,
   *    then mark last busy and initiate autosuspend for it.
   *
   *    This function should be called near the end of the device's
   *    runtime_resume callback.
   */
  void blk_post_runtime_resume(struct request_queue *q, int err)
  {
4fd41a855   Ken Xue   SCSI: Fix NULL po...
3483
3484
  	if (!q->dev)
  		return;
6c9546675   Lin Ming   block: add runtim...
3485
3486
3487
3488
3489
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_ACTIVE;
  		__blk_run_queue(q);
  		pm_runtime_mark_last_busy(q->dev);
c60855cdb   Aaron Lu   blkpm: avoid slee...
3490
  		pm_request_autosuspend(q->dev);
6c9546675   Lin Ming   block: add runtim...
3491
3492
3493
3494
3495
3496
  	} else {
  		q->rpm_status = RPM_SUSPENDED;
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_resume);
d07ab6d11   Mika Westerberg   block: Add blk_se...
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
  
  /**
   * blk_set_runtime_active - Force runtime status of the queue to be active
   * @q: the queue of the device
   *
   * If the device is left runtime suspended during system suspend, the resume
   * hook typically resumes the device and corrects its runtime status
   * accordingly. However, that does not affect the queue runtime PM status,
   * which is still "suspended". This prevents requests from being processed
   * from the queue.
   *
   * This function can be used in a driver's resume hook to correct the queue's
   * runtime PM status and re-enable peeking at requests from the queue. It
   * should be called before the first request is added to the queue.
   */
  void blk_set_runtime_active(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_mark_last_busy(q->dev);
  	pm_request_autosuspend(q->dev);
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_set_runtime_active);
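
  /*
   * Sketch (editor's addition): how a hypothetical legacy request-based
   * driver wires the helpers above into its runtime PM callbacks, after
   * having called blk_pm_runtime_init(q, dev) at probe time. The
   * mydrv_hw_* stubs stand in for real hardware suspend/resume.
   */
  static int mydrv_hw_suspend(struct device *dev)
  {
  	return 0;	/* hypothetical: put the hardware to sleep */
  }

  static int mydrv_hw_resume(struct device *dev)
  {
  	return 0;	/* hypothetical: wake the hardware back up */
  }

  static int mydrv_runtime_suspend(struct device *dev)
  {
  	struct request_queue *q = dev_get_drvdata(dev);
  	int err;

  	err = blk_pre_runtime_suspend(q);
  	if (err)
  		return err;
  	err = mydrv_hw_suspend(dev);
  	blk_post_runtime_suspend(q, err);
  	return err;
  }

  static int mydrv_runtime_resume(struct device *dev)
  {
  	struct request_queue *q = dev_get_drvdata(dev);
  	int err;

  	blk_pre_runtime_resume(q);
  	err = mydrv_hw_resume(dev);
  	blk_post_runtime_resume(q, err);
  	return err;
  }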
6c9546675   Lin Ming   block: add runtim...
3521
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3522
3523
  int __init blk_dev_init(void)
  {
ef295ecf0   Christoph Hellwig   block: better op ...
3524
3525
  	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
  	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
0762b23d2   Maninder Singh   block: use FIELD_...
3526
  			FIELD_SIZEOF(struct request, cmd_flags));
ef295ecf0   Christoph Hellwig   block: better op ...
3527
3528
  	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
  			FIELD_SIZEOF(struct bio, bi_opf));
9eb55b030   Nikanth Karthikesan   block: catch tryi...
3529

89b90be2d   Tejun Heo   block: make kbloc...
3530
3531
  	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
  	kblockd_workqueue = alloc_workqueue("kblockd",
28747fcd2   Matias Bjørling   block: remove WQ_...
3532
  					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3533
3534
3535
3536
3537
  	if (!kblockd_workqueue)
  		panic("Failed to create kblockd
  ");
  
  	request_cachep = kmem_cache_create("blkdev_requests",
20c2df83d   Paul Mundt   mm: Remove slab d...
3538
  			sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3539

c2789bd40   Ilya Dryomov   block: rename req...
3540
  	blk_requestq_cachep = kmem_cache_create("request_queue",
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3541
  			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3542

18fbda91c   Omar Sandoval   block: use same b...
3543
3544
3545
  #ifdef CONFIG_DEBUG_FS
  	blk_debugfs_root = debugfs_create_dir("block", NULL);
  #endif
d38ecf935   Jens Axboe   io context sharin...
3546
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3547
  }