block/blk-core.c
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);
/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
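
/*
 * Worked example: with the default nr_requests of 128, congestion is
 * signalled once 128 - 128/8 + 1 = 113 requests are allocated, and is
 * cleared again only when the count drops below
 * 128 - 128/8 - 128/16 - 1 = 103, giving writeback some hysteresis.
 */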
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
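
/*
 * Illustrative sketch (not part of this file): blk_rq_init() serves
 * requests that bypass the mempool allocation path, for example the
 * flush machinery recycling its preallocated request:
 *
 *	blk_rq_init(q, flush_rq);
 *	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 */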

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
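
/*
 * Illustrative round trip (sketch, not kernel code): any errno not in
 * blk_errors[] collapses to BLK_STS_IOERR, so the mapping is lossy:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC); // BLK_STS_NOSPC
 *	int err = blk_status_to_errno(sts);              // -ENOSPC again
 *	sts = errno_to_blk_status(-EINVAL);              // BLK_STS_IOERR
 *	err = blk_status_to_errno(sts);                  // -EIO, not -EINVAL
 */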
  
static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);
static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
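
/*
 * Illustrative use (hypothetical driver; the mydev_* helpers are made
 * up): a request_fn that runs out of hardware resources can requeue
 * the request and ask for a delayed rerun instead of spinning:
 *
 *	if (!mydev_hw_slot_available(mydev)) {
 *		blk_requeue_request(q, rq);
 *		blk_delay_queue(q, 3);	// rerun in ~3ms, lock already held
 *		return;
 *	}
 */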

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON(!in_interrupt() && !irqs_disabled());
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
  
/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
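
/*
 * Illustrative pairing (hypothetical driver; the mydev_* names are made
 * up): the queue is stopped from the request_fn when the device is full
 * and restarted from the completion path, both under q->queue_lock:
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (mydev_queue_full(q->queuedata)) {
 *				blk_requeue_request(q, rq);
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			mydev_issue(rq);
 *		}
 *	}
 *
 * The interrupt handler later takes q->queue_lock and calls
 * blk_start_queue(q) once a slot frees up.
 */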
  
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);
		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);
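
/*
 * Illustrative teardown ordering (hypothetical driver): a driver that
 * releases resources used by its callbacks should sync the queue first;
 * blk_cleanup_queue() below does this internally.
 *
 *	blk_stop_queue(q);		// under q->queue_lock
 *	blk_sync_queue(q);		// timeout/delay work has finished
 *	mydev_release_irq(mydev);	// callbacks can no longer run
 */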
  
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
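
/*
 * Illustrative use (sketch): callers that do not already hold
 * q->queue_lock use blk_run_queue(), which takes it; code already
 * holding the lock calls __blk_run_queue() directly:
 *
 *	blk_run_queue(q);			// unlocked caller
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	__blk_run_queue(q);			// locked caller
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */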

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

void blk_drain_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set, and that blk_queue_bypass()
 * returns %true inside the queue lock or an RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  
/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
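
/*
 * Illustrative pairing (sketch): bypass mode is used around
 * reconfiguration such as elevator switches or blkcg policy changes:
 *
 *	blk_queue_bypass_start(q);	// drain, route around elevator
 *	... swap elevator / policy data ...
 *	blk_queue_bypass_end(q);	// resume normal queueing
 */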

void blk_set_queue_dying(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/*
	 * make sure all in-progress dispatch are completed because
	 * blk_freeze_queue() can only complete all requests, and
	 * dispatch may still be in-progress since we dispatch requests
	 * from more than one context.
	 *
	 * We rely on the driver to deal with the race in case queue
	 * initialization isn't done.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
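
/*
 * Illustrative teardown order (hypothetical driver): blk_cleanup_queue()
 * pairs with blk_init_queue()/blk_alloc_queue() and is called after the
 * disk is gone but while the driver still holds its queue reference:
 *
 *	del_gendisk(mydev->disk);
 *	blk_cleanup_queue(mydev->queue);
 *	put_disk(mydev->disk);
 */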

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, bool nowait)
{
	while (true) {
		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (nowait)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start().  We need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against
		 * reading .mq_freeze_depth or the queue dying flag;
		 * otherwise the following wait may never return if the
		 * two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   !atomic_read(&q->mq_freeze_depth) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}
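
/*
 * Illustrative pairing (sketch): a successful blk_queue_enter() must be
 * balanced by blk_queue_exit() once the I/O has been handed off:
 *
 *	if (blk_queue_enter(q, false))	// may sleep until unfrozen
 *		return;			// queue is dying
 *	... issue the bio ...
 *	blk_queue_exit(q);
 */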

static void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	kblockd_schedule_work(&q->timeout_work);
}

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (lock)
		q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
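
/*
 * Illustrative setup (hypothetical driver; the mydev_* names are made
 * up):
 *
 *	spin_lock_init(&mydev->lock);
 *	q = blk_init_queue(mydev_request_fn, &mydev->lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_hw_sectors(q, MYDEV_MAX_SECTORS);
 *	mydev->disk->queue = q;
 *
 * and blk_cleanup_queue(q) undoes this at module unload.
 */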

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto out_exit_flush_rq;
	}

	mutex_unlock(&q->sysfs_lock);
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	q->fq = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(blk_get_queue);
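
/*
 * Illustrative sketch: taking a temporary reference on a queue before
 * touching it from another context.  blk_put_queue() is the real
 * counterpart defined elsewhere in the block layer; example_use_queue()
 * is a hypothetical caller.
 */
#if 0
static void example_use_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))	/* fails once the queue is dying */
		return;
	/* ... q is safe to use here ... */
	blk_put_queue(q);
}
#endif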

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}
	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}
  
/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}
  
/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, bool sync,
		req_flags_t rq_flags)
{
	struct request_queue *q = rl->q;

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	WARN_ON_ONCE(q->mq_ops);
	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}
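
/*
 * Worked example for the update above: shrinking q->nr_requests below a
 * request_list's current count marks that list full (and, past the
 * congestion threshold, congested), so new allocators in __get_request()
 * start failing or sleeping; growing it clears the flags and wakes any
 * sleepers so they re-test against the new limit.
 */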

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock still held.
 * Returns a request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
		struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);
	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;
	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);
	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. Another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock still held.
 * Returns a request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
		struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}
	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q,
					   unsigned int op, gfp_t gfp_mask)
{
	struct request *rq;
	WARN_ON_ONCE(q->mq_ops);
	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);
	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, gfp_mask);
	if (IS_ERR(rq)) {
		spin_unlock_irq(q->queue_lock);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}

struct request *blk_get_request(struct request_queue *q, unsigned int op,
				gfp_t gfp_mask)
{
	struct request *req;

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op,
			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
				0 : BLK_MQ_REQ_NOWAIT);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, gfp_mask);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request);
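
/*
 * Illustrative sketch: a driver allocating a passthrough request with
 * blk_get_request() and executing it synchronously.  REQ_OP_DRV_IN,
 * blk_execute_rq() and blk_put_request() are real interfaces of this
 * kernel generation; example_issue() and its arguments are hypothetical.
 */
#if 0
static int example_issue(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_execute_rq(q, disk, rq, 0);	/* at_head == 0, waits for completion */
	blk_put_request(rq);
	return 0;
}
#endif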
  
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);
	BUG_ON(blk_queued_rq(rq));
	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
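
/*
 * Illustrative sketch of the pattern the kernel-doc above describes:
 * a legacy request_fn handing a fetched request back when the hardware
 * is busy.  blk_stop_queue()/blk_start_queue() are real legacy
 * interfaces; example_hw_busy() is hypothetical.  Runs under
 * q->queue_lock, as blk_requeue_request() requires.
 */
#if 0
static void example_fetch_and_issue(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (example_hw_busy()) {
			blk_requeue_request(q, rq);
			blk_stop_queue(q);	/* restart later via blk_start_queue() */
			break;
		}
		/* ... issue rq to the hardware ... */
	}
}
#endif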

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}
  
/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);
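
/*
 * Worked example for the rounding above (hypothetical numbers): with
 * three requests in flight and part->stamp five jiffies in the past,
 * part_round_stats_single() adds 3 * 5 to time_in_queue and 5 to
 * io_ticks, then resets the stamp so the next interval starts at "now".
 */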

#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

void __blk_put_request(struct request_queue *q, struct request *req)
{
	req_flags_t rq_flags = req->rq_flags;

	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}
	lockdep_assert_held(q->queue_lock);
	blk_pm_put_request(req);
	elv_completed_request(q, req);
	/* this is a bio leak */
	WARN_ON(req->bio != NULL);
	wbt_done(q->rq_wb, &req->issue_stat);
	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rq_flags & RQF_ALLOCED) {
		struct request_list *rl = blk_rq_rl(req);
		bool sync = op_is_sync(req->cmd_flags);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, sync, rq_flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;
	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;
	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);
	bio->bi_next = req->bio;
	req->bio = bio;
	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		return false;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 */
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;
		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}
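
/*
 * Illustrative sketch of the submitter side that makes the merge attempt
 * above possible: batching bios under a plug on current->plug.
 * blk_start_plug()/blk_finish_plug() and generic_make_request() are real
 * interfaces; example_submit_batch() and its arguments are hypothetical.
 */
#if 0
static void example_submit_batch(struct bio **bios, int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* installs current->plug */
	for (i = 0; i < n; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);		/* flushes the batched requests */
}
#endif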

unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}

void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
	struct io_context *ioc = rq_ioc(bio);
	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->__sector = bio->bi_iter.bi_sector;
	if (ioprio_valid(bio_prio(bio)))
		req->ioprio = bio_prio(bio);
	else if (ioc)
		req->ioprio = ioc->ioprio;
	else
		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	req->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct blk_plug *plug;
	int where = ELEVATOR_INSERT_SORT;
	struct request *req, *free;
	unsigned int request_count = 0;
	unsigned int wb_acct;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);
	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (op_is_flush(bio->bi_opf)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}
	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	switch (elv_merge(q, &req, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!bio_attempt_back_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_back_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
		goto out_unlock;
	case ELEVATOR_FRONT_MERGE:
		if (!bio_attempt_front_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_front_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
		goto out_unlock;
	default:
		break;
	}
get_rq:
	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (PTR_ERR(req) == -ENOMEM)
			bio->bi_status = BLK_STS_RESOURCE;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		goto out_unlock;
	}

	wbt_track(&req->issue_stat, wb_acct);
	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	blk_init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 *
		 * @request_count may become stale because of schedule
		 * out, so check plug list again.
		 */
		if (!request_count || list_empty(&plug->list))
			trace_block_plug(q);
		else {
			struct request *last = list_entry_rq(plug->list.prev);
			if (request_count >= BLK_MAX_REQUEST_COUNT ||
			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}

	return BLK_QC_T_NONE;
}

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);
static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);
	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
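
/*
 * Usage sketch (per the kernel's fault-injection documentation, not this
 * file): boot with "fail_make_request=<interval>,<probability>,<space>,
 * <times>", mark a device via its make-it-fail sysfs attribute under
 * /sys/block/, and tune the attributes that the debugfs setup above
 * creates under /sys/kernel/debug/fail_make_request/.
 */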

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	if (part->policy && op_is_write(bio_op(bio))) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		return true;
	}

	return false;
}

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}
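
/*
 * Note on the bound check above: after "nr_sectors > maxsector" has been
 * ruled out, comparing bi_sector against "maxsector - nr_sectors" cannot
 * underflow, whereas the naive "bi_sector + nr_sectors > maxsector" could
 * wrap around near the top of the sector_t range.
 */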
  
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	/*
	 * Zone reset does not include bi_size so bio_sectors() is always 0.
	 * Include a test for the reset op code and perform the remap if needed.
	 */
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		bio->bi_partno = 0;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
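
/*
 * Worked example (hypothetical numbers): for a partition whose
 * start_sect is 2048, a bio aimed at sector 100 of that partition leaves
 * the remap above with bi_sector == 2148 and bi_partno == 0, i.e. it now
 * addresses the whole disk; the trace point logs the pre-remap offset.
 */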

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
		goto not_supported;
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_REPORT:
	case REQ_OP_ZONE_RESET:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context and lazy ioc
	 * allocation ends up trading a lot of pain for a small amount of
	 * memory.  Just allocate it upfront.  This may fail and block
	 * layer knows how to live with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);
	if (!blkcg_bio_issue_check(q, bio))
		return false;

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * is set to describe the memory buffer, that bi_disk and
 * bi_iter.bi_sector are set to describe the device address, and that
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * off the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;

		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = q->make_request_fn(q, bio);

			blk_queue_exit(q);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		} else {
			if (unlikely(!blk_queue_dying(q) &&
					(bio->bi_opf & REQ_NOWAIT)))
				bio_wouldblock_error(bio);
			else
				bio_io_error(bio);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
  EXPORT_SYMBOL(generic_make_request);
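
/*
 * Example usage (illustrative sketch, not part of the original file):
 * how a stacking driver's make_request function might redirect a bio
 * to a lower device and resubmit it through generic_make_request().
 * The "example_stack_dev" structure and its lower_disk member are
 * hypothetical; real stacking drivers (md, dm) carry far more state.
 */
struct example_stack_dev {
	struct gendisk *lower_disk;	/* hypothetical lower device */
};

static blk_qc_t example_stack_make_request(struct request_queue *q,
					   struct bio *bio)
{
	struct example_stack_dev *dev = q->queuedata;

	/* redirect to the lower disk; whole device, so clear bi_partno */
	bio->bi_disk = dev->lower_disk;
	bio->bi_partno = 0;
	/* the bio must not be touched after this call */
	return generic_make_request(bio);
}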
  
  /**
   * direct_make_request - hand a buffer directly to its device driver for I/O
   * @bio:  The bio describing the location in memory and on the device.
   *
   * This function behaves like generic_make_request(), but does not protect
   * against recursion.  Must only be used if the called driver is known
   * to not call generic_make_request (or direct_make_request) again from
   * its make_request function.  (Calling direct_make_request again from
   * a workqueue is perfectly fine as that doesn't recurse).
   */
  blk_qc_t direct_make_request(struct bio *bio)
  {
  	struct request_queue *q = bio->bi_disk->queue;
  	bool nowait = bio->bi_opf & REQ_NOWAIT;
  	blk_qc_t ret;
  
  	if (!generic_make_request_checks(bio))
  		return BLK_QC_T_NONE;
  
  	if (unlikely(blk_queue_enter(q, nowait))) {
  		if (nowait && !blk_queue_dying(q))
  			bio->bi_status = BLK_STS_AGAIN;
  		else
  			bio->bi_status = BLK_STS_IOERR;
  		bio_endio(bio);
  		return BLK_QC_T_NONE;
  	}
  
  	ret = q->make_request_fn(q, bio);
  	blk_queue_exit(q);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(direct_make_request);
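
/*
 * Example usage (illustrative sketch): resubmitting a bio from
 * workqueue context with direct_make_request(), which the comment
 * above explicitly permits.  Assumes the lower queue's make_request
 * function does not itself recurse into generic_make_request();
 * "example_requeue" is a hypothetical container.
 */
struct example_requeue {
	struct work_struct work;
	struct bio *bio;	/* bio to reissue on the lower device */
};

static void example_requeue_fn(struct work_struct *work)
{
	struct example_requeue *rw = container_of(work,
						  struct example_requeue, work);

	/* no recursion here, so bypassing the bio_list plugging is safe */
	direct_make_request(rw->bio);
	kfree(rw);
}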
  
  /**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be set up and ready for I/O.
 *
 */
blk_qc_t submit_bio(struct bio *bio)
{
	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	return generic_make_request(bio);
}
  EXPORT_SYMBOL(submit_bio);
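
/*
 * Example usage (illustrative sketch): building and submitting a
 * single-page read.  submit_bio_wait() is used to wait synchronously;
 * an asynchronous caller would set bi_end_io and call submit_bio()
 * directly.  The disk, sector and page are assumed to come from the
 * caller.
 */
static int example_read_page(struct gendisk *disk, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio->bi_disk = disk;			/* whole device: bi_partno == 0 */
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);		/* calls submit_bio() and waits */
	bio_put(bio);
	return ret;
}
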
  bool blk_poll(struct request_queue *q, blk_qc_t cookie)
  {
  	if (!q->poll_fn || !blk_qc_t_valid(cookie))
  		return false;
  
  	if (current->plug)
  		blk_flush_plug_list(current->plug, false);
  	return q->poll_fn(q, cookie);
  }
  EXPORT_SYMBOL_GPL(blk_poll);
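
/*
 * Example usage (illustrative sketch): a synchronous caller spinning
 * on blk_poll() with the cookie returned by submit_bio(), in the style
 * of polled O_DIRECT.  *done is assumed to be set from the bio's
 * bi_end_io handler.
 */
static void example_poll_for_completion(struct request_queue *q,
					blk_qc_t cookie, bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		if (!blk_poll(q, cookie))
			io_schedule();	/* queue cannot poll; sleep instead */
	}
	__set_current_state(TASK_RUNNING);
}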

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
  
  /**
   * blk_insert_cloned_request - Helper for stacking drivers to submit a request
   * @q:  the queue to submit the request
   * @rq: the request being queued
   */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (q->mq_ops) {
		if (blk_queue_io_stat(q))
			blk_account_io_start(rq, true);
		/*
		 * Since we have a scheduler attached on the top device,
		 * bypass a potential scheduler on the bottom device for
		 * insert.
		 */
		blk_mq_request_bypass_insert(rq);
		return BLK_STS_OK;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dying(q))) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return BLK_STS_IOERR;
	}

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	if (op_is_flush(rq->cmd_flags))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return BLK_STS_OK;
  }
  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
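
/*
 * Example usage (illustrative sketch): how a request stacking driver,
 * in the style of request-based dm, might dispatch a prepared clone to
 * the lower queue and translate the blk_status_t result to an errno.
 * The clone is assumed to have been set up beforehand (e.g. via
 * blk_rq_prep_clone()).
 */
static int example_dispatch_clone(struct request_queue *lower_q,
				  struct request *clone)
{
	blk_status_t ret;

	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret != BLK_STS_OK)
		return blk_status_to_errno(ret);	/* e.g. -EIO */
	return 0;
}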

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
  EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
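
/*
 * Example usage (illustrative sketch, modelled on the old
 * blk_end_request_err() helper): fail only the failfast prefix of a
 * mixed-merge request.  blk_rq_err_bytes() bounds the failure at the
 * first bio that is less eager to fail, so the remaining bios stay
 * pending for retry.
 */
static bool example_end_request_err(struct request *rq, blk_status_t error)
{
	WARN_ON(error == BLK_STS_OK);
	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
}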

void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}

#ifdef CONFIG_PM
/*
 * Don't process normal requests when queue is suspended
 * or in the process of suspending/resuming
 */
static struct request *blk_pm_peek_request(struct request_queue *q,
					   struct request *rq)
{
	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
		return NULL;
	else
		return rq;
}
#else
static inline struct request *blk_pm_peek_request(struct request_queue *q,
						  struct request *rq)
{
	return rq;
}
#endif

void blk_account_io_start(struct request *rq, bool new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(rq->q, cpu, part);
		part_inc_in_flight(rq->q, part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while ((rq = __elv_next_request(q)) != NULL) {

		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->rq_flags & RQF_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->rq_flags & RQF_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->rq_flags |= RQF_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->rq_flags & RQF_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * make sure space for the drain appears; we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  RQF_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->rq_flags & RQF_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
			rq->rq_flags |= RQF_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
					BLK_STS_TARGET : BLK_STS_IOERR);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_peek_request);

static void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.
 */
void blk_start_request(struct request *req)
{
	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(req->q->mq_ops);

	blk_dequeue_request(req);

	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
		blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
		req->rq_flags |= RQF_STATS;
		wbt_issue(req->q->rq_wb, &req->issue_stat);
	}

	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
	blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
  
  /**
   * blk_fetch_request - fetch a request from a request queue
   * @q: request queue to fetch a request from
   *
   * Description:
   *     Return the request at the top of @q.  The request is started on
   *     return and LLD can start processing it immediately.
   *
   * Return:
   *     Pointer to the request at the top of @q if available.  Null
   *     otherwise.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

  	rq = blk_peek_request(q);
  	if (rq)
  		blk_start_request(rq);
  	return rq;
  }
  EXPORT_SYMBOL(blk_fetch_request);
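
/*
 * Example usage (illustrative sketch): a legacy (non-mq) driver's
 * request_fn built on blk_fetch_request().  The queue lock is held on
 * entry to ->request_fn, as both helpers require.  "example_do_io"
 * stands in for a hypothetical synchronous transfer routine.
 */
static blk_status_t example_do_io(struct request *rq)
{
	/* hypothetical: transfer blk_rq_bytes(rq) starting at blk_rq_pos(rq) */
	return BLK_STS_OK;
}

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		blk_status_t err = example_do_io(rq);

		/* queue lock is still held, so use the __-prefixed variant */
		__blk_end_request_all(rq, err);
	}
}
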
  /*
   * Steal bios from a request and add them to a bio list.
   * The request must not have been partially completed before.
   */
  void blk_steal_bios(struct bio_list *list, struct request *rq)
  {
  	if (rq->bio) {
  		if (list->tail)
  			list->tail->bi_next = rq->bio;
  		else
  			list->head = rq->bio;
  		list->tail = rq->biotail;
  
  		rq->bio = NULL;
  		rq->biotail = NULL;
  	}
  
  	rq->__data_len = 0;
  }
  EXPORT_SYMBOL_GPL(blk_steal_bios);
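
/*
 * Example usage (illustrative sketch): the failover pattern
 * blk_steal_bios() enables, in the style of nvme multipath: move the
 * bios off a failed request onto a local list, end the request, then
 * reissue each bio, assumed here to have been redirected to another
 * path first.
 */
static void example_failover_request(struct request *rq)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	blk_steal_bios(&bios, rq);
	blk_mq_end_request(rq, BLK_STS_OK);	/* rq now carries no bios */

	while ((bio = bio_list_pop(&bios)) != NULL)
		generic_make_request(bio);	/* retry on the new path */
}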

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		blk_recalc_rq_segments(req);
	}

	return true;
}
  EXPORT_SYMBOL_GPL(blk_update_request);
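
/*
 * Example usage (illustrative sketch): partial completion in a request
 * stacking driver.  When the lower device reports that only "done"
 * bytes finished, complete just those; a %true return means the
 * request was set up for the remaining segments and is still owned by
 * the driver.
 */
static void example_complete_partial(struct request *rq, unsigned int done)
{
	if (blk_update_request(rq, BLK_STS_OK, done)) {
		/* leftover bytes: requeue or resubmit the remainder */
		return;
	}
	/* all bytes done; the driver would now finish and release rq */
}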

static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req:	the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
 */
void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->rq_flags &= ~RQF_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);

void blk_finish_request(struct request *req, blk_status_t error)
{
	struct request_queue *q = req->q;

	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (req->rq_flags & RQF_STATS)
		blk_stat_add(req);

	if (req->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
		laptop_io_completion(req->q->backing_dev_info);

	blk_delete_timer(req);
	if (req->rq_flags & RQF_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req);

	if (req->end_io) {
		wbt_done(req->q->rq_wb, &req->issue_stat);
		req->end_io(req, error);
	} else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(q, req);
	}
}
EXPORT_SYMBOL(blk_finish_request);

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that support bidi can safely call this member for any
 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}
  
/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	WARN_ON_ONCE(rq->q->mq_ops);
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
  EXPORT_SYMBOL(blk_end_request);
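
/*
 * Example usage (illustrative sketch): a legacy driver completing I/O
 * chunk by chunk from its interrupt handler.  blk_end_request() takes
 * the queue lock itself, so it is called without the lock held;
 * "bytes_done" would come from the hardware.
 */
static void example_irq_complete(struct request *rq, unsigned int bytes_done)
{
	if (blk_end_request(rq, BLK_STS_OK, bytes_done)) {
		/* more segments remain; program the next transfer */
		return;
	}
	/* the request was fully completed and released by the block layer */
}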
  
/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: block status code
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);
  
/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error:    block status code
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error:    block status code
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, blk_status_t error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);
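
/*
 * Example (an illustrative sketch only; "struct my_dev" and the my_dev_*
 * helpers are hypothetical, and irq masking is elided for brevity): a legacy
 * single-queue driver that transfers a request one chunk at a time could
 * retire each chunk from its completion handler, holding the queue lock as
 * required:
 *
 *	static void my_dev_chunk_done(struct my_dev *dev, blk_status_t error)
 *	{
 *		struct request *rq = dev->current_rq;
 *
 *		spin_lock(dev->queue->queue_lock);
 *		if (__blk_end_request_cur(rq, error))
 *			my_dev_start_next_chunk(dev, rq);
 *		else
 *			dev->current_rq = NULL;
 *		spin_unlock(dev->queue->queue_lock);
 *	}
 */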

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);
	else if (bio_op(bio) == REQ_OP_DISCARD)
		rq->nr_phys_segments = 1;

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
  /**
   * blk_lld_busy - Check if underlying low-level drivers of a device are busy
   * @q : the queue of the device being checked
   *
   * Description:
   *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when the underlying
 *    devices are busy.  This behavior improves I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regressions
 *    under bursty I/O load.
   *
   * Return:
   *    0 - Not busy (The request stacking driver should dispatch request)
   *    1 - Busy (The request stacking driver should stop dispatching request)
   */
  int blk_lld_busy(struct request_queue *q)
  {
  	if (q->lld_busy_fn)
  		return q->lld_busy_fn(q);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_lld_busy);
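
/*
 * Example (an illustrative sketch; the my_stacked_* names are hypothetical):
 * a request stacking driver can consult blk_lld_busy() on an underlying
 * device's queue before dispatching to it, and requeue instead of pushing
 * more work at a busy device:
 *
 *	static void my_stacked_dispatch(struct my_stacked_dev *md,
 *					struct request *rq)
 *	{
 *		struct request_queue *lower_q = my_stacked_pick_path(md);
 *
 *		if (blk_lld_busy(lower_q)) {
 *			my_stacked_requeue(md, rq);
 *			return;
 *		}
 *		my_stacked_issue(md, lower_q, rq);
 *	}
 */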
  /**
   * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
   * @rq: the clone request to be cleaned up
   *
   * Description:
   *     Free all bios in @rq for a cloned request.
   */
  void blk_rq_unprep_clone(struct request *rq)
  {
  	struct bio *bio;
  
  	while ((bio = rq->bio) != NULL) {
  		rq->bio = bio->bi_next;
  
  		bio_put(bio);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
  
  /*
   * Copy attributes of the original request to the clone request.
   * The actual data parts (e.g. ->cmd, ->sense) are not copied.
   */
  static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
		dst->special_vec = src->special_vec;
	}
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
  }
  
  /**
   * blk_rq_prep_clone - Helper function to setup clone request
   * @rq: the request to be setup
   * @rq_src: original request to be cloned
   * @bs: bio_set that bios for clone are allocated from
   * @gfp_mask: memory allocation mask for bio
   * @bio_ctr: setup function to be called for each clone bio.
   *           Returns %0 for success, non %0 for failure.
   * @data: private data to be passed to @bio_ctr
   *
   * Description:
   *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
   *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
   *     are not copied, and copying such parts is the caller's responsibility.
 *     Also, the pages which the original bios point to are not copied
 *     and the cloned bios just point to the same pages.
   *     So cloned bios must be completed before original bios, which means
   *     the caller must complete @rq before @rq_src.
   */
  int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
  		      struct bio_set *bs, gfp_t gfp_mask,
  		      int (*bio_ctr)(struct bio *, struct bio *, void *),
  		      void *data)
  {
  	struct bio *bio, *bio_src;
  
  	if (!bs)
  		bs = fs_bio_set;
  
  	__rq_for_each_bio(bio_src, rq_src) {
  		bio = bio_clone_fast(bio_src, gfp_mask, bs);
  		if (!bio)
  			goto free_and_out;
  
  		if (bio_ctr && bio_ctr(bio, bio_src, data))
  			goto free_and_out;
  
  		if (rq->bio) {
  			rq->biotail->bi_next = bio;
  			rq->biotail = bio;
  		} else
  			rq->bio = rq->biotail = bio;
  	}
  
  	__blk_rq_prep_clone(rq, rq_src);
  
  	return 0;
  
  free_and_out:
  	if (bio)
  		bio_put(bio);
  	blk_rq_unprep_clone(rq);
  
  	return -ENOMEM;
  }
  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
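
/*
 * Example (an illustrative sketch; my_clone_setup() and my_clone_end_io()
 * are hypothetical): a request-based stacking driver would typically clone
 * an original request into a request allocated on the lower device's queue,
 * then hook its own completion handler:
 *
 *	static int my_clone_setup(struct request *clone, struct request *rq_src,
 *				  struct bio_set *bs)
 *	{
 *		int ret;
 *
 *		ret = blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC,
 *					NULL, NULL);
 *		if (ret)
 *			return ret;
 *		clone->end_io = my_clone_end_io;
 *		return 0;
 *	}
 *
 * A NULL @bio_ctr is fine when no per-bio setup is needed, and passing a
 * NULL @bs falls back to fs_bio_set, as the code above shows.
 */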
  int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
  EXPORT_SYMBOL(kblockd_schedule_work);
  int kblockd_schedule_work_on(int cpu, struct work_struct *work)
  {
  	return queue_work_on(cpu, kblockd_workqueue, work);
  }
  EXPORT_SYMBOL(kblockd_schedule_work_on);
  int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
  				unsigned long delay)
  {
  	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
  int kblockd_schedule_delayed_work(struct delayed_work *dwork,
  				  unsigned long delay)
  {
  	return queue_delayed_work(kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work);
  int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
  				     unsigned long delay)
  {
  	return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
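
/*
 * Example (an illustrative sketch; "dev" and my_dev_done_workfn() are
 * hypothetical): drivers use these helpers to defer work to the shared
 * kblockd workqueue instead of maintaining a private one.
 *
 *	At init time:
 *		INIT_WORK(&dev->done_work, my_dev_done_workfn);
 *	From the I/O completion path:
 *		kblockd_schedule_work(&dev->done_work);
 *
 * kblockd is created with WQ_MEM_RECLAIM | WQ_HIGHPRI (see blk_dev_init()
 * below), so work queued on it keeps making progress under memory pressure.
 */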
  /**
   * blk_start_plug - initialize blk_plug and track it inside the task_struct
   * @plug:	The &struct blk_plug that needs to be initialized
   *
   * Description:
   *   Tracking blk_plug inside the task_struct will help with auto-flushing the
   *   pending I/O should the task end up blocking between blk_start_plug() and
   *   blk_finish_plug(). This is important from a performance perspective, but
   *   also ensures that we don't deadlock. For instance, if the task is blocking
   *   for a memory allocation, memory reclaim could end up wanting to free a
   *   page belonging to that request that is currently residing in our private
   *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
   *   this kind of deadlock.
   */
  void blk_start_plug(struct blk_plug *plug)
  {
  	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
  }
  EXPORT_SYMBOL(blk_start_plug);
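
/*
 * Example (an illustrative sketch; have_more_io() and next_bio() stand in
 * for the caller's own iteration): a typical submitter batches a burst of
 * bios under an on-stack plug, so that they can be merged and dispatched in
 * one go when the plug is finished, or flushed automatically if the task
 * blocks in between:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (have_more_io())
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 */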
  
  static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  {
  	struct request *rqa = container_of(a, struct request, queuelist);
  	struct request *rqb = container_of(b, struct request, queuelist);
  	return !(rqa->q < rqb->q ||
  		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
  }
  /*
   * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context. We do this to avoid accidentally large
 * additional stack usage in driver dispatch, in places where the original
   * plugger did not intend it.
   */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	lockdep_assert_held(q->queue_lock);

	trace_block_unplug(q, depth, !from_schedule);

	if (from_schedule)
		blk_run_queue_async(q);
	else
		__blk_run_queue(q);
	spin_unlock(q->queue_lock);
}

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}
  struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
  				      int size)
  {
  	struct blk_plug *plug = current->plug;
  	struct blk_plug_cb *cb;
  
  	if (!plug)
  		return NULL;
  
  	list_for_each_entry(cb, &plug->cb_list, list)
  		if (cb->callback == unplug && cb->data == data)
  			return cb;
  
  	/* Not currently on the callback list */
  	BUG_ON(size < sizeof(*cb));
  	cb = kzalloc(size, GFP_ATOMIC);
  	if (cb) {
  		cb->data = data;
  		cb->callback = unplug;
  		list_add(&cb->list, &plug->cb_list);
  	}
  	return cb;
  }
  EXPORT_SYMBOL(blk_check_plugged);
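
/*
 * Example (an illustrative sketch; my_unplug() and my_dev_flush_batch() are
 * hypothetical): stacking drivers such as md use blk_check_plugged() to hang
 * their own callback off the current task's plug, deferring work until the
 * plug is flushed:
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct my_dev *dev = cb->data;
 *
 *		my_dev_flush_batch(dev);
 *		kfree(cb);
 *	}
 *
 *	In the submission path:
 *		if (!blk_check_plugged(my_unplug, dev, sizeof(struct blk_plug_cb)))
 *			my_dev_flush_batch(dev);
 *
 * A NULL return means the task is not plugged and the work should be done
 * immediately; the callback is expected to free the allocation.
 */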
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);

	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	list_sort(NULL, &list, plug_rq_cmp);

	q = NULL;
	depth = 0;

	/*
	 * Save and disable interrupts here, to avoid doing it for every
	 * queue lock we have to take.
	 */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}

		/*
		 * Short-circuit if @q is dead
		 */
		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, BLK_STS_IOERR);
			continue;
		}

		/*
		 * rq is already accounted, so use raw insert
		 */
		if (op_is_flush(rq->cmd_flags))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}
  
  void blk_finish_plug(struct blk_plug *plug)
  {
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
  EXPORT_SYMBOL(blk_finish_plug);
  #ifdef CONFIG_PM
  /**
   * blk_pm_runtime_init - Block layer runtime PM initialization routine
   * @q: the queue of the device
   * @dev: the device the queue belongs to
   *
   * Description:
   *    Initialize runtime-PM-related fields for @q and start auto suspend for
   *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, but while runtime PM for it cannot
 *    happen yet (either because it is disabled/forbidden or because its
 *    usage_count > 0). In most cases, the driver should call this function
 *    before any I/O has taken place.
   *
 *    This function takes care of setting up autosuspend for the device;
 *    the autosuspend delay is set to -1 to make runtime suspend impossible
 *    until an updated value is set by either the user or the driver. Drivers
 *    do not need to touch other autosuspend settings.
 *
 *    The block layer runtime PM is request based, so it only works for
 *    drivers that use requests as their I/O unit rather than those that
 *    submit bios directly.
   */
  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  {
	/* Don't enable runtime PM for blk-mq until it is ready */
	if (q->mq_ops) {
		pm_runtime_disable(dev);
		return;
	}

  	q->dev = dev;
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_set_autosuspend_delay(q->dev, -1);
  	pm_runtime_use_autosuspend(q->dev);
  }
  EXPORT_SYMBOL(blk_pm_runtime_init);
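
/*
 * Example (an illustrative sketch; my_probe() is hypothetical): a
 * request-based driver wires up runtime PM once its queue exists, then picks
 * a real autosuspend delay. SCSI's sd_probe() initializes runtime PM for
 * disks in a similar way.
 *
 *	static int my_probe(struct device *dev, struct request_queue *q)
 *	{
 *		blk_pm_runtime_init(q, dev);
 *		pm_runtime_set_autosuspend_delay(dev, 5000);
 *		pm_runtime_allow(dev);
 *		return 0;
 *	}
 */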
  
  /**
   * blk_pre_runtime_suspend - Pre runtime suspend check
   * @q: the queue of the device
   *
   * Description:
   *    This function will check if runtime suspend is allowed for the device
   *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device cannot be runtime suspended; otherwise,
   *    the queue's status will be updated to SUSPENDING and the driver can
   *    proceed to suspend the device.
   *
   *    For the not allowed case, we mark last busy for the device so that
   *    runtime PM core will try to autosuspend it some time later.
   *
   *    This function should be called near the start of the device's
   *    runtime_suspend callback.
   *
   * Return:
   *    0		- OK to runtime suspend the device
   *    -EBUSY	- Device should not be runtime suspended
   */
  int blk_pre_runtime_suspend(struct request_queue *q)
  {
  	int ret = 0;
  	if (!q->dev)
  		return ret;
  	spin_lock_irq(q->queue_lock);
  	if (q->nr_pending) {
  		ret = -EBUSY;
  		pm_runtime_mark_last_busy(q->dev);
  	} else {
  		q->rpm_status = RPM_SUSPENDING;
  	}
  	spin_unlock_irq(q->queue_lock);
  	return ret;
  }
  EXPORT_SYMBOL(blk_pre_runtime_suspend);
  
  /**
   * blk_post_runtime_suspend - Post runtime suspend processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_suspend function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime suspend function and mark last busy for the device so
   *    that PM core will try to auto suspend the device at a later time.
   *
   *    This function should be called near the end of the device's
   *    runtime_suspend callback.
   */
  void blk_post_runtime_suspend(struct request_queue *q, int err)
  {
  	if (!q->dev)
  		return;
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_SUSPENDED;
  	} else {
  		q->rpm_status = RPM_ACTIVE;
  		pm_runtime_mark_last_busy(q->dev);
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_suspend);
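
/*
 * Example (an illustrative sketch; my_dev_queue() and my_dev_power_down()
 * are hypothetical): blk_pre_runtime_suspend() and blk_post_runtime_suspend()
 * bracket the driver's own work inside its runtime_suspend callback:
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = my_dev_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *		err = my_dev_power_down(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 */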
  
  /**
   * blk_pre_runtime_resume - Pre runtime resume processing
   * @q: the queue of the device
   *
   * Description:
   *    Update the queue's runtime status to RESUMING in preparation for the
   *    runtime resume of the device.
   *
   *    This function should be called near the start of the device's
   *    runtime_resume callback.
   */
  void blk_pre_runtime_resume(struct request_queue *q)
  {
  	if (!q->dev)
  		return;
  	spin_lock_irq(q->queue_lock);
  	q->rpm_status = RPM_RESUMING;
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_pre_runtime_resume);
  
  /**
   * blk_post_runtime_resume - Post runtime resume processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_resume function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime_resume function. If it is successfully resumed, process
   *    the requests that are queued into the device's queue when it is resuming
   *    and then mark last busy and initiate autosuspend for it.
   *
   *    This function should be called near the end of the device's
   *    runtime_resume callback.
   */
  void blk_post_runtime_resume(struct request_queue *q, int err)
  {
  	if (!q->dev)
  		return;
  	spin_lock_irq(q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_ACTIVE;
  		__blk_run_queue(q);
  		pm_runtime_mark_last_busy(q->dev);
  		pm_request_autosuspend(q->dev);
  	} else {
  		q->rpm_status = RPM_SUSPENDED;
  	}
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_post_runtime_resume);
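
/*
 * Example (an illustrative sketch mirroring the suspend example above;
 * my_dev_queue() and my_dev_power_up() are hypothetical): the resume-side
 * helpers bracket the driver's power-up work the same way:
 *
 *	static int my_runtime_resume(struct device *dev)
 *	{
 *		struct request_queue *q = my_dev_queue(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(q);
 *		err = my_dev_power_up(dev);
 *		blk_post_runtime_resume(q, err);
 *		return err;
 *	}
 */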
  
  /**
   * blk_set_runtime_active - Force runtime status of the queue to be active
   * @q: the queue of the device
   *
   * If the device is left runtime suspended during system suspend the resume
   * hook typically resumes the device and corrects runtime status
   * accordingly. However, that does not affect the queue runtime PM status
   * which is still "suspended". This prevents processing requests from the
   * queue.
   *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before the first request is added to the queue.
   */
  void blk_set_runtime_active(struct request_queue *q)
  {
  	spin_lock_irq(q->queue_lock);
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_mark_last_busy(q->dev);
  	pm_request_autosuspend(q->dev);
  	spin_unlock_irq(q->queue_lock);
  }
  EXPORT_SYMBOL(blk_set_runtime_active);
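
/*
 * Example (an illustrative sketch; my_system_resume(), my_dev_queue() and
 * my_dev_power_up() are hypothetical, and the pm_runtime_* status fixup is
 * the usual driver-core pattern rather than anything this file mandates):
 *
 *	static int my_system_resume(struct device *dev)
 *	{
 *		struct request_queue *q = my_dev_queue(dev);
 *
 *		my_dev_power_up(dev);
 *		pm_runtime_disable(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		blk_set_runtime_active(q);
 *		return 0;
 *	}
 */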
  #endif

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}