Commit af498d7fa3e786f52650819a56e117ed9a40920c
Committed by Jens Axboe
1 parent a4d7749be5
block: fix the bio_vec array index out-of-bounds test
The current bio_vec array index out-of-bounds test within __end_that_request_first() does not seem correct. It checks bio->bi_idx against bio->bi_vcnt, but the subsequent code uses idx (which is bio->bi_idx + next_idx) as the index into the bio_vec array. This means the test really makes sense only on the first iteration of the !(nr_bytes >= bio->bi_size) case (when next_idx == 0). Fix this by replacing bio->bi_idx with idx. (This patch applies to 2.6.30-rc4.)

Signed-off-by: Kazuhisa Ichikawa <ki@epsilou.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
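The changed hunk in __end_that_request_first() lies past the portion of the file shown below. As a rough sketch of the check the message describes (only the idx-vs-bi_idx substitution is taken from the message; the surrounding lines, the printk wording, and the use of req/break are assumptions about the context):

	/* Sketch only: the bounds test should use the element the loop actually
	 * dereferences, i.e. idx == bio->bi_idx + next_idx, not the bio's
	 * starting index. */
	if (unlikely(idx >= bio->bi_vcnt)) {	/* was: bio->bi_idx >= bio->bi_vcnt */
		blk_dump_rq_flags(req, "__end_that");
		printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
		       __func__, idx, bio->bi_vcnt);
		break;
	}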
Showing 1 changed file with 2 additions and 2 deletions

Inline Diff
block/blk-core.c
1 | /* | 1 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
3 | * Copyright (C) 1994, Karl Keyte: Added support for disk statistics | 3 | * Copyright (C) 1994, Karl Keyte: Added support for disk statistics |
4 | * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | 4 | * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE |
5 | * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> | 5 | * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> |
6 | * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> | 6 | * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> |
7 | * - July2000 | 7 | * - July2000 |
8 | * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 | 8 | * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * This handles all read/write requests to block devices | 12 | * This handles all read/write requests to block devices |
13 | */ | 13 | */ |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/backing-dev.h> | 16 | #include <linux/backing-dev.h> |
17 | #include <linux/bio.h> | 17 | #include <linux/bio.h> |
18 | #include <linux/blkdev.h> | 18 | #include <linux/blkdev.h> |
19 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/kernel_stat.h> | 21 | #include <linux/kernel_stat.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/completion.h> | 24 | #include <linux/completion.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/swap.h> | 26 | #include <linux/swap.h> |
27 | #include <linux/writeback.h> | 27 | #include <linux/writeback.h> |
28 | #include <linux/task_io_accounting_ops.h> | 28 | #include <linux/task_io_accounting_ops.h> |
29 | #include <linux/blktrace_api.h> | 29 | #include <linux/blktrace_api.h> |
30 | #include <linux/fault-inject.h> | 30 | #include <linux/fault-inject.h> |
31 | #include <trace/block.h> | 31 | #include <trace/block.h> |
32 | 32 | ||
33 | #include "blk.h" | 33 | #include "blk.h" |
34 | 34 | ||
35 | DEFINE_TRACE(block_plug); | 35 | DEFINE_TRACE(block_plug); |
36 | DEFINE_TRACE(block_unplug_io); | 36 | DEFINE_TRACE(block_unplug_io); |
37 | DEFINE_TRACE(block_unplug_timer); | 37 | DEFINE_TRACE(block_unplug_timer); |
38 | DEFINE_TRACE(block_getrq); | 38 | DEFINE_TRACE(block_getrq); |
39 | DEFINE_TRACE(block_sleeprq); | 39 | DEFINE_TRACE(block_sleeprq); |
40 | DEFINE_TRACE(block_rq_requeue); | 40 | DEFINE_TRACE(block_rq_requeue); |
41 | DEFINE_TRACE(block_bio_backmerge); | 41 | DEFINE_TRACE(block_bio_backmerge); |
42 | DEFINE_TRACE(block_bio_frontmerge); | 42 | DEFINE_TRACE(block_bio_frontmerge); |
43 | DEFINE_TRACE(block_bio_queue); | 43 | DEFINE_TRACE(block_bio_queue); |
44 | DEFINE_TRACE(block_rq_complete); | 44 | DEFINE_TRACE(block_rq_complete); |
45 | DEFINE_TRACE(block_remap); /* Also used in drivers/md/dm.c */ | 45 | DEFINE_TRACE(block_remap); /* Also used in drivers/md/dm.c */ |
46 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); | 46 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); |
47 | 47 | ||
48 | static int __make_request(struct request_queue *q, struct bio *bio); | 48 | static int __make_request(struct request_queue *q, struct bio *bio); |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * For the allocated request tables | 51 | * For the allocated request tables |
52 | */ | 52 | */ |
53 | static struct kmem_cache *request_cachep; | 53 | static struct kmem_cache *request_cachep; |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * For queue allocation | 56 | * For queue allocation |
57 | */ | 57 | */ |
58 | struct kmem_cache *blk_requestq_cachep; | 58 | struct kmem_cache *blk_requestq_cachep; |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Controlling structure to kblockd | 61 | * Controlling structure to kblockd |
62 | */ | 62 | */ |
63 | static struct workqueue_struct *kblockd_workqueue; | 63 | static struct workqueue_struct *kblockd_workqueue; |
64 | 64 | ||
65 | static void drive_stat_acct(struct request *rq, int new_io) | 65 | static void drive_stat_acct(struct request *rq, int new_io) |
66 | { | 66 | { |
67 | struct hd_struct *part; | 67 | struct hd_struct *part; |
68 | int rw = rq_data_dir(rq); | 68 | int rw = rq_data_dir(rq); |
69 | int cpu; | 69 | int cpu; |
70 | 70 | ||
71 | if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) | 71 | if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) |
72 | return; | 72 | return; |
73 | 73 | ||
74 | cpu = part_stat_lock(); | 74 | cpu = part_stat_lock(); |
75 | part = disk_map_sector_rcu(rq->rq_disk, rq->sector); | 75 | part = disk_map_sector_rcu(rq->rq_disk, rq->sector); |
76 | 76 | ||
77 | if (!new_io) | 77 | if (!new_io) |
78 | part_stat_inc(cpu, part, merges[rw]); | 78 | part_stat_inc(cpu, part, merges[rw]); |
79 | else { | 79 | else { |
80 | part_round_stats(cpu, part); | 80 | part_round_stats(cpu, part); |
81 | part_inc_in_flight(part); | 81 | part_inc_in_flight(part); |
82 | } | 82 | } |
83 | 83 | ||
84 | part_stat_unlock(); | 84 | part_stat_unlock(); |
85 | } | 85 | } |
86 | 86 | ||
87 | void blk_queue_congestion_threshold(struct request_queue *q) | 87 | void blk_queue_congestion_threshold(struct request_queue *q) |
88 | { | 88 | { |
89 | int nr; | 89 | int nr; |
90 | 90 | ||
91 | nr = q->nr_requests - (q->nr_requests / 8) + 1; | 91 | nr = q->nr_requests - (q->nr_requests / 8) + 1; |
92 | if (nr > q->nr_requests) | 92 | if (nr > q->nr_requests) |
93 | nr = q->nr_requests; | 93 | nr = q->nr_requests; |
94 | q->nr_congestion_on = nr; | 94 | q->nr_congestion_on = nr; |
95 | 95 | ||
96 | nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; | 96 | nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; |
97 | if (nr < 1) | 97 | if (nr < 1) |
98 | nr = 1; | 98 | nr = 1; |
99 | q->nr_congestion_off = nr; | 99 | q->nr_congestion_off = nr; |
100 | } | 100 | } |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * blk_get_backing_dev_info - get the address of a queue's backing_dev_info | 103 | * blk_get_backing_dev_info - get the address of a queue's backing_dev_info |
104 | * @bdev: device | 104 | * @bdev: device |
105 | * | 105 | * |
106 | * Locates the passed device's request queue and returns the address of its | 106 | * Locates the passed device's request queue and returns the address of its |
107 | * backing_dev_info | 107 | * backing_dev_info |
108 | * | 108 | * |
109 | * Will return NULL if the request queue cannot be located. | 109 | * Will return NULL if the request queue cannot be located. |
110 | */ | 110 | */ |
111 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) | 111 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) |
112 | { | 112 | { |
113 | struct backing_dev_info *ret = NULL; | 113 | struct backing_dev_info *ret = NULL; |
114 | struct request_queue *q = bdev_get_queue(bdev); | 114 | struct request_queue *q = bdev_get_queue(bdev); |
115 | 115 | ||
116 | if (q) | 116 | if (q) |
117 | ret = &q->backing_dev_info; | 117 | ret = &q->backing_dev_info; |
118 | return ret; | 118 | return ret; |
119 | } | 119 | } |
120 | EXPORT_SYMBOL(blk_get_backing_dev_info); | 120 | EXPORT_SYMBOL(blk_get_backing_dev_info); |
121 | 121 | ||
122 | void blk_rq_init(struct request_queue *q, struct request *rq) | 122 | void blk_rq_init(struct request_queue *q, struct request *rq) |
123 | { | 123 | { |
124 | memset(rq, 0, sizeof(*rq)); | 124 | memset(rq, 0, sizeof(*rq)); |
125 | 125 | ||
126 | INIT_LIST_HEAD(&rq->queuelist); | 126 | INIT_LIST_HEAD(&rq->queuelist); |
127 | INIT_LIST_HEAD(&rq->timeout_list); | 127 | INIT_LIST_HEAD(&rq->timeout_list); |
128 | rq->cpu = -1; | 128 | rq->cpu = -1; |
129 | rq->q = q; | 129 | rq->q = q; |
130 | rq->sector = rq->hard_sector = (sector_t) -1; | 130 | rq->sector = rq->hard_sector = (sector_t) -1; |
131 | INIT_HLIST_NODE(&rq->hash); | 131 | INIT_HLIST_NODE(&rq->hash); |
132 | RB_CLEAR_NODE(&rq->rb_node); | 132 | RB_CLEAR_NODE(&rq->rb_node); |
133 | rq->cmd = rq->__cmd; | 133 | rq->cmd = rq->__cmd; |
134 | rq->cmd_len = BLK_MAX_CDB; | 134 | rq->cmd_len = BLK_MAX_CDB; |
135 | rq->tag = -1; | 135 | rq->tag = -1; |
136 | rq->ref_count = 1; | 136 | rq->ref_count = 1; |
137 | } | 137 | } |
138 | EXPORT_SYMBOL(blk_rq_init); | 138 | EXPORT_SYMBOL(blk_rq_init); |
139 | 139 | ||
140 | static void req_bio_endio(struct request *rq, struct bio *bio, | 140 | static void req_bio_endio(struct request *rq, struct bio *bio, |
141 | unsigned int nbytes, int error) | 141 | unsigned int nbytes, int error) |
142 | { | 142 | { |
143 | struct request_queue *q = rq->q; | 143 | struct request_queue *q = rq->q; |
144 | 144 | ||
145 | if (&q->bar_rq != rq) { | 145 | if (&q->bar_rq != rq) { |
146 | if (error) | 146 | if (error) |
147 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 147 | clear_bit(BIO_UPTODATE, &bio->bi_flags); |
148 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) | 148 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
149 | error = -EIO; | 149 | error = -EIO; |
150 | 150 | ||
151 | if (unlikely(nbytes > bio->bi_size)) { | 151 | if (unlikely(nbytes > bio->bi_size)) { |
152 | printk(KERN_ERR "%s: want %u bytes done, %u left\n", | 152 | printk(KERN_ERR "%s: want %u bytes done, %u left\n", |
153 | __func__, nbytes, bio->bi_size); | 153 | __func__, nbytes, bio->bi_size); |
154 | nbytes = bio->bi_size; | 154 | nbytes = bio->bi_size; |
155 | } | 155 | } |
156 | 156 | ||
157 | if (unlikely(rq->cmd_flags & REQ_QUIET)) | 157 | if (unlikely(rq->cmd_flags & REQ_QUIET)) |
158 | set_bit(BIO_QUIET, &bio->bi_flags); | 158 | set_bit(BIO_QUIET, &bio->bi_flags); |
159 | 159 | ||
160 | bio->bi_size -= nbytes; | 160 | bio->bi_size -= nbytes; |
161 | bio->bi_sector += (nbytes >> 9); | 161 | bio->bi_sector += (nbytes >> 9); |
162 | 162 | ||
163 | if (bio_integrity(bio)) | 163 | if (bio_integrity(bio)) |
164 | bio_integrity_advance(bio, nbytes); | 164 | bio_integrity_advance(bio, nbytes); |
165 | 165 | ||
166 | if (bio->bi_size == 0) | 166 | if (bio->bi_size == 0) |
167 | bio_endio(bio, error); | 167 | bio_endio(bio, error); |
168 | } else { | 168 | } else { |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * Okay, this is the barrier request in progress, just | 171 | * Okay, this is the barrier request in progress, just |
172 | * record the error; | 172 | * record the error; |
173 | */ | 173 | */ |
174 | if (error && !q->orderr) | 174 | if (error && !q->orderr) |
175 | q->orderr = error; | 175 | q->orderr = error; |
176 | } | 176 | } |
177 | } | 177 | } |
178 | 178 | ||
179 | void blk_dump_rq_flags(struct request *rq, char *msg) | 179 | void blk_dump_rq_flags(struct request *rq, char *msg) |
180 | { | 180 | { |
181 | int bit; | 181 | int bit; |
182 | 182 | ||
183 | printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, | 183 | printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, |
184 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, | 184 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, |
185 | rq->cmd_flags); | 185 | rq->cmd_flags); |
186 | 186 | ||
187 | printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", | 187 | printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", |
188 | (unsigned long long)rq->sector, | 188 | (unsigned long long)rq->sector, |
189 | rq->nr_sectors, | 189 | rq->nr_sectors, |
190 | rq->current_nr_sectors); | 190 | rq->current_nr_sectors); |
191 | printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", | 191 | printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", |
192 | rq->bio, rq->biotail, | 192 | rq->bio, rq->biotail, |
193 | rq->buffer, rq->data, | 193 | rq->buffer, rq->data, |
194 | rq->data_len); | 194 | rq->data_len); |
195 | 195 | ||
196 | if (blk_pc_request(rq)) { | 196 | if (blk_pc_request(rq)) { |
197 | printk(KERN_INFO " cdb: "); | 197 | printk(KERN_INFO " cdb: "); |
198 | for (bit = 0; bit < BLK_MAX_CDB; bit++) | 198 | for (bit = 0; bit < BLK_MAX_CDB; bit++) |
199 | printk("%02x ", rq->cmd[bit]); | 199 | printk("%02x ", rq->cmd[bit]); |
200 | printk("\n"); | 200 | printk("\n"); |
201 | } | 201 | } |
202 | } | 202 | } |
203 | EXPORT_SYMBOL(blk_dump_rq_flags); | 203 | EXPORT_SYMBOL(blk_dump_rq_flags); |
204 | 204 | ||
205 | /* | 205 | /* |
206 | * "plug" the device if there are no outstanding requests: this will | 206 | * "plug" the device if there are no outstanding requests: this will |
207 | * force the transfer to start only after we have put all the requests | 207 | * force the transfer to start only after we have put all the requests |
208 | * on the list. | 208 | * on the list. |
209 | * | 209 | * |
210 | * This is called with interrupts off and no requests on the queue and | 210 | * This is called with interrupts off and no requests on the queue and |
211 | * with the queue lock held. | 211 | * with the queue lock held. |
212 | */ | 212 | */ |
213 | void blk_plug_device(struct request_queue *q) | 213 | void blk_plug_device(struct request_queue *q) |
214 | { | 214 | { |
215 | WARN_ON(!irqs_disabled()); | 215 | WARN_ON(!irqs_disabled()); |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * don't plug a stopped queue, it must be paired with blk_start_queue() | 218 | * don't plug a stopped queue, it must be paired with blk_start_queue() |
219 | * which will restart the queueing | 219 | * which will restart the queueing |
220 | */ | 220 | */ |
221 | if (blk_queue_stopped(q)) | 221 | if (blk_queue_stopped(q)) |
222 | return; | 222 | return; |
223 | 223 | ||
224 | if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { | 224 | if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { |
225 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | 225 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); |
226 | trace_block_plug(q); | 226 | trace_block_plug(q); |
227 | } | 227 | } |
228 | } | 228 | } |
229 | EXPORT_SYMBOL(blk_plug_device); | 229 | EXPORT_SYMBOL(blk_plug_device); |
230 | 230 | ||
231 | /** | 231 | /** |
232 | * blk_plug_device_unlocked - plug a device without queue lock held | 232 | * blk_plug_device_unlocked - plug a device without queue lock held |
233 | * @q: The &struct request_queue to plug | 233 | * @q: The &struct request_queue to plug |
234 | * | 234 | * |
235 | * Description: | 235 | * Description: |
236 | * Like @blk_plug_device(), but grabs the queue lock and disables | 236 | * Like @blk_plug_device(), but grabs the queue lock and disables |
237 | * interrupts. | 237 | * interrupts. |
238 | **/ | 238 | **/ |
239 | void blk_plug_device_unlocked(struct request_queue *q) | 239 | void blk_plug_device_unlocked(struct request_queue *q) |
240 | { | 240 | { |
241 | unsigned long flags; | 241 | unsigned long flags; |
242 | 242 | ||
243 | spin_lock_irqsave(q->queue_lock, flags); | 243 | spin_lock_irqsave(q->queue_lock, flags); |
244 | blk_plug_device(q); | 244 | blk_plug_device(q); |
245 | spin_unlock_irqrestore(q->queue_lock, flags); | 245 | spin_unlock_irqrestore(q->queue_lock, flags); |
246 | } | 246 | } |
247 | EXPORT_SYMBOL(blk_plug_device_unlocked); | 247 | EXPORT_SYMBOL(blk_plug_device_unlocked); |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * remove the queue from the plugged list, if present. called with | 250 | * remove the queue from the plugged list, if present. called with |
251 | * queue lock held and interrupts disabled. | 251 | * queue lock held and interrupts disabled. |
252 | */ | 252 | */ |
253 | int blk_remove_plug(struct request_queue *q) | 253 | int blk_remove_plug(struct request_queue *q) |
254 | { | 254 | { |
255 | WARN_ON(!irqs_disabled()); | 255 | WARN_ON(!irqs_disabled()); |
256 | 256 | ||
257 | if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) | 257 | if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) |
258 | return 0; | 258 | return 0; |
259 | 259 | ||
260 | del_timer(&q->unplug_timer); | 260 | del_timer(&q->unplug_timer); |
261 | return 1; | 261 | return 1; |
262 | } | 262 | } |
263 | EXPORT_SYMBOL(blk_remove_plug); | 263 | EXPORT_SYMBOL(blk_remove_plug); |
264 | 264 | ||
265 | /* | 265 | /* |
266 | * remove the plug and let it rip.. | 266 | * remove the plug and let it rip.. |
267 | */ | 267 | */ |
268 | void __generic_unplug_device(struct request_queue *q) | 268 | void __generic_unplug_device(struct request_queue *q) |
269 | { | 269 | { |
270 | if (unlikely(blk_queue_stopped(q))) | 270 | if (unlikely(blk_queue_stopped(q))) |
271 | return; | 271 | return; |
272 | if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) | 272 | if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) |
273 | return; | 273 | return; |
274 | 274 | ||
275 | q->request_fn(q); | 275 | q->request_fn(q); |
276 | } | 276 | } |
277 | 277 | ||
278 | /** | 278 | /** |
279 | * generic_unplug_device - fire a request queue | 279 | * generic_unplug_device - fire a request queue |
280 | * @q: The &struct request_queue in question | 280 | * @q: The &struct request_queue in question |
281 | * | 281 | * |
282 | * Description: | 282 | * Description: |
283 | * Linux uses plugging to build bigger requests queues before letting | 283 | * Linux uses plugging to build bigger requests queues before letting |
284 | * the device have at them. If a queue is plugged, the I/O scheduler | 284 | * the device have at them. If a queue is plugged, the I/O scheduler |
285 | * is still adding and merging requests on the queue. Once the queue | 285 | * is still adding and merging requests on the queue. Once the queue |
286 | * gets unplugged, the request_fn defined for the queue is invoked and | 286 | * gets unplugged, the request_fn defined for the queue is invoked and |
287 | * transfers started. | 287 | * transfers started. |
288 | **/ | 288 | **/ |
289 | void generic_unplug_device(struct request_queue *q) | 289 | void generic_unplug_device(struct request_queue *q) |
290 | { | 290 | { |
291 | if (blk_queue_plugged(q)) { | 291 | if (blk_queue_plugged(q)) { |
292 | spin_lock_irq(q->queue_lock); | 292 | spin_lock_irq(q->queue_lock); |
293 | __generic_unplug_device(q); | 293 | __generic_unplug_device(q); |
294 | spin_unlock_irq(q->queue_lock); | 294 | spin_unlock_irq(q->queue_lock); |
295 | } | 295 | } |
296 | } | 296 | } |
297 | EXPORT_SYMBOL(generic_unplug_device); | 297 | EXPORT_SYMBOL(generic_unplug_device); |
298 | 298 | ||
299 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | 299 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, |
300 | struct page *page) | 300 | struct page *page) |
301 | { | 301 | { |
302 | struct request_queue *q = bdi->unplug_io_data; | 302 | struct request_queue *q = bdi->unplug_io_data; |
303 | 303 | ||
304 | blk_unplug(q); | 304 | blk_unplug(q); |
305 | } | 305 | } |
306 | 306 | ||
307 | void blk_unplug_work(struct work_struct *work) | 307 | void blk_unplug_work(struct work_struct *work) |
308 | { | 308 | { |
309 | struct request_queue *q = | 309 | struct request_queue *q = |
310 | container_of(work, struct request_queue, unplug_work); | 310 | container_of(work, struct request_queue, unplug_work); |
311 | 311 | ||
312 | trace_block_unplug_io(q); | 312 | trace_block_unplug_io(q); |
313 | q->unplug_fn(q); | 313 | q->unplug_fn(q); |
314 | } | 314 | } |
315 | 315 | ||
316 | void blk_unplug_timeout(unsigned long data) | 316 | void blk_unplug_timeout(unsigned long data) |
317 | { | 317 | { |
318 | struct request_queue *q = (struct request_queue *)data; | 318 | struct request_queue *q = (struct request_queue *)data; |
319 | 319 | ||
320 | trace_block_unplug_timer(q); | 320 | trace_block_unplug_timer(q); |
321 | kblockd_schedule_work(q, &q->unplug_work); | 321 | kblockd_schedule_work(q, &q->unplug_work); |
322 | } | 322 | } |
323 | 323 | ||
324 | void blk_unplug(struct request_queue *q) | 324 | void blk_unplug(struct request_queue *q) |
325 | { | 325 | { |
326 | /* | 326 | /* |
327 | * devices don't necessarily have an ->unplug_fn defined | 327 | * devices don't necessarily have an ->unplug_fn defined |
328 | */ | 328 | */ |
329 | if (q->unplug_fn) { | 329 | if (q->unplug_fn) { |
330 | trace_block_unplug_io(q); | 330 | trace_block_unplug_io(q); |
331 | q->unplug_fn(q); | 331 | q->unplug_fn(q); |
332 | } | 332 | } |
333 | } | 333 | } |
334 | EXPORT_SYMBOL(blk_unplug); | 334 | EXPORT_SYMBOL(blk_unplug); |
335 | 335 | ||
336 | static void blk_invoke_request_fn(struct request_queue *q) | 336 | static void blk_invoke_request_fn(struct request_queue *q) |
337 | { | 337 | { |
338 | if (unlikely(blk_queue_stopped(q))) | 338 | if (unlikely(blk_queue_stopped(q))) |
339 | return; | 339 | return; |
340 | 340 | ||
341 | /* | 341 | /* |
342 | * one level of recursion is ok and is much faster than kicking | 342 | * one level of recursion is ok and is much faster than kicking |
343 | * the unplug handling | 343 | * the unplug handling |
344 | */ | 344 | */ |
345 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 345 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
346 | q->request_fn(q); | 346 | q->request_fn(q); |
347 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 347 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
348 | } else { | 348 | } else { |
349 | queue_flag_set(QUEUE_FLAG_PLUGGED, q); | 349 | queue_flag_set(QUEUE_FLAG_PLUGGED, q); |
350 | kblockd_schedule_work(q, &q->unplug_work); | 350 | kblockd_schedule_work(q, &q->unplug_work); |
351 | } | 351 | } |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
355 | * blk_start_queue - restart a previously stopped queue | 355 | * blk_start_queue - restart a previously stopped queue |
356 | * @q: The &struct request_queue in question | 356 | * @q: The &struct request_queue in question |
357 | * | 357 | * |
358 | * Description: | 358 | * Description: |
359 | * blk_start_queue() will clear the stop flag on the queue, and call | 359 | * blk_start_queue() will clear the stop flag on the queue, and call |
360 | * the request_fn for the queue if it was in a stopped state when | 360 | * the request_fn for the queue if it was in a stopped state when |
361 | * entered. Also see blk_stop_queue(). Queue lock must be held. | 361 | * entered. Also see blk_stop_queue(). Queue lock must be held. |
362 | **/ | 362 | **/ |
363 | void blk_start_queue(struct request_queue *q) | 363 | void blk_start_queue(struct request_queue *q) |
364 | { | 364 | { |
365 | WARN_ON(!irqs_disabled()); | 365 | WARN_ON(!irqs_disabled()); |
366 | 366 | ||
367 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); | 367 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
368 | blk_invoke_request_fn(q); | 368 | blk_invoke_request_fn(q); |
369 | } | 369 | } |
370 | EXPORT_SYMBOL(blk_start_queue); | 370 | EXPORT_SYMBOL(blk_start_queue); |
371 | 371 | ||
372 | /** | 372 | /** |
373 | * blk_stop_queue - stop a queue | 373 | * blk_stop_queue - stop a queue |
374 | * @q: The &struct request_queue in question | 374 | * @q: The &struct request_queue in question |
375 | * | 375 | * |
376 | * Description: | 376 | * Description: |
377 | * The Linux block layer assumes that a block driver will consume all | 377 | * The Linux block layer assumes that a block driver will consume all |
378 | * entries on the request queue when the request_fn strategy is called. | 378 | * entries on the request queue when the request_fn strategy is called. |
379 | * Often this will not happen, because of hardware limitations (queue | 379 | * Often this will not happen, because of hardware limitations (queue |
380 | * depth settings). If a device driver gets a 'queue full' response, | 380 | * depth settings). If a device driver gets a 'queue full' response, |
381 | * or if it simply chooses not to queue more I/O at one point, it can | 381 | * or if it simply chooses not to queue more I/O at one point, it can |
382 | * call this function to prevent the request_fn from being called until | 382 | * call this function to prevent the request_fn from being called until |
383 | * the driver has signalled it's ready to go again. This happens by calling | 383 | * the driver has signalled it's ready to go again. This happens by calling |
384 | * blk_start_queue() to restart queue operations. Queue lock must be held. | 384 | * blk_start_queue() to restart queue operations. Queue lock must be held. |
385 | **/ | 385 | **/ |
386 | void blk_stop_queue(struct request_queue *q) | 386 | void blk_stop_queue(struct request_queue *q) |
387 | { | 387 | { |
388 | blk_remove_plug(q); | 388 | blk_remove_plug(q); |
389 | queue_flag_set(QUEUE_FLAG_STOPPED, q); | 389 | queue_flag_set(QUEUE_FLAG_STOPPED, q); |
390 | } | 390 | } |
391 | EXPORT_SYMBOL(blk_stop_queue); | 391 | EXPORT_SYMBOL(blk_stop_queue); |
392 | 392 | ||
393 | /** | 393 | /** |
394 | * blk_sync_queue - cancel any pending callbacks on a queue | 394 | * blk_sync_queue - cancel any pending callbacks on a queue |
395 | * @q: the queue | 395 | * @q: the queue |
396 | * | 396 | * |
397 | * Description: | 397 | * Description: |
398 | * The block layer may perform asynchronous callback activity | 398 | * The block layer may perform asynchronous callback activity |
399 | * on a queue, such as calling the unplug function after a timeout. | 399 | * on a queue, such as calling the unplug function after a timeout. |
400 | * A block device may call blk_sync_queue to ensure that any | 400 | * A block device may call blk_sync_queue to ensure that any |
401 | * such activity is cancelled, thus allowing it to release resources | 401 | * such activity is cancelled, thus allowing it to release resources |
402 | * that the callbacks might use. The caller must already have made sure | 402 | * that the callbacks might use. The caller must already have made sure |
403 | * that its ->make_request_fn will not re-add plugging prior to calling | 403 | * that its ->make_request_fn will not re-add plugging prior to calling |
404 | * this function. | 404 | * this function. |
405 | * | 405 | * |
406 | */ | 406 | */ |
407 | void blk_sync_queue(struct request_queue *q) | 407 | void blk_sync_queue(struct request_queue *q) |
408 | { | 408 | { |
409 | del_timer_sync(&q->unplug_timer); | 409 | del_timer_sync(&q->unplug_timer); |
410 | del_timer_sync(&q->timeout); | 410 | del_timer_sync(&q->timeout); |
411 | cancel_work_sync(&q->unplug_work); | 411 | cancel_work_sync(&q->unplug_work); |
412 | } | 412 | } |
413 | EXPORT_SYMBOL(blk_sync_queue); | 413 | EXPORT_SYMBOL(blk_sync_queue); |
414 | 414 | ||
415 | /** | 415 | /** |
416 | * __blk_run_queue - run a single device queue | 416 | * __blk_run_queue - run a single device queue |
417 | * @q: The queue to run | 417 | * @q: The queue to run |
418 | * | 418 | * |
419 | * Description: | 419 | * Description: |
420 | * See @blk_run_queue. This variant must be called with the queue lock | 420 | * See @blk_run_queue. This variant must be called with the queue lock |
421 | * held and interrupts disabled. | 421 | * held and interrupts disabled. |
422 | * | 422 | * |
423 | */ | 423 | */ |
424 | void __blk_run_queue(struct request_queue *q) | 424 | void __blk_run_queue(struct request_queue *q) |
425 | { | 425 | { |
426 | blk_remove_plug(q); | 426 | blk_remove_plug(q); |
427 | 427 | ||
428 | /* | 428 | /* |
429 | * Only recurse once to avoid overrunning the stack, let the unplug | 429 | * Only recurse once to avoid overrunning the stack, let the unplug |
430 | * handling reinvoke the handler shortly if we already got there. | 430 | * handling reinvoke the handler shortly if we already got there. |
431 | */ | 431 | */ |
432 | if (!elv_queue_empty(q)) | 432 | if (!elv_queue_empty(q)) |
433 | blk_invoke_request_fn(q); | 433 | blk_invoke_request_fn(q); |
434 | } | 434 | } |
435 | EXPORT_SYMBOL(__blk_run_queue); | 435 | EXPORT_SYMBOL(__blk_run_queue); |
436 | 436 | ||
437 | /** | 437 | /** |
438 | * blk_run_queue - run a single device queue | 438 | * blk_run_queue - run a single device queue |
439 | * @q: The queue to run | 439 | * @q: The queue to run |
440 | * | 440 | * |
441 | * Description: | 441 | * Description: |
442 | * Invoke request handling on this queue, if it has pending work to do. | 442 | * Invoke request handling on this queue, if it has pending work to do. |
443 | * May be used to restart queueing when a request has completed. Also | 443 | * May be used to restart queueing when a request has completed. Also |
444 | * See @blk_start_queueing. | 444 | * See @blk_start_queueing. |
445 | * | 445 | * |
446 | */ | 446 | */ |
447 | void blk_run_queue(struct request_queue *q) | 447 | void blk_run_queue(struct request_queue *q) |
448 | { | 448 | { |
449 | unsigned long flags; | 449 | unsigned long flags; |
450 | 450 | ||
451 | spin_lock_irqsave(q->queue_lock, flags); | 451 | spin_lock_irqsave(q->queue_lock, flags); |
452 | __blk_run_queue(q); | 452 | __blk_run_queue(q); |
453 | spin_unlock_irqrestore(q->queue_lock, flags); | 453 | spin_unlock_irqrestore(q->queue_lock, flags); |
454 | } | 454 | } |
455 | EXPORT_SYMBOL(blk_run_queue); | 455 | EXPORT_SYMBOL(blk_run_queue); |
456 | 456 | ||
457 | void blk_put_queue(struct request_queue *q) | 457 | void blk_put_queue(struct request_queue *q) |
458 | { | 458 | { |
459 | kobject_put(&q->kobj); | 459 | kobject_put(&q->kobj); |
460 | } | 460 | } |
461 | 461 | ||
462 | void blk_cleanup_queue(struct request_queue *q) | 462 | void blk_cleanup_queue(struct request_queue *q) |
463 | { | 463 | { |
464 | /* | 464 | /* |
465 | * We know we have process context here, so we can be a little | 465 | * We know we have process context here, so we can be a little |
466 | * cautious and ensure that pending block actions on this device | 466 | * cautious and ensure that pending block actions on this device |
467 | * are done before moving on. Going into this function, we should | 467 | * are done before moving on. Going into this function, we should |
468 | * not have processes doing IO to this device. | 468 | * not have processes doing IO to this device. |
469 | */ | 469 | */ |
470 | blk_sync_queue(q); | 470 | blk_sync_queue(q); |
471 | 471 | ||
472 | mutex_lock(&q->sysfs_lock); | 472 | mutex_lock(&q->sysfs_lock); |
473 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); | 473 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); |
474 | mutex_unlock(&q->sysfs_lock); | 474 | mutex_unlock(&q->sysfs_lock); |
475 | 475 | ||
476 | if (q->elevator) | 476 | if (q->elevator) |
477 | elevator_exit(q->elevator); | 477 | elevator_exit(q->elevator); |
478 | 478 | ||
479 | blk_put_queue(q); | 479 | blk_put_queue(q); |
480 | } | 480 | } |
481 | EXPORT_SYMBOL(blk_cleanup_queue); | 481 | EXPORT_SYMBOL(blk_cleanup_queue); |
482 | 482 | ||
483 | static int blk_init_free_list(struct request_queue *q) | 483 | static int blk_init_free_list(struct request_queue *q) |
484 | { | 484 | { |
485 | struct request_list *rl = &q->rq; | 485 | struct request_list *rl = &q->rq; |
486 | 486 | ||
487 | rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; | 487 | rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; |
488 | rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; | 488 | rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; |
489 | rl->elvpriv = 0; | 489 | rl->elvpriv = 0; |
490 | init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); | 490 | init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); |
491 | init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); | 491 | init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); |
492 | 492 | ||
493 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, | 493 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
494 | mempool_free_slab, request_cachep, q->node); | 494 | mempool_free_slab, request_cachep, q->node); |
495 | 495 | ||
496 | if (!rl->rq_pool) | 496 | if (!rl->rq_pool) |
497 | return -ENOMEM; | 497 | return -ENOMEM; |
498 | 498 | ||
499 | return 0; | 499 | return 0; |
500 | } | 500 | } |
501 | 501 | ||
502 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) | 502 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) |
503 | { | 503 | { |
504 | return blk_alloc_queue_node(gfp_mask, -1); | 504 | return blk_alloc_queue_node(gfp_mask, -1); |
505 | } | 505 | } |
506 | EXPORT_SYMBOL(blk_alloc_queue); | 506 | EXPORT_SYMBOL(blk_alloc_queue); |
507 | 507 | ||
508 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 508 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
509 | { | 509 | { |
510 | struct request_queue *q; | 510 | struct request_queue *q; |
511 | int err; | 511 | int err; |
512 | 512 | ||
513 | q = kmem_cache_alloc_node(blk_requestq_cachep, | 513 | q = kmem_cache_alloc_node(blk_requestq_cachep, |
514 | gfp_mask | __GFP_ZERO, node_id); | 514 | gfp_mask | __GFP_ZERO, node_id); |
515 | if (!q) | 515 | if (!q) |
516 | return NULL; | 516 | return NULL; |
517 | 517 | ||
518 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | 518 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; |
519 | q->backing_dev_info.unplug_io_data = q; | 519 | q->backing_dev_info.unplug_io_data = q; |
520 | err = bdi_init(&q->backing_dev_info); | 520 | err = bdi_init(&q->backing_dev_info); |
521 | if (err) { | 521 | if (err) { |
522 | kmem_cache_free(blk_requestq_cachep, q); | 522 | kmem_cache_free(blk_requestq_cachep, q); |
523 | return NULL; | 523 | return NULL; |
524 | } | 524 | } |
525 | 525 | ||
526 | init_timer(&q->unplug_timer); | 526 | init_timer(&q->unplug_timer); |
527 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); | 527 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); |
528 | INIT_LIST_HEAD(&q->timeout_list); | 528 | INIT_LIST_HEAD(&q->timeout_list); |
529 | INIT_WORK(&q->unplug_work, blk_unplug_work); | 529 | INIT_WORK(&q->unplug_work, blk_unplug_work); |
530 | 530 | ||
531 | kobject_init(&q->kobj, &blk_queue_ktype); | 531 | kobject_init(&q->kobj, &blk_queue_ktype); |
532 | 532 | ||
533 | mutex_init(&q->sysfs_lock); | 533 | mutex_init(&q->sysfs_lock); |
534 | spin_lock_init(&q->__queue_lock); | 534 | spin_lock_init(&q->__queue_lock); |
535 | 535 | ||
536 | return q; | 536 | return q; |
537 | } | 537 | } |
538 | EXPORT_SYMBOL(blk_alloc_queue_node); | 538 | EXPORT_SYMBOL(blk_alloc_queue_node); |
539 | 539 | ||
540 | /** | 540 | /** |
541 | * blk_init_queue - prepare a request queue for use with a block device | 541 | * blk_init_queue - prepare a request queue for use with a block device |
542 | * @rfn: The function to be called to process requests that have been | 542 | * @rfn: The function to be called to process requests that have been |
543 | * placed on the queue. | 543 | * placed on the queue. |
544 | * @lock: Request queue spin lock | 544 | * @lock: Request queue spin lock |
545 | * | 545 | * |
546 | * Description: | 546 | * Description: |
547 | * If a block device wishes to use the standard request handling procedures, | 547 | * If a block device wishes to use the standard request handling procedures, |
548 | * which sorts requests and coalesces adjacent requests, then it must | 548 | * which sorts requests and coalesces adjacent requests, then it must |
549 | * call blk_init_queue(). The function @rfn will be called when there | 549 | * call blk_init_queue(). The function @rfn will be called when there |
550 | * are requests on the queue that need to be processed. If the device | 550 | * are requests on the queue that need to be processed. If the device |
551 | * supports plugging, then @rfn may not be called immediately when requests | 551 | * supports plugging, then @rfn may not be called immediately when requests |
552 | * are available on the queue, but may be called at some time later instead. | 552 | * are available on the queue, but may be called at some time later instead. |
553 | * Plugged queues are generally unplugged when a buffer belonging to one | 553 | * Plugged queues are generally unplugged when a buffer belonging to one |
554 | * of the requests on the queue is needed, or due to memory pressure. | 554 | * of the requests on the queue is needed, or due to memory pressure. |
555 | * | 555 | * |
556 | * @rfn is not required, or even expected, to remove all requests off the | 556 | * @rfn is not required, or even expected, to remove all requests off the |
557 | * queue, but only as many as it can handle at a time. If it does leave | 557 | * queue, but only as many as it can handle at a time. If it does leave |
558 | * requests on the queue, it is responsible for arranging that the requests | 558 | * requests on the queue, it is responsible for arranging that the requests |
559 | * get dealt with eventually. | 559 | * get dealt with eventually. |
560 | * | 560 | * |
561 | * The queue spin lock must be held while manipulating the requests on the | 561 | * The queue spin lock must be held while manipulating the requests on the |
562 | * request queue; this lock will be taken also from interrupt context, so irq | 562 | * request queue; this lock will be taken also from interrupt context, so irq |
563 | * disabling is needed for it. | 563 | * disabling is needed for it. |
564 | * | 564 | * |
565 | * Function returns a pointer to the initialized request queue, or %NULL if | 565 | * Function returns a pointer to the initialized request queue, or %NULL if |
566 | * it didn't succeed. | 566 | * it didn't succeed. |
567 | * | 567 | * |
568 | * Note: | 568 | * Note: |
569 | * blk_init_queue() must be paired with a blk_cleanup_queue() call | 569 | * blk_init_queue() must be paired with a blk_cleanup_queue() call |
570 | * when the block device is deactivated (such as at module unload). | 570 | * when the block device is deactivated (such as at module unload). |
571 | **/ | 571 | **/ |
572 | 572 | ||
573 | struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) | 573 | struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) |
574 | { | 574 | { |
575 | return blk_init_queue_node(rfn, lock, -1); | 575 | return blk_init_queue_node(rfn, lock, -1); |
576 | } | 576 | } |
577 | EXPORT_SYMBOL(blk_init_queue); | 577 | EXPORT_SYMBOL(blk_init_queue); |
578 | 578 | ||
579 | struct request_queue * | 579 | struct request_queue * |
580 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | 580 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) |
581 | { | 581 | { |
582 | struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); | 582 | struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); |
583 | 583 | ||
584 | if (!q) | 584 | if (!q) |
585 | return NULL; | 585 | return NULL; |
586 | 586 | ||
587 | q->node = node_id; | 587 | q->node = node_id; |
588 | if (blk_init_free_list(q)) { | 588 | if (blk_init_free_list(q)) { |
589 | kmem_cache_free(blk_requestq_cachep, q); | 589 | kmem_cache_free(blk_requestq_cachep, q); |
590 | return NULL; | 590 | return NULL; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | 593 | /* |
594 | * if caller didn't supply a lock, they get per-queue locking with | 594 | * if caller didn't supply a lock, they get per-queue locking with |
595 | * our embedded lock | 595 | * our embedded lock |
596 | */ | 596 | */ |
597 | if (!lock) | 597 | if (!lock) |
598 | lock = &q->__queue_lock; | 598 | lock = &q->__queue_lock; |
599 | 599 | ||
600 | q->request_fn = rfn; | 600 | q->request_fn = rfn; |
601 | q->prep_rq_fn = NULL; | 601 | q->prep_rq_fn = NULL; |
602 | q->unplug_fn = generic_unplug_device; | 602 | q->unplug_fn = generic_unplug_device; |
603 | q->queue_flags = QUEUE_FLAG_DEFAULT; | 603 | q->queue_flags = QUEUE_FLAG_DEFAULT; |
604 | q->queue_lock = lock; | 604 | q->queue_lock = lock; |
605 | 605 | ||
606 | /* | 606 | /* |
607 | * This also sets hw/phys segments, boundary and size | 607 | * This also sets hw/phys segments, boundary and size |
608 | */ | 608 | */ |
609 | blk_queue_make_request(q, __make_request); | 609 | blk_queue_make_request(q, __make_request); |
610 | 610 | ||
611 | q->sg_reserved_size = INT_MAX; | 611 | q->sg_reserved_size = INT_MAX; |
612 | 612 | ||
613 | blk_set_cmd_filter_defaults(&q->cmd_filter); | 613 | blk_set_cmd_filter_defaults(&q->cmd_filter); |
614 | 614 | ||
615 | /* | 615 | /* |
616 | * all done | 616 | * all done |
617 | */ | 617 | */ |
618 | if (!elevator_init(q, NULL)) { | 618 | if (!elevator_init(q, NULL)) { |
619 | blk_queue_congestion_threshold(q); | 619 | blk_queue_congestion_threshold(q); |
620 | return q; | 620 | return q; |
621 | } | 621 | } |
622 | 622 | ||
623 | blk_put_queue(q); | 623 | blk_put_queue(q); |
624 | return NULL; | 624 | return NULL; |
625 | } | 625 | } |
626 | EXPORT_SYMBOL(blk_init_queue_node); | 626 | EXPORT_SYMBOL(blk_init_queue_node); |
627 | 627 | ||
628 | int blk_get_queue(struct request_queue *q) | 628 | int blk_get_queue(struct request_queue *q) |
629 | { | 629 | { |
630 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 630 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
631 | kobject_get(&q->kobj); | 631 | kobject_get(&q->kobj); |
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
634 | 634 | ||
635 | return 1; | 635 | return 1; |
636 | } | 636 | } |
637 | 637 | ||
638 | static inline void blk_free_request(struct request_queue *q, struct request *rq) | 638 | static inline void blk_free_request(struct request_queue *q, struct request *rq) |
639 | { | 639 | { |
640 | if (rq->cmd_flags & REQ_ELVPRIV) | 640 | if (rq->cmd_flags & REQ_ELVPRIV) |
641 | elv_put_request(q, rq); | 641 | elv_put_request(q, rq); |
642 | mempool_free(rq, q->rq.rq_pool); | 642 | mempool_free(rq, q->rq.rq_pool); |
643 | } | 643 | } |
644 | 644 | ||
645 | static struct request * | 645 | static struct request * |
646 | blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) | 646 | blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) |
647 | { | 647 | { |
648 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 648 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); |
649 | 649 | ||
650 | if (!rq) | 650 | if (!rq) |
651 | return NULL; | 651 | return NULL; |
652 | 652 | ||
653 | blk_rq_init(q, rq); | 653 | blk_rq_init(q, rq); |
654 | 654 | ||
655 | rq->cmd_flags = flags | REQ_ALLOCED; | 655 | rq->cmd_flags = flags | REQ_ALLOCED; |
656 | 656 | ||
657 | if (priv) { | 657 | if (priv) { |
658 | if (unlikely(elv_set_request(q, rq, gfp_mask))) { | 658 | if (unlikely(elv_set_request(q, rq, gfp_mask))) { |
659 | mempool_free(rq, q->rq.rq_pool); | 659 | mempool_free(rq, q->rq.rq_pool); |
660 | return NULL; | 660 | return NULL; |
661 | } | 661 | } |
662 | rq->cmd_flags |= REQ_ELVPRIV; | 662 | rq->cmd_flags |= REQ_ELVPRIV; |
663 | } | 663 | } |
664 | 664 | ||
665 | return rq; | 665 | return rq; |
666 | } | 666 | } |
667 | 667 | ||
668 | /* | 668 | /* |
669 | * ioc_batching returns true if the ioc is a valid batching request and | 669 | * ioc_batching returns true if the ioc is a valid batching request and |
670 | * should be given priority access to a request. | 670 | * should be given priority access to a request. |
671 | */ | 671 | */ |
672 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) | 672 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) |
673 | { | 673 | { |
674 | if (!ioc) | 674 | if (!ioc) |
675 | return 0; | 675 | return 0; |
676 | 676 | ||
677 | /* | 677 | /* |
678 | * Make sure the process is able to allocate at least 1 request | 678 | * Make sure the process is able to allocate at least 1 request |
679 | * even if the batch times out, otherwise we could theoretically | 679 | * even if the batch times out, otherwise we could theoretically |
680 | * lose wakeups. | 680 | * lose wakeups. |
681 | */ | 681 | */ |
682 | return ioc->nr_batch_requests == q->nr_batching || | 682 | return ioc->nr_batch_requests == q->nr_batching || |
683 | (ioc->nr_batch_requests > 0 | 683 | (ioc->nr_batch_requests > 0 |
684 | && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); | 684 | && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); |
685 | } | 685 | } |
686 | 686 | ||
687 | /* | 687 | /* |
688 | * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This | 688 | * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This |
689 | * will cause the process to be a "batcher" on all queues in the system. This | 689 | * will cause the process to be a "batcher" on all queues in the system. This |
690 | * is the behaviour we want though - once it gets a wakeup it should be given | 690 | * is the behaviour we want though - once it gets a wakeup it should be given |
691 | * a nice run. | 691 | * a nice run. |
692 | */ | 692 | */ |
693 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) | 693 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) |
694 | { | 694 | { |
695 | if (!ioc || ioc_batching(q, ioc)) | 695 | if (!ioc || ioc_batching(q, ioc)) |
696 | return; | 696 | return; |
697 | 697 | ||
698 | ioc->nr_batch_requests = q->nr_batching; | 698 | ioc->nr_batch_requests = q->nr_batching; |
699 | ioc->last_waited = jiffies; | 699 | ioc->last_waited = jiffies; |
700 | } | 700 | } |
701 | 701 | ||
702 | static void __freed_request(struct request_queue *q, int sync) | 702 | static void __freed_request(struct request_queue *q, int sync) |
703 | { | 703 | { |
704 | struct request_list *rl = &q->rq; | 704 | struct request_list *rl = &q->rq; |
705 | 705 | ||
706 | if (rl->count[sync] < queue_congestion_off_threshold(q)) | 706 | if (rl->count[sync] < queue_congestion_off_threshold(q)) |
707 | blk_clear_queue_congested(q, sync); | 707 | blk_clear_queue_congested(q, sync); |
708 | 708 | ||
709 | if (rl->count[sync] + 1 <= q->nr_requests) { | 709 | if (rl->count[sync] + 1 <= q->nr_requests) { |
710 | if (waitqueue_active(&rl->wait[sync])) | 710 | if (waitqueue_active(&rl->wait[sync])) |
711 | wake_up(&rl->wait[sync]); | 711 | wake_up(&rl->wait[sync]); |
712 | 712 | ||
713 | blk_clear_queue_full(q, sync); | 713 | blk_clear_queue_full(q, sync); |
714 | } | 714 | } |
715 | } | 715 | } |
716 | 716 | ||
717 | /* | 717 | /* |
718 | * A request has just been released. Account for it, update the full and | 718 | * A request has just been released. Account for it, update the full and |
719 | * congestion status, wake up any waiters. Called under q->queue_lock. | 719 | * congestion status, wake up any waiters. Called under q->queue_lock. |
720 | */ | 720 | */ |
721 | static void freed_request(struct request_queue *q, int sync, int priv) | 721 | static void freed_request(struct request_queue *q, int sync, int priv) |
722 | { | 722 | { |
723 | struct request_list *rl = &q->rq; | 723 | struct request_list *rl = &q->rq; |
724 | 724 | ||
725 | rl->count[sync]--; | 725 | rl->count[sync]--; |
726 | if (priv) | 726 | if (priv) |
727 | rl->elvpriv--; | 727 | rl->elvpriv--; |
728 | 728 | ||
729 | __freed_request(q, sync); | 729 | __freed_request(q, sync); |
730 | 730 | ||
731 | if (unlikely(rl->starved[sync ^ 1])) | 731 | if (unlikely(rl->starved[sync ^ 1])) |
732 | __freed_request(q, sync ^ 1); | 732 | __freed_request(q, sync ^ 1); |
733 | } | 733 | } |
734 | 734 | ||
735 | /* | 735 | /* |
736 | * Get a free request, queue_lock must be held. | 736 | * Get a free request, queue_lock must be held. |
737 | * Returns NULL on failure, with queue_lock held. | 737 | * Returns NULL on failure, with queue_lock held. |
738 | * Returns !NULL on success, with queue_lock *not held*. | 738 | * Returns !NULL on success, with queue_lock *not held*. |
739 | */ | 739 | */ |
740 | static struct request *get_request(struct request_queue *q, int rw_flags, | 740 | static struct request *get_request(struct request_queue *q, int rw_flags, |
741 | struct bio *bio, gfp_t gfp_mask) | 741 | struct bio *bio, gfp_t gfp_mask) |
742 | { | 742 | { |
743 | struct request *rq = NULL; | 743 | struct request *rq = NULL; |
744 | struct request_list *rl = &q->rq; | 744 | struct request_list *rl = &q->rq; |
745 | struct io_context *ioc = NULL; | 745 | struct io_context *ioc = NULL; |
746 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 746 | const bool is_sync = rw_is_sync(rw_flags) != 0; |
747 | int may_queue, priv; | 747 | int may_queue, priv; |
748 | 748 | ||
749 | may_queue = elv_may_queue(q, rw_flags); | 749 | may_queue = elv_may_queue(q, rw_flags); |
750 | if (may_queue == ELV_MQUEUE_NO) | 750 | if (may_queue == ELV_MQUEUE_NO) |
751 | goto rq_starved; | 751 | goto rq_starved; |
752 | 752 | ||
753 | if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { | 753 | if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { |
754 | if (rl->count[is_sync]+1 >= q->nr_requests) { | 754 | if (rl->count[is_sync]+1 >= q->nr_requests) { |
755 | ioc = current_io_context(GFP_ATOMIC, q->node); | 755 | ioc = current_io_context(GFP_ATOMIC, q->node); |
756 | /* | 756 | /* |
757 | * The queue will fill after this allocation, so set | 757 | * The queue will fill after this allocation, so set |
758 | * it as full, and mark this process as "batching". | 758 | * it as full, and mark this process as "batching". |
759 | * This process will be allowed to complete a batch of | 759 | * This process will be allowed to complete a batch of |
760 | * requests, others will be blocked. | 760 | * requests, others will be blocked. |
761 | */ | 761 | */ |
762 | if (!blk_queue_full(q, is_sync)) { | 762 | if (!blk_queue_full(q, is_sync)) { |
763 | ioc_set_batching(q, ioc); | 763 | ioc_set_batching(q, ioc); |
764 | blk_set_queue_full(q, is_sync); | 764 | blk_set_queue_full(q, is_sync); |
765 | } else { | 765 | } else { |
766 | if (may_queue != ELV_MQUEUE_MUST | 766 | if (may_queue != ELV_MQUEUE_MUST |
767 | && !ioc_batching(q, ioc)) { | 767 | && !ioc_batching(q, ioc)) { |
768 | /* | 768 | /* |
769 | * The queue is full and the allocating | 769 | * The queue is full and the allocating |
770 | * process is not a "batcher", and not | 770 | * process is not a "batcher", and not |
771 | * exempted by the IO scheduler | 771 | * exempted by the IO scheduler |
772 | */ | 772 | */ |
773 | goto out; | 773 | goto out; |
774 | } | 774 | } |
775 | } | 775 | } |
776 | } | 776 | } |
777 | blk_set_queue_congested(q, is_sync); | 777 | blk_set_queue_congested(q, is_sync); |
778 | } | 778 | } |
779 | 779 | ||
780 | /* | 780 | /* |
781 | * Only allow batching queuers to allocate up to 50% over the defined | 781 | * Only allow batching queuers to allocate up to 50% over the defined |
782 | * limit of requests, otherwise we could have thousands of requests | 782 | * limit of requests, otherwise we could have thousands of requests |
783 | * allocated with any setting of ->nr_requests | 783 | * allocated with any setting of ->nr_requests |
784 | */ | 784 | */ |
785 | if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) | 785 | if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) |
786 | goto out; | 786 | goto out; |
787 | 787 | ||
788 | rl->count[is_sync]++; | 788 | rl->count[is_sync]++; |
789 | rl->starved[is_sync] = 0; | 789 | rl->starved[is_sync] = 0; |
790 | 790 | ||
791 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 791 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
792 | if (priv) | 792 | if (priv) |
793 | rl->elvpriv++; | 793 | rl->elvpriv++; |
794 | 794 | ||
795 | if (blk_queue_io_stat(q)) | 795 | if (blk_queue_io_stat(q)) |
796 | rw_flags |= REQ_IO_STAT; | 796 | rw_flags |= REQ_IO_STAT; |
797 | spin_unlock_irq(q->queue_lock); | 797 | spin_unlock_irq(q->queue_lock); |
798 | 798 | ||
799 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); | 799 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); |
800 | if (unlikely(!rq)) { | 800 | if (unlikely(!rq)) { |
801 | /* | 801 | /* |
802 | * Allocation failed presumably due to memory. Undo anything | 802 | * Allocation failed presumably due to memory. Undo anything |
803 | * we might have messed up. | 803 | * we might have messed up. |
804 | * | 804 | * |
805 | * Allocating task should really be put onto the front of the | 805 | * Allocating task should really be put onto the front of the |
806 | * wait queue, but this is pretty rare. | 806 | * wait queue, but this is pretty rare. |
807 | */ | 807 | */ |
808 | spin_lock_irq(q->queue_lock); | 808 | spin_lock_irq(q->queue_lock); |
809 | freed_request(q, is_sync, priv); | 809 | freed_request(q, is_sync, priv); |
810 | 810 | ||
811 | /* | 811 | /* |
812 | * in the very unlikely event that allocation failed and no | 812 | * in the very unlikely event that allocation failed and no |
813 | * requests for this direction was pending, mark us starved | 813 | * requests for this direction was pending, mark us starved |
814 | * so that freeing of a request in the other direction will | 814 | * so that freeing of a request in the other direction will |
815 | * notice us. another possible fix would be to split the | 815 | * notice us. another possible fix would be to split the |
816 | * rq mempool into READ and WRITE | 816 | * rq mempool into READ and WRITE |
817 | */ | 817 | */ |
818 | rq_starved: | 818 | rq_starved: |
819 | if (unlikely(rl->count[is_sync] == 0)) | 819 | if (unlikely(rl->count[is_sync] == 0)) |
820 | rl->starved[is_sync] = 1; | 820 | rl->starved[is_sync] = 1; |
821 | 821 | ||
822 | goto out; | 822 | goto out; |
823 | } | 823 | } |
824 | 824 | ||
825 | /* | 825 | /* |
826 | * ioc may be NULL here, and ioc_batching will be false. That's | 826 | * ioc may be NULL here, and ioc_batching will be false. That's |
827 | * OK, if the queue is under the request limit then requests need | 827 | * OK, if the queue is under the request limit then requests need |
828 | * not count toward the nr_batch_requests limit. There will always | 828 | * not count toward the nr_batch_requests limit. There will always |
829 | * be some limit enforced by BLK_BATCH_TIME. | 829 | * be some limit enforced by BLK_BATCH_TIME. |
830 | */ | 830 | */ |
831 | if (ioc_batching(q, ioc)) | 831 | if (ioc_batching(q, ioc)) |
832 | ioc->nr_batch_requests--; | 832 | ioc->nr_batch_requests--; |
833 | 833 | ||
834 | trace_block_getrq(q, bio, rw_flags & 1); | 834 | trace_block_getrq(q, bio, rw_flags & 1); |
835 | out: | 835 | out: |
836 | return rq; | 836 | return rq; |
837 | } | 837 | } |
838 | 838 | ||
839 | /* | 839 | /* |
840 | * No available requests for this queue, unplug the device and wait for some | 840 | * No available requests for this queue, unplug the device and wait for some |
841 | * requests to become available. | 841 | * requests to become available. |
842 | * | 842 | * |
843 | * Called with q->queue_lock held, and returns with it unlocked. | 843 | * Called with q->queue_lock held, and returns with it unlocked. |
844 | */ | 844 | */ |
845 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | 845 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, |
846 | struct bio *bio) | 846 | struct bio *bio) |
847 | { | 847 | { |
848 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 848 | const bool is_sync = rw_is_sync(rw_flags) != 0; |
849 | struct request *rq; | 849 | struct request *rq; |
850 | 850 | ||
851 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | 851 | rq = get_request(q, rw_flags, bio, GFP_NOIO); |
852 | while (!rq) { | 852 | while (!rq) { |
853 | DEFINE_WAIT(wait); | 853 | DEFINE_WAIT(wait); |
854 | struct io_context *ioc; | 854 | struct io_context *ioc; |
855 | struct request_list *rl = &q->rq; | 855 | struct request_list *rl = &q->rq; |
856 | 856 | ||
857 | prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, | 857 | prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, |
858 | TASK_UNINTERRUPTIBLE); | 858 | TASK_UNINTERRUPTIBLE); |
859 | 859 | ||
860 | trace_block_sleeprq(q, bio, rw_flags & 1); | 860 | trace_block_sleeprq(q, bio, rw_flags & 1); |
861 | 861 | ||
862 | __generic_unplug_device(q); | 862 | __generic_unplug_device(q); |
863 | spin_unlock_irq(q->queue_lock); | 863 | spin_unlock_irq(q->queue_lock); |
864 | io_schedule(); | 864 | io_schedule(); |
865 | 865 | ||
866 | /* | 866 | /* |
867 | * After sleeping, we become a "batching" process and | 867 | * After sleeping, we become a "batching" process and |
868 | * will be able to allocate at least one request, and | 868 | * will be able to allocate at least one request, and |
869 | * up to a big batch of them for a small period of time. | 869 | * up to a big batch of them for a small period of time. |
870 | * See ioc_batching, ioc_set_batching | 870 | * See ioc_batching, ioc_set_batching |
871 | */ | 871 | */ |
872 | ioc = current_io_context(GFP_NOIO, q->node); | 872 | ioc = current_io_context(GFP_NOIO, q->node); |
873 | ioc_set_batching(q, ioc); | 873 | ioc_set_batching(q, ioc); |
874 | 874 | ||
875 | spin_lock_irq(q->queue_lock); | 875 | spin_lock_irq(q->queue_lock); |
876 | finish_wait(&rl->wait[is_sync], &wait); | 876 | finish_wait(&rl->wait[is_sync], &wait); |
877 | 877 | ||
878 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | 878 | rq = get_request(q, rw_flags, bio, GFP_NOIO); |
879 | } | 879 | } |
880 | 880 | ||
881 | return rq; | 881 | return rq; |
882 | } | 882 | } |
883 | 883 | ||
884 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | 884 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) |
885 | { | 885 | { |
886 | struct request *rq; | 886 | struct request *rq; |
887 | 887 | ||
888 | BUG_ON(rw != READ && rw != WRITE); | 888 | BUG_ON(rw != READ && rw != WRITE); |
889 | 889 | ||
890 | spin_lock_irq(q->queue_lock); | 890 | spin_lock_irq(q->queue_lock); |
891 | if (gfp_mask & __GFP_WAIT) { | 891 | if (gfp_mask & __GFP_WAIT) { |
892 | rq = get_request_wait(q, rw, NULL); | 892 | rq = get_request_wait(q, rw, NULL); |
893 | } else { | 893 | } else { |
894 | rq = get_request(q, rw, NULL, gfp_mask); | 894 | rq = get_request(q, rw, NULL, gfp_mask); |
895 | if (!rq) | 895 | if (!rq) |
896 | spin_unlock_irq(q->queue_lock); | 896 | spin_unlock_irq(q->queue_lock); |
897 | } | 897 | } |
898 | /* q->queue_lock is unlocked at this point */ | 898 | /* q->queue_lock is unlocked at this point */ |
899 | 899 | ||
900 | return rq; | 900 | return rq; |
901 | } | 901 | } |
902 | EXPORT_SYMBOL(blk_get_request); | 902 | EXPORT_SYMBOL(blk_get_request); |
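The allocation semantics above boil down to a short usage pattern: with a gfp mask containing __GFP_WAIT the call routes through get_request_wait() and cannot return NULL, otherwise the caller must handle allocation failure. The following is a minimal sketch, not part of this file; the example_* names are hypothetical.

#include <linux/blkdev.h>

/* Sketch only: allocate, use and release a driver-private request. */
static int example_send_special(struct request_queue *q)
{
	struct request *rq;

	/* GFP_NOIO contains __GFP_WAIT: may sleep, never returns NULL */
	rq = blk_get_request(q, WRITE, GFP_NOIO);

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */
	/* ... fill in driver-specific fields and issue the request ... */

	/*
	 * Without __GFP_WAIT the allocation can fail instead of sleeping:
	 *   rq = blk_get_request(q, WRITE, GFP_ATOMIC);
	 *   if (!rq)
	 *           return -ENOMEM;
	 */

	blk_put_request(rq);			/* drop the reference when done */
	return 0;
}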
903 | 903 | ||
904 | /** | 904 | /** |
905 | * blk_start_queueing - initiate dispatch of requests to device | 905 | * blk_start_queueing - initiate dispatch of requests to device |
906 | * @q: request queue to kick into gear | 906 | * @q: request queue to kick into gear |
907 | * | 907 | * |
908 | * This is basically a helper to remove the need to know whether a queue | 908 | * This is basically a helper to remove the need to know whether a queue |
909 | * is plugged or not if someone just wants to initiate dispatch of requests | 909 | * is plugged or not if someone just wants to initiate dispatch of requests |
910 | * for this queue. Should be used to start queueing on a device outside | 910 | * for this queue. Should be used to start queueing on a device outside |
911 | * of ->request_fn() context. Also see @blk_run_queue. | 911 | * of ->request_fn() context. Also see @blk_run_queue. |
912 | * | 912 | * |
913 | * The queue lock must be held with interrupts disabled. | 913 | * The queue lock must be held with interrupts disabled. |
914 | */ | 914 | */ |
915 | void blk_start_queueing(struct request_queue *q) | 915 | void blk_start_queueing(struct request_queue *q) |
916 | { | 916 | { |
917 | if (!blk_queue_plugged(q)) { | 917 | if (!blk_queue_plugged(q)) { |
918 | if (unlikely(blk_queue_stopped(q))) | 918 | if (unlikely(blk_queue_stopped(q))) |
919 | return; | 919 | return; |
920 | q->request_fn(q); | 920 | q->request_fn(q); |
921 | } else | 921 | } else |
922 | __generic_unplug_device(q); | 922 | __generic_unplug_device(q); |
923 | } | 923 | } |
924 | EXPORT_SYMBOL(blk_start_queueing); | 924 | EXPORT_SYMBOL(blk_start_queueing); |
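The calling convention spelled out in the kernel-doc above (queue lock held, interrupts disabled) typically looks like the sketch below when the caller sits outside ->request_fn() context. This is an illustration only, not part of the file.

#include <linux/blkdev.h>

/* Sketch only: kick request dispatch from outside ->request_fn() context. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);	/* lock held, IRQs off */
	blk_start_queueing(q);				/* run request_fn or unplug */
	spin_unlock_irqrestore(q->queue_lock, flags);
}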
925 | 925 | ||
926 | /** | 926 | /** |
927 | * blk_requeue_request - put a request back on queue | 927 | * blk_requeue_request - put a request back on queue |
928 | * @q: request queue where request should be inserted | 928 | * @q: request queue where request should be inserted |
929 | * @rq: request to be inserted | 929 | * @rq: request to be inserted |
930 | * | 930 | * |
931 | * Description: | 931 | * Description: |
932 | * Drivers often keep queueing requests until the hardware cannot accept | 932 | * Drivers often keep queueing requests until the hardware cannot accept |
933 | * more. When that condition happens, we need to put the request back | 933 | * more. When that condition happens, we need to put the request back |
934 | * on the queue. Must be called with queue lock held. | 934 | * on the queue. Must be called with queue lock held. |
935 | */ | 935 | */ |
936 | void blk_requeue_request(struct request_queue *q, struct request *rq) | 936 | void blk_requeue_request(struct request_queue *q, struct request *rq) |
937 | { | 937 | { |
938 | blk_delete_timer(rq); | 938 | blk_delete_timer(rq); |
939 | blk_clear_rq_complete(rq); | 939 | blk_clear_rq_complete(rq); |
940 | trace_block_rq_requeue(q, rq); | 940 | trace_block_rq_requeue(q, rq); |
941 | 941 | ||
942 | if (blk_rq_tagged(rq)) | 942 | if (blk_rq_tagged(rq)) |
943 | blk_queue_end_tag(q, rq); | 943 | blk_queue_end_tag(q, rq); |
944 | 944 | ||
945 | elv_requeue_request(q, rq); | 945 | elv_requeue_request(q, rq); |
946 | } | 946 | } |
947 | EXPORT_SYMBOL(blk_requeue_request); | 947 | EXPORT_SYMBOL(blk_requeue_request); |
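The "hardware cannot accept more" case described above reads roughly as follows inside a driver's ->request_fn(). This is a sketch, not part of the file: example_issue() is a hypothetical hardware hook, and elv_next_request() is assumed as the queue-head peek helper of this kernel series.

#include <linux/blkdev.h>

/* hypothetical hardware submission hook; -EBUSY means "cannot take more now" */
static int example_issue(struct request *rq)
{
	return 0;
}

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn() is entered with q->queue_lock held */
	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);		/* take it off the queue */

		if (example_issue(rq) == -EBUSY) {
			/* hardware full: put it back and stop the queue */
			blk_requeue_request(q, rq);
			blk_stop_queue(q);
			break;
		}
	}
}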
948 | 948 | ||
949 | /** | 949 | /** |
950 | * blk_insert_request - insert a special request into a request queue | 950 | * blk_insert_request - insert a special request into a request queue |
951 | * @q: request queue where request should be inserted | 951 | * @q: request queue where request should be inserted |
952 | * @rq: request to be inserted | 952 | * @rq: request to be inserted |
953 | * @at_head: insert request at head or tail of queue | 953 | * @at_head: insert request at head or tail of queue |
954 | * @data: private data | 954 | * @data: private data |
955 | * | 955 | * |
956 | * Description: | 956 | * Description: |
957 | * Many block devices need to execute commands asynchronously, so they don't | 957 | * Many block devices need to execute commands asynchronously, so they don't |
958 | * block the whole kernel from preemption during request execution. This is | 958 | * block the whole kernel from preemption during request execution. This is |
959 | * accomplished normally by inserting artificial requests tagged as | 959 | * accomplished normally by inserting artificial requests tagged as |
960 | * REQ_TYPE_SPECIAL into the corresponding request queue, and letting them | 960 | * REQ_TYPE_SPECIAL into the corresponding request queue, and letting them |
961 | * be scheduled for actual execution by the request queue. | 961 | * be scheduled for actual execution by the request queue. |
962 | * | 962 | * |
963 | * We have the option of inserting the head or the tail of the queue. | 963 | * We have the option of inserting the head or the tail of the queue. |
964 | * Typically we use the tail for new ioctls and so forth. We use the head | 964 | * Typically we use the tail for new ioctls and so forth. We use the head |
965 | * of the queue for things like a QUEUE_FULL message from a device, or a | 965 | * of the queue for things like a QUEUE_FULL message from a device, or a |
966 | * host that is unable to accept a particular command. | 966 | * host that is unable to accept a particular command. |
967 | */ | 967 | */ |
968 | void blk_insert_request(struct request_queue *q, struct request *rq, | 968 | void blk_insert_request(struct request_queue *q, struct request *rq, |
969 | int at_head, void *data) | 969 | int at_head, void *data) |
970 | { | 970 | { |
971 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | 971 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; |
972 | unsigned long flags; | 972 | unsigned long flags; |
973 | 973 | ||
974 | /* | 974 | /* |
975 | * tell I/O scheduler that this isn't a regular read/write (ie it | 975 | * tell I/O scheduler that this isn't a regular read/write (ie it |
976 | * must not attempt merges on this) and that it acts as a soft | 976 | * must not attempt merges on this) and that it acts as a soft |
977 | * barrier | 977 | * barrier |
978 | */ | 978 | */ |
979 | rq->cmd_type = REQ_TYPE_SPECIAL; | 979 | rq->cmd_type = REQ_TYPE_SPECIAL; |
980 | rq->cmd_flags |= REQ_SOFTBARRIER; | 980 | rq->cmd_flags |= REQ_SOFTBARRIER; |
981 | 981 | ||
982 | rq->special = data; | 982 | rq->special = data; |
983 | 983 | ||
984 | spin_lock_irqsave(q->queue_lock, flags); | 984 | spin_lock_irqsave(q->queue_lock, flags); |
985 | 985 | ||
986 | /* | 986 | /* |
987 | * If command is tagged, release the tag | 987 | * If command is tagged, release the tag |
988 | */ | 988 | */ |
989 | if (blk_rq_tagged(rq)) | 989 | if (blk_rq_tagged(rq)) |
990 | blk_queue_end_tag(q, rq); | 990 | blk_queue_end_tag(q, rq); |
991 | 991 | ||
992 | drive_stat_acct(rq, 1); | 992 | drive_stat_acct(rq, 1); |
993 | __elv_add_request(q, rq, where, 0); | 993 | __elv_add_request(q, rq, where, 0); |
994 | blk_start_queueing(q); | 994 | blk_start_queueing(q); |
995 | spin_unlock_irqrestore(q->queue_lock, flags); | 995 | spin_unlock_irqrestore(q->queue_lock, flags); |
996 | } | 996 | } |
997 | EXPORT_SYMBOL(blk_insert_request); | 997 | EXPORT_SYMBOL(blk_insert_request); |
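Combined with blk_get_request(), the helper above gives a short pattern for pushing a driver-private command in front of the queue. This is a sketch, not from the file; my_cmd is an arbitrary driver payload.

#include <linux/blkdev.h>

/* Sketch: queue a driver-private command at the head of the queue. */
static void example_queue_special(struct request_queue *q, void *my_cmd)
{
	/* GFP_KERNEL contains __GFP_WAIT, so this cannot return NULL */
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	/*
	 * blk_insert_request() marks rq as REQ_TYPE_SPECIAL, stores my_cmd
	 * in rq->special and kicks the queue, as shown above.
	 */
	blk_insert_request(q, rq, 1 /* at_head */, my_cmd);
}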
998 | 998 | ||
999 | /* | 999 | /* |
1000 | * add-request adds a request to the linked list. | 1000 | * add-request adds a request to the linked list. |
1001 | * queue lock is held and interrupts disabled, as we muck with the | 1001 | * queue lock is held and interrupts disabled, as we muck with the |
1002 | * request queue list. | 1002 | * request queue list. |
1003 | */ | 1003 | */ |
1004 | static inline void add_request(struct request_queue *q, struct request *req) | 1004 | static inline void add_request(struct request_queue *q, struct request *req) |
1005 | { | 1005 | { |
1006 | drive_stat_acct(req, 1); | 1006 | drive_stat_acct(req, 1); |
1007 | 1007 | ||
1008 | /* | 1008 | /* |
1009 | * elevator indicated where it wants this request to be | 1009 | * elevator indicated where it wants this request to be |
1010 | * inserted at elevator_merge time | 1010 | * inserted at elevator_merge time |
1011 | */ | 1011 | */ |
1012 | __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); | 1012 | __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | static void part_round_stats_single(int cpu, struct hd_struct *part, | 1015 | static void part_round_stats_single(int cpu, struct hd_struct *part, |
1016 | unsigned long now) | 1016 | unsigned long now) |
1017 | { | 1017 | { |
1018 | if (now == part->stamp) | 1018 | if (now == part->stamp) |
1019 | return; | 1019 | return; |
1020 | 1020 | ||
1021 | if (part->in_flight) { | 1021 | if (part->in_flight) { |
1022 | __part_stat_add(cpu, part, time_in_queue, | 1022 | __part_stat_add(cpu, part, time_in_queue, |
1023 | part->in_flight * (now - part->stamp)); | 1023 | part->in_flight * (now - part->stamp)); |
1024 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 1024 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); |
1025 | } | 1025 | } |
1026 | part->stamp = now; | 1026 | part->stamp = now; |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | /** | 1029 | /** |
1030 | * part_round_stats() - Round off the performance stats on a struct disk_stats. | 1030 | * part_round_stats() - Round off the performance stats on a struct disk_stats. |
1031 | * @cpu: cpu number for stats access | 1031 | * @cpu: cpu number for stats access |
1032 | * @part: target partition | 1032 | * @part: target partition |
1033 | * | 1033 | * |
1034 | * The average IO queue length and utilisation statistics are maintained | 1034 | * The average IO queue length and utilisation statistics are maintained |
1035 | * by observing the current state of the queue length and the amount of | 1035 | * by observing the current state of the queue length and the amount of |
1036 | * time it has been in this state for. | 1036 | * time it has been in this state for. |
1037 | * | 1037 | * |
1038 | * Normally, that accounting is done on IO completion, but that can result | 1038 | * Normally, that accounting is done on IO completion, but that can result |
1039 | * in more than a second's worth of IO being accounted for within any one | 1039 | * in more than a second's worth of IO being accounted for within any one |
1040 | * second, leading to >100% utilisation. To deal with that, we call this | 1040 | * second, leading to >100% utilisation. To deal with that, we call this |
1041 | * function to do a round-off before returning the results when reading | 1041 | * function to do a round-off before returning the results when reading |
1042 | * /proc/diskstats. This accounts immediately for all queue usage up to | 1042 | * /proc/diskstats. This accounts immediately for all queue usage up to |
1043 | * the current jiffies and restarts the counters again. | 1043 | * the current jiffies and restarts the counters again. |
1044 | */ | 1044 | */ |
1045 | void part_round_stats(int cpu, struct hd_struct *part) | 1045 | void part_round_stats(int cpu, struct hd_struct *part) |
1046 | { | 1046 | { |
1047 | unsigned long now = jiffies; | 1047 | unsigned long now = jiffies; |
1048 | 1048 | ||
1049 | if (part->partno) | 1049 | if (part->partno) |
1050 | part_round_stats_single(cpu, &part_to_disk(part)->part0, now); | 1050 | part_round_stats_single(cpu, &part_to_disk(part)->part0, now); |
1051 | part_round_stats_single(cpu, part, now); | 1051 | part_round_stats_single(cpu, part, now); |
1052 | } | 1052 | } |
1053 | EXPORT_SYMBOL_GPL(part_round_stats); | 1053 | EXPORT_SYMBOL_GPL(part_round_stats); |
1054 | 1054 | ||
1055 | /* | 1055 | /* |
1056 | * queue lock must be held | 1056 | * queue lock must be held |
1057 | */ | 1057 | */ |
1058 | void __blk_put_request(struct request_queue *q, struct request *req) | 1058 | void __blk_put_request(struct request_queue *q, struct request *req) |
1059 | { | 1059 | { |
1060 | if (unlikely(!q)) | 1060 | if (unlikely(!q)) |
1061 | return; | 1061 | return; |
1062 | if (unlikely(--req->ref_count)) | 1062 | if (unlikely(--req->ref_count)) |
1063 | return; | 1063 | return; |
1064 | 1064 | ||
1065 | elv_completed_request(q, req); | 1065 | elv_completed_request(q, req); |
1066 | 1066 | ||
1067 | /* this is a bio leak */ | 1067 | /* this is a bio leak */ |
1068 | WARN_ON(req->bio != NULL); | 1068 | WARN_ON(req->bio != NULL); |
1069 | 1069 | ||
1070 | /* | 1070 | /* |
1071 | * Request may not have originated from ll_rw_blk. if not, | 1071 | * Request may not have originated from ll_rw_blk. if not, |
1072 | * it didn't come out of our reserved rq pools | 1072 | * it didn't come out of our reserved rq pools |
1073 | */ | 1073 | */ |
1074 | if (req->cmd_flags & REQ_ALLOCED) { | 1074 | if (req->cmd_flags & REQ_ALLOCED) { |
1075 | int is_sync = rq_is_sync(req) != 0; | 1075 | int is_sync = rq_is_sync(req) != 0; |
1076 | int priv = req->cmd_flags & REQ_ELVPRIV; | 1076 | int priv = req->cmd_flags & REQ_ELVPRIV; |
1077 | 1077 | ||
1078 | BUG_ON(!list_empty(&req->queuelist)); | 1078 | BUG_ON(!list_empty(&req->queuelist)); |
1079 | BUG_ON(!hlist_unhashed(&req->hash)); | 1079 | BUG_ON(!hlist_unhashed(&req->hash)); |
1080 | 1080 | ||
1081 | blk_free_request(q, req); | 1081 | blk_free_request(q, req); |
1082 | freed_request(q, is_sync, priv); | 1082 | freed_request(q, is_sync, priv); |
1083 | } | 1083 | } |
1084 | } | 1084 | } |
1085 | EXPORT_SYMBOL_GPL(__blk_put_request); | 1085 | EXPORT_SYMBOL_GPL(__blk_put_request); |
1086 | 1086 | ||
1087 | void blk_put_request(struct request *req) | 1087 | void blk_put_request(struct request *req) |
1088 | { | 1088 | { |
1089 | unsigned long flags; | 1089 | unsigned long flags; |
1090 | struct request_queue *q = req->q; | 1090 | struct request_queue *q = req->q; |
1091 | 1091 | ||
1092 | spin_lock_irqsave(q->queue_lock, flags); | 1092 | spin_lock_irqsave(q->queue_lock, flags); |
1093 | __blk_put_request(q, req); | 1093 | __blk_put_request(q, req); |
1094 | spin_unlock_irqrestore(q->queue_lock, flags); | 1094 | spin_unlock_irqrestore(q->queue_lock, flags); |
1095 | } | 1095 | } |
1096 | EXPORT_SYMBOL(blk_put_request); | 1096 | EXPORT_SYMBOL(blk_put_request); |
1097 | 1097 | ||
1098 | void init_request_from_bio(struct request *req, struct bio *bio) | 1098 | void init_request_from_bio(struct request *req, struct bio *bio) |
1099 | { | 1099 | { |
1100 | req->cpu = bio->bi_comp_cpu; | 1100 | req->cpu = bio->bi_comp_cpu; |
1101 | req->cmd_type = REQ_TYPE_FS; | 1101 | req->cmd_type = REQ_TYPE_FS; |
1102 | 1102 | ||
1103 | /* | 1103 | /* |
1104 | * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) | 1104 | * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) |
1105 | */ | 1105 | */ |
1106 | if (bio_rw_ahead(bio)) | 1106 | if (bio_rw_ahead(bio)) |
1107 | req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | | 1107 | req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | |
1108 | REQ_FAILFAST_DRIVER); | 1108 | REQ_FAILFAST_DRIVER); |
1109 | if (bio_failfast_dev(bio)) | 1109 | if (bio_failfast_dev(bio)) |
1110 | req->cmd_flags |= REQ_FAILFAST_DEV; | 1110 | req->cmd_flags |= REQ_FAILFAST_DEV; |
1111 | if (bio_failfast_transport(bio)) | 1111 | if (bio_failfast_transport(bio)) |
1112 | req->cmd_flags |= REQ_FAILFAST_TRANSPORT; | 1112 | req->cmd_flags |= REQ_FAILFAST_TRANSPORT; |
1113 | if (bio_failfast_driver(bio)) | 1113 | if (bio_failfast_driver(bio)) |
1114 | req->cmd_flags |= REQ_FAILFAST_DRIVER; | 1114 | req->cmd_flags |= REQ_FAILFAST_DRIVER; |
1115 | 1115 | ||
1116 | /* | 1116 | /* |
1117 | * REQ_BARRIER implies no merging, but let's make it explicit | 1117 | * REQ_BARRIER implies no merging, but let's make it explicit |
1118 | */ | 1118 | */ |
1119 | if (unlikely(bio_discard(bio))) { | 1119 | if (unlikely(bio_discard(bio))) { |
1120 | req->cmd_flags |= REQ_DISCARD; | 1120 | req->cmd_flags |= REQ_DISCARD; |
1121 | if (bio_barrier(bio)) | 1121 | if (bio_barrier(bio)) |
1122 | req->cmd_flags |= REQ_SOFTBARRIER; | 1122 | req->cmd_flags |= REQ_SOFTBARRIER; |
1123 | req->q->prepare_discard_fn(req->q, req); | 1123 | req->q->prepare_discard_fn(req->q, req); |
1124 | } else if (unlikely(bio_barrier(bio))) | 1124 | } else if (unlikely(bio_barrier(bio))) |
1125 | req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | 1125 | req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); |
1126 | 1126 | ||
1127 | if (bio_sync(bio)) | 1127 | if (bio_sync(bio)) |
1128 | req->cmd_flags |= REQ_RW_SYNC; | 1128 | req->cmd_flags |= REQ_RW_SYNC; |
1129 | if (bio_rw_meta(bio)) | 1129 | if (bio_rw_meta(bio)) |
1130 | req->cmd_flags |= REQ_RW_META; | 1130 | req->cmd_flags |= REQ_RW_META; |
1131 | if (bio_noidle(bio)) | 1131 | if (bio_noidle(bio)) |
1132 | req->cmd_flags |= REQ_NOIDLE; | 1132 | req->cmd_flags |= REQ_NOIDLE; |
1133 | 1133 | ||
1134 | req->errors = 0; | 1134 | req->errors = 0; |
1135 | req->hard_sector = req->sector = bio->bi_sector; | 1135 | req->hard_sector = req->sector = bio->bi_sector; |
1136 | req->ioprio = bio_prio(bio); | 1136 | req->ioprio = bio_prio(bio); |
1137 | req->start_time = jiffies; | 1137 | req->start_time = jiffies; |
1138 | blk_rq_bio_prep(req->q, req, bio); | 1138 | blk_rq_bio_prep(req->q, req, bio); |
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | /* | 1141 | /* |
1142 | * Only disable plugging for a non-rotational device if it does tagging | 1142 | * Only disable plugging for a non-rotational device if it does tagging |
1143 | * as well; otherwise we do need the proper merging | 1143 | * as well; otherwise we do need the proper merging |
1144 | */ | 1144 | */ |
1145 | static inline bool queue_should_plug(struct request_queue *q) | 1145 | static inline bool queue_should_plug(struct request_queue *q) |
1146 | { | 1146 | { |
1147 | return !(blk_queue_nonrot(q) && blk_queue_tagged(q)); | 1147 | return !(blk_queue_nonrot(q) && blk_queue_tagged(q)); |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | static int __make_request(struct request_queue *q, struct bio *bio) | 1150 | static int __make_request(struct request_queue *q, struct bio *bio) |
1151 | { | 1151 | { |
1152 | struct request *req; | 1152 | struct request *req; |
1153 | int el_ret, nr_sectors; | 1153 | int el_ret, nr_sectors; |
1154 | const unsigned short prio = bio_prio(bio); | 1154 | const unsigned short prio = bio_prio(bio); |
1155 | const int sync = bio_sync(bio); | 1155 | const int sync = bio_sync(bio); |
1156 | const int unplug = bio_unplug(bio); | 1156 | const int unplug = bio_unplug(bio); |
1157 | int rw_flags; | 1157 | int rw_flags; |
1158 | 1158 | ||
1159 | nr_sectors = bio_sectors(bio); | 1159 | nr_sectors = bio_sectors(bio); |
1160 | 1160 | ||
1161 | /* | 1161 | /* |
1162 | * low level driver can indicate that it wants pages above a | 1162 | * low level driver can indicate that it wants pages above a |
1163 | * certain limit bounced to low memory (ie for highmem, or even | 1163 | * certain limit bounced to low memory (ie for highmem, or even |
1164 | * ISA dma in theory) | 1164 | * ISA dma in theory) |
1165 | */ | 1165 | */ |
1166 | blk_queue_bounce(q, &bio); | 1166 | blk_queue_bounce(q, &bio); |
1167 | 1167 | ||
1168 | spin_lock_irq(q->queue_lock); | 1168 | spin_lock_irq(q->queue_lock); |
1169 | 1169 | ||
1170 | if (unlikely(bio_barrier(bio)) || elv_queue_empty(q)) | 1170 | if (unlikely(bio_barrier(bio)) || elv_queue_empty(q)) |
1171 | goto get_rq; | 1171 | goto get_rq; |
1172 | 1172 | ||
1173 | el_ret = elv_merge(q, &req, bio); | 1173 | el_ret = elv_merge(q, &req, bio); |
1174 | switch (el_ret) { | 1174 | switch (el_ret) { |
1175 | case ELEVATOR_BACK_MERGE: | 1175 | case ELEVATOR_BACK_MERGE: |
1176 | BUG_ON(!rq_mergeable(req)); | 1176 | BUG_ON(!rq_mergeable(req)); |
1177 | 1177 | ||
1178 | if (!ll_back_merge_fn(q, req, bio)) | 1178 | if (!ll_back_merge_fn(q, req, bio)) |
1179 | break; | 1179 | break; |
1180 | 1180 | ||
1181 | trace_block_bio_backmerge(q, bio); | 1181 | trace_block_bio_backmerge(q, bio); |
1182 | 1182 | ||
1183 | req->biotail->bi_next = bio; | 1183 | req->biotail->bi_next = bio; |
1184 | req->biotail = bio; | 1184 | req->biotail = bio; |
1185 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 1185 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
1186 | req->ioprio = ioprio_best(req->ioprio, prio); | 1186 | req->ioprio = ioprio_best(req->ioprio, prio); |
1187 | if (!blk_rq_cpu_valid(req)) | 1187 | if (!blk_rq_cpu_valid(req)) |
1188 | req->cpu = bio->bi_comp_cpu; | 1188 | req->cpu = bio->bi_comp_cpu; |
1189 | drive_stat_acct(req, 0); | 1189 | drive_stat_acct(req, 0); |
1190 | if (!attempt_back_merge(q, req)) | 1190 | if (!attempt_back_merge(q, req)) |
1191 | elv_merged_request(q, req, el_ret); | 1191 | elv_merged_request(q, req, el_ret); |
1192 | goto out; | 1192 | goto out; |
1193 | 1193 | ||
1194 | case ELEVATOR_FRONT_MERGE: | 1194 | case ELEVATOR_FRONT_MERGE: |
1195 | BUG_ON(!rq_mergeable(req)); | 1195 | BUG_ON(!rq_mergeable(req)); |
1196 | 1196 | ||
1197 | if (!ll_front_merge_fn(q, req, bio)) | 1197 | if (!ll_front_merge_fn(q, req, bio)) |
1198 | break; | 1198 | break; |
1199 | 1199 | ||
1200 | trace_block_bio_frontmerge(q, bio); | 1200 | trace_block_bio_frontmerge(q, bio); |
1201 | 1201 | ||
1202 | bio->bi_next = req->bio; | 1202 | bio->bi_next = req->bio; |
1203 | req->bio = bio; | 1203 | req->bio = bio; |
1204 | 1204 | ||
1205 | /* | 1205 | /* |
1206 | * may not be valid. if the low level driver said | 1206 | * may not be valid. if the low level driver said |
1207 | * it didn't need a bounce buffer then it better | 1207 | * it didn't need a bounce buffer then it better |
1208 | * not touch req->buffer either... | 1208 | * not touch req->buffer either... |
1209 | */ | 1209 | */ |
1210 | req->buffer = bio_data(bio); | 1210 | req->buffer = bio_data(bio); |
1211 | req->current_nr_sectors = bio_cur_sectors(bio); | 1211 | req->current_nr_sectors = bio_cur_sectors(bio); |
1212 | req->hard_cur_sectors = req->current_nr_sectors; | 1212 | req->hard_cur_sectors = req->current_nr_sectors; |
1213 | req->sector = req->hard_sector = bio->bi_sector; | 1213 | req->sector = req->hard_sector = bio->bi_sector; |
1214 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 1214 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
1215 | req->ioprio = ioprio_best(req->ioprio, prio); | 1215 | req->ioprio = ioprio_best(req->ioprio, prio); |
1216 | if (!blk_rq_cpu_valid(req)) | 1216 | if (!blk_rq_cpu_valid(req)) |
1217 | req->cpu = bio->bi_comp_cpu; | 1217 | req->cpu = bio->bi_comp_cpu; |
1218 | drive_stat_acct(req, 0); | 1218 | drive_stat_acct(req, 0); |
1219 | if (!attempt_front_merge(q, req)) | 1219 | if (!attempt_front_merge(q, req)) |
1220 | elv_merged_request(q, req, el_ret); | 1220 | elv_merged_request(q, req, el_ret); |
1221 | goto out; | 1221 | goto out; |
1222 | 1222 | ||
1223 | /* ELV_NO_MERGE: elevator says don't/can't merge. */ | 1223 | /* ELV_NO_MERGE: elevator says don't/can't merge. */ |
1224 | default: | 1224 | default: |
1225 | ; | 1225 | ; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | get_rq: | 1228 | get_rq: |
1229 | /* | 1229 | /* |
1230 | * This sync check and mask will be re-done in init_request_from_bio(), | 1230 | * This sync check and mask will be re-done in init_request_from_bio(), |
1231 | * but we need to set it earlier to expose the sync flag to the | 1231 | * but we need to set it earlier to expose the sync flag to the |
1232 | * rq allocator and io schedulers. | 1232 | * rq allocator and io schedulers. |
1233 | */ | 1233 | */ |
1234 | rw_flags = bio_data_dir(bio); | 1234 | rw_flags = bio_data_dir(bio); |
1235 | if (sync) | 1235 | if (sync) |
1236 | rw_flags |= REQ_RW_SYNC; | 1236 | rw_flags |= REQ_RW_SYNC; |
1237 | 1237 | ||
1238 | /* | 1238 | /* |
1239 | * Grab a free request. This might sleep but cannot fail. | 1239 | * Grab a free request. This might sleep but cannot fail. |
1240 | * Returns with the queue unlocked. | 1240 | * Returns with the queue unlocked. |
1241 | */ | 1241 | */ |
1242 | req = get_request_wait(q, rw_flags, bio); | 1242 | req = get_request_wait(q, rw_flags, bio); |
1243 | 1243 | ||
1244 | /* | 1244 | /* |
1245 | * After dropping the lock and possibly sleeping here, our request | 1245 | * After dropping the lock and possibly sleeping here, our request |
1246 | * may now be mergeable after it had proven unmergeable (above). | 1246 | * may now be mergeable after it had proven unmergeable (above). |
1247 | * We don't worry about that case for efficiency. It won't happen | 1247 | * We don't worry about that case for efficiency. It won't happen |
1248 | * often, and the elevators are able to handle it. | 1248 | * often, and the elevators are able to handle it. |
1249 | */ | 1249 | */ |
1250 | init_request_from_bio(req, bio); | 1250 | init_request_from_bio(req, bio); |
1251 | 1251 | ||
1252 | spin_lock_irq(q->queue_lock); | 1252 | spin_lock_irq(q->queue_lock); |
1253 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || | 1253 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || |
1254 | bio_flagged(bio, BIO_CPU_AFFINE)) | 1254 | bio_flagged(bio, BIO_CPU_AFFINE)) |
1255 | req->cpu = blk_cpu_to_group(smp_processor_id()); | 1255 | req->cpu = blk_cpu_to_group(smp_processor_id()); |
1256 | if (queue_should_plug(q) && elv_queue_empty(q)) | 1256 | if (queue_should_plug(q) && elv_queue_empty(q)) |
1257 | blk_plug_device(q); | 1257 | blk_plug_device(q); |
1258 | add_request(q, req); | 1258 | add_request(q, req); |
1259 | out: | 1259 | out: |
1260 | if (unplug || !queue_should_plug(q)) | 1260 | if (unplug || !queue_should_plug(q)) |
1261 | __generic_unplug_device(q); | 1261 | __generic_unplug_device(q); |
1262 | spin_unlock_irq(q->queue_lock); | 1262 | spin_unlock_irq(q->queue_lock); |
1263 | return 0; | 1263 | return 0; |
1264 | } | 1264 | } |
1265 | 1265 | ||
1266 | /* | 1266 | /* |
1267 | * If bio->bi_dev is a partition, remap the location | 1267 | * If bio->bi_dev is a partition, remap the location |
1268 | */ | 1268 | */ |
1269 | static inline void blk_partition_remap(struct bio *bio) | 1269 | static inline void blk_partition_remap(struct bio *bio) |
1270 | { | 1270 | { |
1271 | struct block_device *bdev = bio->bi_bdev; | 1271 | struct block_device *bdev = bio->bi_bdev; |
1272 | 1272 | ||
1273 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { | 1273 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { |
1274 | struct hd_struct *p = bdev->bd_part; | 1274 | struct hd_struct *p = bdev->bd_part; |
1275 | 1275 | ||
1276 | bio->bi_sector += p->start_sect; | 1276 | bio->bi_sector += p->start_sect; |
1277 | bio->bi_bdev = bdev->bd_contains; | 1277 | bio->bi_bdev = bdev->bd_contains; |
1278 | 1278 | ||
1279 | trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, | 1279 | trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, |
1280 | bdev->bd_dev, bio->bi_sector, | 1280 | bdev->bd_dev, bio->bi_sector, |
1281 | bio->bi_sector - p->start_sect); | 1281 | bio->bi_sector - p->start_sect); |
1282 | } | 1282 | } |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | static void handle_bad_sector(struct bio *bio) | 1285 | static void handle_bad_sector(struct bio *bio) |
1286 | { | 1286 | { |
1287 | char b[BDEVNAME_SIZE]; | 1287 | char b[BDEVNAME_SIZE]; |
1288 | 1288 | ||
1289 | printk(KERN_INFO "attempt to access beyond end of device\n"); | 1289 | printk(KERN_INFO "attempt to access beyond end of device\n"); |
1290 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | 1290 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", |
1291 | bdevname(bio->bi_bdev, b), | 1291 | bdevname(bio->bi_bdev, b), |
1292 | bio->bi_rw, | 1292 | bio->bi_rw, |
1293 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | 1293 | (unsigned long long)bio->bi_sector + bio_sectors(bio), |
1294 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | 1294 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); |
1295 | 1295 | ||
1296 | set_bit(BIO_EOF, &bio->bi_flags); | 1296 | set_bit(BIO_EOF, &bio->bi_flags); |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 1299 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
1300 | 1300 | ||
1301 | static DECLARE_FAULT_ATTR(fail_make_request); | 1301 | static DECLARE_FAULT_ATTR(fail_make_request); |
1302 | 1302 | ||
1303 | static int __init setup_fail_make_request(char *str) | 1303 | static int __init setup_fail_make_request(char *str) |
1304 | { | 1304 | { |
1305 | return setup_fault_attr(&fail_make_request, str); | 1305 | return setup_fault_attr(&fail_make_request, str); |
1306 | } | 1306 | } |
1307 | __setup("fail_make_request=", setup_fail_make_request); | 1307 | __setup("fail_make_request=", setup_fail_make_request); |
1308 | 1308 | ||
1309 | static int should_fail_request(struct bio *bio) | 1309 | static int should_fail_request(struct bio *bio) |
1310 | { | 1310 | { |
1311 | struct hd_struct *part = bio->bi_bdev->bd_part; | 1311 | struct hd_struct *part = bio->bi_bdev->bd_part; |
1312 | 1312 | ||
1313 | if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) | 1313 | if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) |
1314 | return should_fail(&fail_make_request, bio->bi_size); | 1314 | return should_fail(&fail_make_request, bio->bi_size); |
1315 | 1315 | ||
1316 | return 0; | 1316 | return 0; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | static int __init fail_make_request_debugfs(void) | 1319 | static int __init fail_make_request_debugfs(void) |
1320 | { | 1320 | { |
1321 | return init_fault_attr_dentries(&fail_make_request, | 1321 | return init_fault_attr_dentries(&fail_make_request, |
1322 | "fail_make_request"); | 1322 | "fail_make_request"); |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | late_initcall(fail_make_request_debugfs); | 1325 | late_initcall(fail_make_request_debugfs); |
1326 | 1326 | ||
1327 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | 1327 | #else /* CONFIG_FAIL_MAKE_REQUEST */ |
1328 | 1328 | ||
1329 | static inline int should_fail_request(struct bio *bio) | 1329 | static inline int should_fail_request(struct bio *bio) |
1330 | { | 1330 | { |
1331 | return 0; | 1331 | return 0; |
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | 1334 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ |
1335 | 1335 | ||
1336 | /* | 1336 | /* |
1337 | * Check whether this bio extends beyond the end of the device. | 1337 | * Check whether this bio extends beyond the end of the device. |
1338 | */ | 1338 | */ |
1339 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | 1339 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) |
1340 | { | 1340 | { |
1341 | sector_t maxsector; | 1341 | sector_t maxsector; |
1342 | 1342 | ||
1343 | if (!nr_sectors) | 1343 | if (!nr_sectors) |
1344 | return 0; | 1344 | return 0; |
1345 | 1345 | ||
1346 | /* Test device or partition size, when known. */ | 1346 | /* Test device or partition size, when known. */ |
1347 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | 1347 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; |
1348 | if (maxsector) { | 1348 | if (maxsector) { |
1349 | sector_t sector = bio->bi_sector; | 1349 | sector_t sector = bio->bi_sector; |
1350 | 1350 | ||
1351 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | 1351 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { |
1352 | /* | 1352 | /* |
1353 | * This may well happen - the kernel calls bread() | 1353 | * This may well happen - the kernel calls bread() |
1354 | * without checking the size of the device, e.g., when | 1354 | * without checking the size of the device, e.g., when |
1355 | * mounting a device. | 1355 | * mounting a device. |
1356 | */ | 1356 | */ |
1357 | handle_bad_sector(bio); | 1357 | handle_bad_sector(bio); |
1358 | return 1; | 1358 | return 1; |
1359 | } | 1359 | } |
1360 | } | 1360 | } |
1361 | 1361 | ||
1362 | return 0; | 1362 | return 0; |
1363 | } | 1363 | } |
1364 | 1364 | ||
1365 | /** | 1365 | /** |
1366 | * generic_make_request - hand a buffer to its device driver for I/O | 1366 | * generic_make_request - hand a buffer to its device driver for I/O |
1367 | * @bio: The bio describing the location in memory and on the device. | 1367 | * @bio: The bio describing the location in memory and on the device. |
1368 | * | 1368 | * |
1369 | * generic_make_request() is used to make I/O requests of block | 1369 | * generic_make_request() is used to make I/O requests of block |
1370 | * devices. It is passed a &struct bio, which describes the I/O that needs | 1370 | * devices. It is passed a &struct bio, which describes the I/O that needs |
1371 | * to be done. | 1371 | * to be done. |
1372 | * | 1372 | * |
1373 | * generic_make_request() does not return any status. The | 1373 | * generic_make_request() does not return any status. The |
1374 | * success/failure status of the request, along with notification of | 1374 | * success/failure status of the request, along with notification of |
1375 | * completion, is delivered asynchronously through the bio->bi_end_io | 1375 | * completion, is delivered asynchronously through the bio->bi_end_io |
1376 | * function described (one day) elsewhere. | 1376 | * function described (one day) elsewhere. |
1377 | * | 1377 | * |
1378 | * The caller of generic_make_request must make sure that bi_io_vec | 1378 | * The caller of generic_make_request must make sure that bi_io_vec |
1379 | * are set to describe the memory buffer, and that bi_dev and bi_sector are | 1379 | * are set to describe the memory buffer, and that bi_dev and bi_sector are |
1380 | * set to describe the device address, and the | 1380 | * set to describe the device address, and the |
1381 | * bi_end_io and optionally bi_private are set to describe how | 1381 | * bi_end_io and optionally bi_private are set to describe how |
1382 | * completion notification should be signaled. | 1382 | * completion notification should be signaled. |
1383 | * | 1383 | * |
1384 | * generic_make_request and the drivers it calls may use bi_next if this | 1384 | * generic_make_request and the drivers it calls may use bi_next if this |
1385 | * bio happens to be merged with someone else, and may change bi_dev and | 1385 | * bio happens to be merged with someone else, and may change bi_dev and |
1386 | * bi_sector for remaps as it sees fit. So the values of these fields | 1386 | * bi_sector for remaps as it sees fit. So the values of these fields |
1387 | * should NOT be depended on after the call to generic_make_request. | 1387 | * should NOT be depended on after the call to generic_make_request. |
1388 | */ | 1388 | */ |
1389 | static inline void __generic_make_request(struct bio *bio) | 1389 | static inline void __generic_make_request(struct bio *bio) |
1390 | { | 1390 | { |
1391 | struct request_queue *q; | 1391 | struct request_queue *q; |
1392 | sector_t old_sector; | 1392 | sector_t old_sector; |
1393 | int ret, nr_sectors = bio_sectors(bio); | 1393 | int ret, nr_sectors = bio_sectors(bio); |
1394 | dev_t old_dev; | 1394 | dev_t old_dev; |
1395 | int err = -EIO; | 1395 | int err = -EIO; |
1396 | 1396 | ||
1397 | might_sleep(); | 1397 | might_sleep(); |
1398 | 1398 | ||
1399 | if (bio_check_eod(bio, nr_sectors)) | 1399 | if (bio_check_eod(bio, nr_sectors)) |
1400 | goto end_io; | 1400 | goto end_io; |
1401 | 1401 | ||
1402 | /* | 1402 | /* |
1403 | * Resolve the mapping until finished. (drivers are | 1403 | * Resolve the mapping until finished. (drivers are |
1404 | * still free to implement/resolve their own stacking | 1404 | * still free to implement/resolve their own stacking |
1405 | * by explicitly returning 0) | 1405 | * by explicitly returning 0) |
1406 | * | 1406 | * |
1407 | * NOTE: we don't repeat the blk_size check for each new device. | 1407 | * NOTE: we don't repeat the blk_size check for each new device. |
1408 | * Stacking drivers are expected to know what they are doing. | 1408 | * Stacking drivers are expected to know what they are doing. |
1409 | */ | 1409 | */ |
1410 | old_sector = -1; | 1410 | old_sector = -1; |
1411 | old_dev = 0; | 1411 | old_dev = 0; |
1412 | do { | 1412 | do { |
1413 | char b[BDEVNAME_SIZE]; | 1413 | char b[BDEVNAME_SIZE]; |
1414 | 1414 | ||
1415 | q = bdev_get_queue(bio->bi_bdev); | 1415 | q = bdev_get_queue(bio->bi_bdev); |
1416 | if (unlikely(!q)) { | 1416 | if (unlikely(!q)) { |
1417 | printk(KERN_ERR | 1417 | printk(KERN_ERR |
1418 | "generic_make_request: Trying to access " | 1418 | "generic_make_request: Trying to access " |
1419 | "nonexistent block-device %s (%Lu)\n", | 1419 | "nonexistent block-device %s (%Lu)\n", |
1420 | bdevname(bio->bi_bdev, b), | 1420 | bdevname(bio->bi_bdev, b), |
1421 | (long long) bio->bi_sector); | 1421 | (long long) bio->bi_sector); |
1422 | goto end_io; | 1422 | goto end_io; |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | if (unlikely(nr_sectors > q->max_hw_sectors)) { | 1425 | if (unlikely(nr_sectors > q->max_hw_sectors)) { |
1426 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 1426 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", |
1427 | bdevname(bio->bi_bdev, b), | 1427 | bdevname(bio->bi_bdev, b), |
1428 | bio_sectors(bio), | 1428 | bio_sectors(bio), |
1429 | q->max_hw_sectors); | 1429 | q->max_hw_sectors); |
1430 | goto end_io; | 1430 | goto end_io; |
1431 | } | 1431 | } |
1432 | 1432 | ||
1433 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | 1433 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) |
1434 | goto end_io; | 1434 | goto end_io; |
1435 | 1435 | ||
1436 | if (should_fail_request(bio)) | 1436 | if (should_fail_request(bio)) |
1437 | goto end_io; | 1437 | goto end_io; |
1438 | 1438 | ||
1439 | /* | 1439 | /* |
1440 | * If this device has partitions, remap block n | 1440 | * If this device has partitions, remap block n |
1441 | * of partition p to block n+start(p) of the disk. | 1441 | * of partition p to block n+start(p) of the disk. |
1442 | */ | 1442 | */ |
1443 | blk_partition_remap(bio); | 1443 | blk_partition_remap(bio); |
1444 | 1444 | ||
1445 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) | 1445 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) |
1446 | goto end_io; | 1446 | goto end_io; |
1447 | 1447 | ||
1448 | if (old_sector != -1) | 1448 | if (old_sector != -1) |
1449 | trace_block_remap(q, bio, old_dev, bio->bi_sector, | 1449 | trace_block_remap(q, bio, old_dev, bio->bi_sector, |
1450 | old_sector); | 1450 | old_sector); |
1451 | 1451 | ||
1452 | trace_block_bio_queue(q, bio); | 1452 | trace_block_bio_queue(q, bio); |
1453 | 1453 | ||
1454 | old_sector = bio->bi_sector; | 1454 | old_sector = bio->bi_sector; |
1455 | old_dev = bio->bi_bdev->bd_dev; | 1455 | old_dev = bio->bi_bdev->bd_dev; |
1456 | 1456 | ||
1457 | if (bio_check_eod(bio, nr_sectors)) | 1457 | if (bio_check_eod(bio, nr_sectors)) |
1458 | goto end_io; | 1458 | goto end_io; |
1459 | 1459 | ||
1460 | if (bio_discard(bio) && !q->prepare_discard_fn) { | 1460 | if (bio_discard(bio) && !q->prepare_discard_fn) { |
1461 | err = -EOPNOTSUPP; | 1461 | err = -EOPNOTSUPP; |
1462 | goto end_io; | 1462 | goto end_io; |
1463 | } | 1463 | } |
1464 | if (bio_barrier(bio) && bio_has_data(bio) && | 1464 | if (bio_barrier(bio) && bio_has_data(bio) && |
1465 | (q->next_ordered == QUEUE_ORDERED_NONE)) { | 1465 | (q->next_ordered == QUEUE_ORDERED_NONE)) { |
1466 | err = -EOPNOTSUPP; | 1466 | err = -EOPNOTSUPP; |
1467 | goto end_io; | 1467 | goto end_io; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | ret = q->make_request_fn(q, bio); | 1470 | ret = q->make_request_fn(q, bio); |
1471 | } while (ret); | 1471 | } while (ret); |
1472 | 1472 | ||
1473 | return; | 1473 | return; |
1474 | 1474 | ||
1475 | end_io: | 1475 | end_io: |
1476 | bio_endio(bio, err); | 1476 | bio_endio(bio, err); |
1477 | } | 1477 | } |
1478 | 1478 | ||
1479 | /* | 1479 | /* |
1480 | * We only want one ->make_request_fn to be active at a time, | 1480 | * We only want one ->make_request_fn to be active at a time, |
1481 | * else stack usage with stacked devices could be a problem. | 1481 | * else stack usage with stacked devices could be a problem. |
1482 | * So use current->bio_{list,tail} to keep a list of requests | 1482 | * So use current->bio_{list,tail} to keep a list of requests |
1483 | * submitted by a make_request_fn function. | 1483 | * submitted by a make_request_fn function. |
1484 | * current->bio_tail is also used as a flag to say if | 1484 | * current->bio_tail is also used as a flag to say if |
1485 | * generic_make_request is currently active in this task or not. | 1485 | * generic_make_request is currently active in this task or not. |
1486 | * If it is NULL, then no make_request is active. If it is non-NULL, | 1486 | * If it is NULL, then no make_request is active. If it is non-NULL, |
1487 | * then a make_request is active, and new requests should be added | 1487 | * then a make_request is active, and new requests should be added |
1488 | * at the tail | 1488 | * at the tail |
1489 | */ | 1489 | */ |
1490 | void generic_make_request(struct bio *bio) | 1490 | void generic_make_request(struct bio *bio) |
1491 | { | 1491 | { |
1492 | if (current->bio_tail) { | 1492 | if (current->bio_tail) { |
1493 | /* make_request is active */ | 1493 | /* make_request is active */ |
1494 | *(current->bio_tail) = bio; | 1494 | *(current->bio_tail) = bio; |
1495 | bio->bi_next = NULL; | 1495 | bio->bi_next = NULL; |
1496 | current->bio_tail = &bio->bi_next; | 1496 | current->bio_tail = &bio->bi_next; |
1497 | return; | 1497 | return; |
1498 | } | 1498 | } |
1499 | /* following loop may be a bit non-obvious, and so deserves some | 1499 | /* following loop may be a bit non-obvious, and so deserves some |
1500 | * explanation. | 1500 | * explanation. |
1501 | * Before entering the loop, bio->bi_next is NULL (as all callers | 1501 | * Before entering the loop, bio->bi_next is NULL (as all callers |
1502 | * ensure that) so we have a list with a single bio. | 1502 | * ensure that) so we have a list with a single bio. |
1503 | * We pretend that we have just taken it off a longer list, so | 1503 | * We pretend that we have just taken it off a longer list, so |
1504 | * we assign bio_list to the next (which is NULL) and bio_tail | 1504 | * we assign bio_list to the next (which is NULL) and bio_tail |
1505 | * to &bio_list, thus initialising the bio_list of new bios to be | 1505 | * to &bio_list, thus initialising the bio_list of new bios to be |
1506 | * added. __generic_make_request may indeed add some more bios | 1506 | * added. __generic_make_request may indeed add some more bios |
1507 | * through a recursive call to generic_make_request. If it | 1507 | * through a recursive call to generic_make_request. If it |
1508 | * did, we find a non-NULL value in bio_list and re-enter the loop | 1508 | * did, we find a non-NULL value in bio_list and re-enter the loop |
1509 | * from the top. In this case we really did just take the bio | 1509 | * from the top. In this case we really did just take the bio |
1510 | * off the top of the list (no pretending) and so fixup bio_list and | 1510 | * off the top of the list (no pretending) and so fixup bio_list and |
1511 | * bio_tail or bi_next, and call into __generic_make_request again. | 1511 | * bio_tail or bi_next, and call into __generic_make_request again. |
1512 | * | 1512 | * |
1513 | * The loop was structured like this to make only one call to | 1513 | * The loop was structured like this to make only one call to |
1514 | * __generic_make_request (which is important as it is large and | 1514 | * __generic_make_request (which is important as it is large and |
1515 | * inlined) and to keep the structure simple. | 1515 | * inlined) and to keep the structure simple. |
1516 | */ | 1516 | */ |
1517 | BUG_ON(bio->bi_next); | 1517 | BUG_ON(bio->bi_next); |
1518 | do { | 1518 | do { |
1519 | current->bio_list = bio->bi_next; | 1519 | current->bio_list = bio->bi_next; |
1520 | if (bio->bi_next == NULL) | 1520 | if (bio->bi_next == NULL) |
1521 | current->bio_tail = &current->bio_list; | 1521 | current->bio_tail = &current->bio_list; |
1522 | else | 1522 | else |
1523 | bio->bi_next = NULL; | 1523 | bio->bi_next = NULL; |
1524 | __generic_make_request(bio); | 1524 | __generic_make_request(bio); |
1525 | bio = current->bio_list; | 1525 | bio = current->bio_list; |
1526 | } while (bio); | 1526 | } while (bio); |
1527 | current->bio_tail = NULL; /* deactivate */ | 1527 | current->bio_tail = NULL; /* deactivate */ |
1528 | } | 1528 | } |
1529 | EXPORT_SYMBOL(generic_make_request); | 1529 | EXPORT_SYMBOL(generic_make_request); |
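The caller contract in the kernel-doc above (bi_io_vec describing the buffer, bi_bdev/bi_sector giving the device address, bi_end_io set for completion) looks like this in practice. A minimal sketch, not part of the file: the example_* names and the single-page read are assumptions.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* completion callback: may run in interrupt context */
static void example_end_io(struct bio *bio, int error)
{
	/* inspect 'error' / BIO_UPTODATE here, then drop the bio */
	bio_put(bio);
}

/* Sketch: read one page at 'sector' from 'bdev'. */
static void example_read_page(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;			/* device address */
	bio->bi_sector = sector;		/* starting sector */
	bio->bi_end_io = example_end_io;	/* completion notification */
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* memory buffer */

	/* bi_rw stays 0 (READ); a write would set WRITE or go via submit_bio() */
	generic_make_request(bio);
}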
1530 | 1530 | ||
1531 | /** | 1531 | /** |
1532 | * submit_bio - submit a bio to the block device layer for I/O | 1532 | * submit_bio - submit a bio to the block device layer for I/O |
1533 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | 1533 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) |
1534 | * @bio: The &struct bio which describes the I/O | 1534 | * @bio: The &struct bio which describes the I/O |
1535 | * | 1535 | * |
1536 | * submit_bio() is very similar in purpose to generic_make_request(), and | 1536 | * submit_bio() is very similar in purpose to generic_make_request(), and |
1537 | * uses that function to do most of the work. Both are fairly rough | 1537 | * uses that function to do most of the work. Both are fairly rough |
1538 | * interfaces; @bio must be presetup and ready for I/O. | 1538 | * interfaces; @bio must be presetup and ready for I/O. |
1539 | * | 1539 | * |
1540 | */ | 1540 | */ |
1541 | void submit_bio(int rw, struct bio *bio) | 1541 | void submit_bio(int rw, struct bio *bio) |
1542 | { | 1542 | { |
1543 | int count = bio_sectors(bio); | 1543 | int count = bio_sectors(bio); |
1544 | 1544 | ||
1545 | bio->bi_rw |= rw; | 1545 | bio->bi_rw |= rw; |
1546 | 1546 | ||
1547 | /* | 1547 | /* |
1548 | * If it's a regular read/write or a barrier with data attached, | 1548 | * If it's a regular read/write or a barrier with data attached, |
1549 | * go through the normal accounting stuff before submission. | 1549 | * go through the normal accounting stuff before submission. |
1550 | */ | 1550 | */ |
1551 | if (bio_has_data(bio)) { | 1551 | if (bio_has_data(bio)) { |
1552 | if (rw & WRITE) { | 1552 | if (rw & WRITE) { |
1553 | count_vm_events(PGPGOUT, count); | 1553 | count_vm_events(PGPGOUT, count); |
1554 | } else { | 1554 | } else { |
1555 | task_io_account_read(bio->bi_size); | 1555 | task_io_account_read(bio->bi_size); |
1556 | count_vm_events(PGPGIN, count); | 1556 | count_vm_events(PGPGIN, count); |
1557 | } | 1557 | } |
1558 | 1558 | ||
1559 | if (unlikely(block_dump)) { | 1559 | if (unlikely(block_dump)) { |
1560 | char b[BDEVNAME_SIZE]; | 1560 | char b[BDEVNAME_SIZE]; |
1561 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | 1561 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", |
1562 | current->comm, task_pid_nr(current), | 1562 | current->comm, task_pid_nr(current), |
1563 | (rw & WRITE) ? "WRITE" : "READ", | 1563 | (rw & WRITE) ? "WRITE" : "READ", |
1564 | (unsigned long long)bio->bi_sector, | 1564 | (unsigned long long)bio->bi_sector, |
1565 | bdevname(bio->bi_bdev, b)); | 1565 | bdevname(bio->bi_bdev, b)); |
1566 | } | 1566 | } |
1567 | } | 1567 | } |
1568 | 1568 | ||
1569 | generic_make_request(bio); | 1569 | generic_make_request(bio); |
1570 | } | 1570 | } |
1571 | EXPORT_SYMBOL(submit_bio); | 1571 | EXPORT_SYMBOL(submit_bio); |
1572 | 1572 | ||
1573 | /** | 1573 | /** |
1574 | * blk_rq_check_limits - Helper function to check a request for the queue limit | 1574 | * blk_rq_check_limits - Helper function to check a request for the queue limit |
1575 | * @q: the queue | 1575 | * @q: the queue |
1576 | * @rq: the request being checked | 1576 | * @rq: the request being checked |
1577 | * | 1577 | * |
1578 | * Description: | 1578 | * Description: |
1579 | * @rq may have been made based on weaker limitations of upper-level queues | 1579 | * @rq may have been made based on weaker limitations of upper-level queues |
1580 | * in request stacking drivers, and it may violate the limitation of @q. | 1580 | * in request stacking drivers, and it may violate the limitation of @q. |
1581 | * Since the block layer and the underlying device driver trust @rq | 1581 | * Since the block layer and the underlying device driver trust @rq |
1582 | * after it is inserted to @q, it should be checked against @q before | 1582 | * after it is inserted to @q, it should be checked against @q before |
1583 | * the insertion using this generic function. | 1583 | * the insertion using this generic function. |
1584 | * | 1584 | * |
1585 | * This function should also be useful for request stacking drivers | 1585 | * This function should also be useful for request stacking drivers |
1586 | * in some cases below, so export this function. | 1586 | * in some cases below, so export this function. |
1587 | * Request stacking drivers like request-based dm may change the queue | 1587 | * Request stacking drivers like request-based dm may change the queue |
1588 | * limits while requests are in the queue (e.g. dm's table swapping). | 1588 | * limits while requests are in the queue (e.g. dm's table swapping). |
1589 | * Such request stacking drivers should check those requests against | 1589 | * Such request stacking drivers should check those requests against |
1590 | * the new queue limits again when they dispatch those requests, | 1590 | * the new queue limits again when they dispatch those requests, |
1591 | * although such checks are also done against the old queue limits | 1591 | * although such checks are also done against the old queue limits |
1592 | * when submitting requests. | 1592 | * when submitting requests. |
1593 | */ | 1593 | */ |
1594 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) | 1594 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) |
1595 | { | 1595 | { |
1596 | if (rq->nr_sectors > q->max_sectors || | 1596 | if (rq->nr_sectors > q->max_sectors || |
1597 | rq->data_len > q->max_hw_sectors << 9) { | 1597 | rq->data_len > q->max_hw_sectors << 9) { |
1598 | printk(KERN_ERR "%s: over max size limit.\n", __func__); | 1598 | printk(KERN_ERR "%s: over max size limit.\n", __func__); |
1599 | return -EIO; | 1599 | return -EIO; |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | /* | 1602 | /* |
1603 | * queue's settings related to segment counting like q->bounce_pfn | 1603 | * queue's settings related to segment counting like q->bounce_pfn |
1604 | * may differ from that of other stacking queues. | 1604 | * may differ from that of other stacking queues. |
1605 | * Recalculate it to check the request correctly on this queue's | 1605 | * Recalculate it to check the request correctly on this queue's |
1606 | * limitation. | 1606 | * limitation. |
1607 | */ | 1607 | */ |
1608 | blk_recalc_rq_segments(rq); | 1608 | blk_recalc_rq_segments(rq); |
1609 | if (rq->nr_phys_segments > q->max_phys_segments || | 1609 | if (rq->nr_phys_segments > q->max_phys_segments || |
1610 | rq->nr_phys_segments > q->max_hw_segments) { | 1610 | rq->nr_phys_segments > q->max_hw_segments) { |
1611 | printk(KERN_ERR "%s: over max segments limit.\n", __func__); | 1611 | printk(KERN_ERR "%s: over max segments limit.\n", __func__); |
1612 | return -EIO; | 1612 | return -EIO; |
1613 | } | 1613 | } |
1614 | 1614 | ||
1615 | return 0; | 1615 | return 0; |
1616 | } | 1616 | } |
1617 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); | 1617 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); |
1618 | 1618 | ||
1619 | /** | 1619 | /** |
1620 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request | 1620 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request |
1621 | * @q: the queue to submit the request | 1621 | * @q: the queue to submit the request |
1622 | * @rq: the request being queued | 1622 | * @rq: the request being queued |
1623 | */ | 1623 | */ |
1624 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 1624 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
1625 | { | 1625 | { |
1626 | unsigned long flags; | 1626 | unsigned long flags; |
1627 | 1627 | ||
1628 | if (blk_rq_check_limits(q, rq)) | 1628 | if (blk_rq_check_limits(q, rq)) |
1629 | return -EIO; | 1629 | return -EIO; |
1630 | 1630 | ||
1631 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 1631 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
1632 | if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && | 1632 | if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && |
1633 | should_fail(&fail_make_request, blk_rq_bytes(rq))) | 1633 | should_fail(&fail_make_request, blk_rq_bytes(rq))) |
1634 | return -EIO; | 1634 | return -EIO; |
1635 | #endif | 1635 | #endif |
1636 | 1636 | ||
1637 | spin_lock_irqsave(q->queue_lock, flags); | 1637 | spin_lock_irqsave(q->queue_lock, flags); |
1638 | 1638 | ||
1639 | /* | 1639 | /* |
1640 | * Submitting request must be dequeued before calling this function | 1640 | * Submitting request must be dequeued before calling this function |
1641 | * because it will be linked to another request_queue | 1641 | * because it will be linked to another request_queue |
1642 | */ | 1642 | */ |
1643 | BUG_ON(blk_queued_rq(rq)); | 1643 | BUG_ON(blk_queued_rq(rq)); |
1644 | 1644 | ||
1645 | drive_stat_acct(rq, 1); | 1645 | drive_stat_acct(rq, 1); |
1646 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | 1646 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); |
1647 | 1647 | ||
1648 | spin_unlock_irqrestore(q->queue_lock, flags); | 1648 | spin_unlock_irqrestore(q->queue_lock, flags); |
1649 | 1649 | ||
1650 | return 0; | 1650 | return 0; |
1651 | } | 1651 | } |
1652 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 1652 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); |
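For a request-based stacking driver, the two exported helpers above combine into a dispatch step like the following. A sketch only, not from the file; how the clone is built is left to the stacking driver, and lower_q is assumed to be the underlying device's queue.

#include <linux/blkdev.h>

/*
 * Sketch: hand a prepared clone to the underlying queue. The clone must
 * not be queued anywhere else when this is called.
 */
static int example_dispatch_clone(struct request_queue *lower_q,
				  struct request *clone)
{
	int ret;

	/*
	 * blk_insert_cloned_request() re-runs blk_rq_check_limits() against
	 * lower_q, since the clone may have been built under the weaker
	 * limits of an upper queue.
	 */
	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)
		return ret;	/* -EIO: caller completes or requeues the clone */

	return 0;
}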
1653 | 1653 | ||
1654 | /** | 1654 | /** |
1655 | * blkdev_dequeue_request - dequeue request and start timeout timer | 1655 | * blkdev_dequeue_request - dequeue request and start timeout timer |
1656 | * @req: request to dequeue | 1656 | * @req: request to dequeue |
1657 | * | 1657 | * |
1658 | * Dequeue @req and start timeout timer on it. This hands off the | 1658 | * Dequeue @req and start timeout timer on it. This hands off the |
1659 | * request to the driver. | 1659 | * request to the driver. |
1660 | * | 1660 | * |
1661 | * Block internal functions which don't want to start timer should | 1661 | * Block internal functions which don't want to start timer should |
1662 | * call elv_dequeue_request(). | 1662 | * call elv_dequeue_request(). |
1663 | */ | 1663 | */ |
1664 | void blkdev_dequeue_request(struct request *req) | 1664 | void blkdev_dequeue_request(struct request *req) |
1665 | { | 1665 | { |
1666 | elv_dequeue_request(req->q, req); | 1666 | elv_dequeue_request(req->q, req); |
1667 | 1667 | ||
1668 | /* | 1668 | /* |
1669 | * We are now handing the request to the hardware, add the | 1669 | * We are now handing the request to the hardware, add the |
1670 | * timeout handler. | 1670 | * timeout handler. |
1671 | */ | 1671 | */ |
1672 | blk_add_timer(req); | 1672 | blk_add_timer(req); |
1673 | } | 1673 | } |
1674 | EXPORT_SYMBOL(blkdev_dequeue_request); | 1674 | EXPORT_SYMBOL(blkdev_dequeue_request); |
1675 | 1675 | ||
1676 | static void blk_account_io_completion(struct request *req, unsigned int bytes) | 1676 | static void blk_account_io_completion(struct request *req, unsigned int bytes) |
1677 | { | 1677 | { |
1678 | if (!blk_do_io_stat(req)) | 1678 | if (!blk_do_io_stat(req)) |
1679 | return; | 1679 | return; |
1680 | 1680 | ||
1681 | if (blk_fs_request(req)) { | 1681 | if (blk_fs_request(req)) { |
1682 | const int rw = rq_data_dir(req); | 1682 | const int rw = rq_data_dir(req); |
1683 | struct hd_struct *part; | 1683 | struct hd_struct *part; |
1684 | int cpu; | 1684 | int cpu; |
1685 | 1685 | ||
1686 | cpu = part_stat_lock(); | 1686 | cpu = part_stat_lock(); |
1687 | part = disk_map_sector_rcu(req->rq_disk, req->sector); | 1687 | part = disk_map_sector_rcu(req->rq_disk, req->sector); |
1688 | part_stat_add(cpu, part, sectors[rw], bytes >> 9); | 1688 | part_stat_add(cpu, part, sectors[rw], bytes >> 9); |
1689 | part_stat_unlock(); | 1689 | part_stat_unlock(); |
1690 | } | 1690 | } |
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | static void blk_account_io_done(struct request *req) | 1693 | static void blk_account_io_done(struct request *req) |
1694 | { | 1694 | { |
1695 | if (!blk_do_io_stat(req)) | 1695 | if (!blk_do_io_stat(req)) |
1696 | return; | 1696 | return; |
1697 | 1697 | ||
1698 | /* | 1698 | /* |
1699 | * Account IO completion. bar_rq isn't accounted as a normal | 1699 | * Account IO completion. bar_rq isn't accounted as a normal |
1700 | * IO on queueing nor completion. Accounting the containing | 1700 | * IO on queueing nor completion. Accounting the containing |
1701 | * request is enough. | 1701 | * request is enough. |
1702 | */ | 1702 | */ |
1703 | if (blk_fs_request(req) && req != &req->q->bar_rq) { | 1703 | if (blk_fs_request(req) && req != &req->q->bar_rq) { |
1704 | unsigned long duration = jiffies - req->start_time; | 1704 | unsigned long duration = jiffies - req->start_time; |
1705 | const int rw = rq_data_dir(req); | 1705 | const int rw = rq_data_dir(req); |
1706 | struct hd_struct *part; | 1706 | struct hd_struct *part; |
1707 | int cpu; | 1707 | int cpu; |
1708 | 1708 | ||
1709 | cpu = part_stat_lock(); | 1709 | cpu = part_stat_lock(); |
1710 | part = disk_map_sector_rcu(req->rq_disk, req->sector); | 1710 | part = disk_map_sector_rcu(req->rq_disk, req->sector); |
1711 | 1711 | ||
1712 | part_stat_inc(cpu, part, ios[rw]); | 1712 | part_stat_inc(cpu, part, ios[rw]); |
1713 | part_stat_add(cpu, part, ticks[rw], duration); | 1713 | part_stat_add(cpu, part, ticks[rw], duration); |
1714 | part_round_stats(cpu, part); | 1714 | part_round_stats(cpu, part); |
1715 | part_dec_in_flight(part); | 1715 | part_dec_in_flight(part); |
1716 | 1716 | ||
1717 | part_stat_unlock(); | 1717 | part_stat_unlock(); |
1718 | } | 1718 | } |
1719 | } | 1719 | } |
1720 | 1720 | ||
1721 | /** | 1721 | /** |
1722 | * __end_that_request_first - end I/O on a request | 1722 | * __end_that_request_first - end I/O on a request |
1723 | * @req: the request being processed | 1723 | * @req: the request being processed |
1724 | * @error: %0 for success, < %0 for error | 1724 | * @error: %0 for success, < %0 for error |
1725 | * @nr_bytes: number of bytes to complete | 1725 | * @nr_bytes: number of bytes to complete |
1726 | * | 1726 | * |
1727 | * Description: | 1727 | * Description: |
1728 | * Ends I/O on a number of bytes attached to @req, and sets it up | 1728 | * Ends I/O on a number of bytes attached to @req, and sets it up |
1729 | * for the next range of segments (if any) in the cluster. | 1729 | * for the next range of segments (if any) in the cluster. |
1730 | * | 1730 | * |
1731 | * Return: | 1731 | * Return: |
1732 | * %0 - we are done with this request, call end_that_request_last() | 1732 | * %0 - we are done with this request, call end_that_request_last() |
1733 | * %1 - still buffers pending for this request | 1733 | * %1 - still buffers pending for this request |
1734 | **/ | 1734 | **/ |
1735 | static int __end_that_request_first(struct request *req, int error, | 1735 | static int __end_that_request_first(struct request *req, int error, |
1736 | int nr_bytes) | 1736 | int nr_bytes) |
1737 | { | 1737 | { |
1738 | int total_bytes, bio_nbytes, next_idx = 0; | 1738 | int total_bytes, bio_nbytes, next_idx = 0; |
1739 | struct bio *bio; | 1739 | struct bio *bio; |
1740 | 1740 | ||
1741 | trace_block_rq_complete(req->q, req); | 1741 | trace_block_rq_complete(req->q, req); |
1742 | 1742 | ||
1743 | /* | 1743 | /* |
1744 | * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual | 1744 | * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual |
1745 | * sense key with us all the way through | 1745 | * sense key with us all the way through |
1746 | */ | 1746 | */ |
1747 | if (!blk_pc_request(req)) | 1747 | if (!blk_pc_request(req)) |
1748 | req->errors = 0; | 1748 | req->errors = 0; |
1749 | 1749 | ||
1750 | if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { | 1750 | if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { |
1751 | printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", | 1751 | printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", |
1752 | req->rq_disk ? req->rq_disk->disk_name : "?", | 1752 | req->rq_disk ? req->rq_disk->disk_name : "?", |
1753 | (unsigned long long)req->sector); | 1753 | (unsigned long long)req->sector); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | blk_account_io_completion(req, nr_bytes); | 1756 | blk_account_io_completion(req, nr_bytes); |
1757 | 1757 | ||
1758 | total_bytes = bio_nbytes = 0; | 1758 | total_bytes = bio_nbytes = 0; |
1759 | while ((bio = req->bio) != NULL) { | 1759 | while ((bio = req->bio) != NULL) { |
1760 | int nbytes; | 1760 | int nbytes; |
1761 | 1761 | ||
1762 | if (nr_bytes >= bio->bi_size) { | 1762 | if (nr_bytes >= bio->bi_size) { |
1763 | req->bio = bio->bi_next; | 1763 | req->bio = bio->bi_next; |
1764 | nbytes = bio->bi_size; | 1764 | nbytes = bio->bi_size; |
1765 | req_bio_endio(req, bio, nbytes, error); | 1765 | req_bio_endio(req, bio, nbytes, error); |
1766 | next_idx = 0; | 1766 | next_idx = 0; |
1767 | bio_nbytes = 0; | 1767 | bio_nbytes = 0; |
1768 | } else { | 1768 | } else { |
1769 | int idx = bio->bi_idx + next_idx; | 1769 | int idx = bio->bi_idx + next_idx; |
1770 | 1770 | ||
1771 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | 1771 | if (unlikely(idx >= bio->bi_vcnt)) { |
1772 | blk_dump_rq_flags(req, "__end_that"); | 1772 | blk_dump_rq_flags(req, "__end_that"); |
1773 | printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", | 1773 | printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", |
1774 | __func__, bio->bi_idx, bio->bi_vcnt); | 1774 | __func__, idx, bio->bi_vcnt); |
1775 | break; | 1775 | break; |
1776 | } | 1776 | } |
1777 | 1777 | ||
1778 | nbytes = bio_iovec_idx(bio, idx)->bv_len; | 1778 | nbytes = bio_iovec_idx(bio, idx)->bv_len; |
1779 | BIO_BUG_ON(nbytes > bio->bi_size); | 1779 | BIO_BUG_ON(nbytes > bio->bi_size); |
1780 | 1780 | ||
1781 | /* | 1781 | /* |
1782 | * not a complete bvec done | 1782 | * not a complete bvec done |
1783 | */ | 1783 | */ |
1784 | if (unlikely(nbytes > nr_bytes)) { | 1784 | if (unlikely(nbytes > nr_bytes)) { |
1785 | bio_nbytes += nr_bytes; | 1785 | bio_nbytes += nr_bytes; |
1786 | total_bytes += nr_bytes; | 1786 | total_bytes += nr_bytes; |
1787 | break; | 1787 | break; |
1788 | } | 1788 | } |
1789 | 1789 | ||
1790 | /* | 1790 | /* |
1791 | * advance to the next vector | 1791 | * advance to the next vector |
1792 | */ | 1792 | */ |
1793 | next_idx++; | 1793 | next_idx++; |
1794 | bio_nbytes += nbytes; | 1794 | bio_nbytes += nbytes; |
1795 | } | 1795 | } |
1796 | 1796 | ||
1797 | total_bytes += nbytes; | 1797 | total_bytes += nbytes; |
1798 | nr_bytes -= nbytes; | 1798 | nr_bytes -= nbytes; |
1799 | 1799 | ||
1800 | bio = req->bio; | 1800 | bio = req->bio; |
1801 | if (bio) { | 1801 | if (bio) { |
1802 | /* | 1802 | /* |
1803 | * end more in this run, or just return 'not-done' | 1803 | * end more in this run, or just return 'not-done' |
1804 | */ | 1804 | */ |
1805 | if (unlikely(nr_bytes <= 0)) | 1805 | if (unlikely(nr_bytes <= 0)) |
1806 | break; | 1806 | break; |
1807 | } | 1807 | } |
1808 | } | 1808 | } |
1809 | 1809 | ||
1810 | /* | 1810 | /* |
1811 | * completely done | 1811 | * completely done |
1812 | */ | 1812 | */ |
1813 | if (!req->bio) | 1813 | if (!req->bio) |
1814 | return 0; | 1814 | return 0; |
1815 | 1815 | ||
1816 | /* | 1816 | /* |
1817 | * if the request wasn't completed, update state | 1817 | * if the request wasn't completed, update state |
1818 | */ | 1818 | */ |
1819 | if (bio_nbytes) { | 1819 | if (bio_nbytes) { |
1820 | req_bio_endio(req, bio, bio_nbytes, error); | 1820 | req_bio_endio(req, bio, bio_nbytes, error); |
1821 | bio->bi_idx += next_idx; | 1821 | bio->bi_idx += next_idx; |
1822 | bio_iovec(bio)->bv_offset += nr_bytes; | 1822 | bio_iovec(bio)->bv_offset += nr_bytes; |
1823 | bio_iovec(bio)->bv_len -= nr_bytes; | 1823 | bio_iovec(bio)->bv_len -= nr_bytes; |
1824 | } | 1824 | } |
1825 | 1825 | ||
1826 | blk_recalc_rq_sectors(req, total_bytes >> 9); | 1826 | blk_recalc_rq_sectors(req, total_bytes >> 9); |
1827 | blk_recalc_rq_segments(req); | 1827 | blk_recalc_rq_segments(req); |
1828 | return 1; | 1828 | return 1; |
1829 | } | 1829 | } |
1830 | 1830 | ||
1831 | /* | 1831 | /* |
1832 | * queue lock must be held | 1832 | * queue lock must be held |
1833 | */ | 1833 | */ |
1834 | static void end_that_request_last(struct request *req, int error) | 1834 | static void end_that_request_last(struct request *req, int error) |
1835 | { | 1835 | { |
1836 | if (blk_rq_tagged(req)) | 1836 | if (blk_rq_tagged(req)) |
1837 | blk_queue_end_tag(req->q, req); | 1837 | blk_queue_end_tag(req->q, req); |
1838 | 1838 | ||
1839 | if (blk_queued_rq(req)) | 1839 | if (blk_queued_rq(req)) |
1840 | elv_dequeue_request(req->q, req); | 1840 | elv_dequeue_request(req->q, req); |
1841 | 1841 | ||
1842 | if (unlikely(laptop_mode) && blk_fs_request(req)) | 1842 | if (unlikely(laptop_mode) && blk_fs_request(req)) |
1843 | laptop_io_completion(); | 1843 | laptop_io_completion(); |
1844 | 1844 | ||
1845 | blk_delete_timer(req); | 1845 | blk_delete_timer(req); |
1846 | 1846 | ||
1847 | blk_account_io_done(req); | 1847 | blk_account_io_done(req); |
1848 | 1848 | ||
1849 | if (req->end_io) | 1849 | if (req->end_io) |
1850 | req->end_io(req, error); | 1850 | req->end_io(req, error); |
1851 | else { | 1851 | else { |
1852 | if (blk_bidi_rq(req)) | 1852 | if (blk_bidi_rq(req)) |
1853 | __blk_put_request(req->next_rq->q, req->next_rq); | 1853 | __blk_put_request(req->next_rq->q, req->next_rq); |
1854 | 1854 | ||
1855 | __blk_put_request(req->q, req); | 1855 | __blk_put_request(req->q, req); |
1856 | } | 1856 | } |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | /** | 1859 | /** |
1860 | * blk_rq_bytes - Returns bytes left to complete in the entire request | 1860 | * blk_rq_bytes - Returns bytes left to complete in the entire request |
1861 | * @rq: the request being processed | 1861 | * @rq: the request being processed |
1862 | **/ | 1862 | **/ |
1863 | unsigned int blk_rq_bytes(struct request *rq) | 1863 | unsigned int blk_rq_bytes(struct request *rq) |
1864 | { | 1864 | { |
1865 | if (blk_fs_request(rq)) | 1865 | if (blk_fs_request(rq)) |
1866 | return rq->hard_nr_sectors << 9; | 1866 | return rq->hard_nr_sectors << 9; |
1867 | 1867 | ||
1868 | return rq->data_len; | 1868 | return rq->data_len; |
1869 | } | 1869 | } |
1870 | EXPORT_SYMBOL_GPL(blk_rq_bytes); | 1870 | EXPORT_SYMBOL_GPL(blk_rq_bytes); |
1871 | 1871 | ||
1872 | /** | 1872 | /** |
1873 | * blk_rq_cur_bytes - Returns bytes left to complete in the current segment | 1873 | * blk_rq_cur_bytes - Returns bytes left to complete in the current segment |
1874 | * @rq: the request being processed | 1874 | * @rq: the request being processed |
1875 | **/ | 1875 | **/ |
1876 | unsigned int blk_rq_cur_bytes(struct request *rq) | 1876 | unsigned int blk_rq_cur_bytes(struct request *rq) |
1877 | { | 1877 | { |
1878 | if (blk_fs_request(rq)) | 1878 | if (blk_fs_request(rq)) |
1879 | return rq->current_nr_sectors << 9; | 1879 | return rq->current_nr_sectors << 9; |
1880 | 1880 | ||
1881 | if (rq->bio) | 1881 | if (rq->bio) |
1882 | return rq->bio->bi_size; | 1882 | return rq->bio->bi_size; |
1883 | 1883 | ||
1884 | return rq->data_len; | 1884 | return rq->data_len; |
1885 | } | 1885 | } |
1886 | EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); | 1886 | EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); |
1887 | 1887 | ||
1888 | /** | 1888 | /** |
1889 | * end_request - end I/O on the current segment of the request | 1889 | * end_request - end I/O on the current segment of the request |
1890 | * @req: the request being processed | 1890 | * @req: the request being processed |
1891 | * @uptodate: error value or %0/%1 uptodate flag | 1891 | * @uptodate: error value or %0/%1 uptodate flag |
1892 | * | 1892 | * |
1893 | * Description: | 1893 | * Description: |
1894 | * Ends I/O on the current segment of a request. If that is the only | 1894 | * Ends I/O on the current segment of a request. If that is the only |
1895 | * remaining segment, the request is also completed and freed. | 1895 | * remaining segment, the request is also completed and freed. |
1896 | * | 1896 | * |
1897 | * This is a remnant of how older block drivers handled I/O completions. | 1897 | * This is a remnant of how older block drivers handled I/O completions. |
1898 | * Modern drivers typically end I/O on the full request in one go, unless | 1898 | * Modern drivers typically end I/O on the full request in one go, unless |
1899 | * they have a residual value to account for. For that case this function | 1899 | * they have a residual value to account for. For that case this function |
1900 | * isn't really useful, unless the residual just happens to be the | 1900 | * isn't really useful, unless the residual just happens to be the |
1901 | * full current segment. In other words, don't use this function in new | 1901 | * full current segment. In other words, don't use this function in new |
1902 | * code. Use blk_end_request() or __blk_end_request() to end a request. | 1902 | * code. Use blk_end_request() or __blk_end_request() to end a request. |
1903 | **/ | 1903 | **/ |
1904 | void end_request(struct request *req, int uptodate) | 1904 | void end_request(struct request *req, int uptodate) |
1905 | { | 1905 | { |
1906 | int error = 0; | 1906 | int error = 0; |
1907 | 1907 | ||
1908 | if (uptodate <= 0) | 1908 | if (uptodate <= 0) |
1909 | error = uptodate ? uptodate : -EIO; | 1909 | error = uptodate ? uptodate : -EIO; |
1910 | 1910 | ||
1911 | __blk_end_request(req, error, req->hard_cur_sectors << 9); | 1911 | __blk_end_request(req, error, req->hard_cur_sectors << 9); |
1912 | } | 1912 | } |
1913 | EXPORT_SYMBOL(end_request); | 1913 | EXPORT_SYMBOL(end_request); |
1914 | 1914 | ||
1915 | static int end_that_request_data(struct request *rq, int error, | 1915 | static int end_that_request_data(struct request *rq, int error, |
1916 | unsigned int nr_bytes, unsigned int bidi_bytes) | 1916 | unsigned int nr_bytes, unsigned int bidi_bytes) |
1917 | { | 1917 | { |
1918 | if (rq->bio) { | 1918 | if (rq->bio) { |
1919 | if (__end_that_request_first(rq, error, nr_bytes)) | 1919 | if (__end_that_request_first(rq, error, nr_bytes)) |
1920 | return 1; | 1920 | return 1; |
1921 | 1921 | ||
1922 | /* Bidi request must be completed as a whole */ | 1922 | /* Bidi request must be completed as a whole */ |
1923 | if (blk_bidi_rq(rq) && | 1923 | if (blk_bidi_rq(rq) && |
1924 | __end_that_request_first(rq->next_rq, error, bidi_bytes)) | 1924 | __end_that_request_first(rq->next_rq, error, bidi_bytes)) |
1925 | return 1; | 1925 | return 1; |
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | return 0; | 1928 | return 0; |
1929 | } | 1929 | } |
1930 | 1930 | ||
1931 | /** | 1931 | /** |
1932 | * blk_end_io - Generic end_io function to complete a request. | 1932 | * blk_end_io - Generic end_io function to complete a request. |
1933 | * @rq: the request being processed | 1933 | * @rq: the request being processed |
1934 | * @error: %0 for success, < %0 for error | 1934 | * @error: %0 for success, < %0 for error |
1935 | * @nr_bytes: number of bytes to complete @rq | 1935 | * @nr_bytes: number of bytes to complete @rq |
1936 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 1936 | * @bidi_bytes: number of bytes to complete @rq->next_rq |
1937 | * @drv_callback: function called between completion of bios in the request | 1937 | * @drv_callback: function called between completion of bios in the request |
1938 | * and completion of the request. | 1938 | * and completion of the request. |
1939 | * If the callback returns non %0, this helper returns without | 1939 | * If the callback returns non %0, this helper returns without |
1940 | * completion of the request. | 1940 | * completion of the request. |
1941 | * | 1941 | * |
1942 | * Description: | 1942 | * Description: |
1943 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | 1943 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. |
1944 | * If @rq has leftover, sets it up for the next range of segments. | 1944 | * If @rq has leftover, sets it up for the next range of segments. |
1945 | * | 1945 | * |
1946 | * Return: | 1946 | * Return: |
1947 | * %0 - we are done with this request | 1947 | * %0 - we are done with this request |
1948 | * %1 - this request is not freed yet, it still has pending buffers. | 1948 | * %1 - this request is not freed yet, it still has pending buffers. |
1949 | **/ | 1949 | **/ |
1950 | static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, | 1950 | static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, |
1951 | unsigned int bidi_bytes, | 1951 | unsigned int bidi_bytes, |
1952 | int (drv_callback)(struct request *)) | 1952 | int (drv_callback)(struct request *)) |
1953 | { | 1953 | { |
1954 | struct request_queue *q = rq->q; | 1954 | struct request_queue *q = rq->q; |
1955 | unsigned long flags = 0UL; | 1955 | unsigned long flags = 0UL; |
1956 | 1956 | ||
1957 | if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) | 1957 | if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) |
1958 | return 1; | 1958 | return 1; |
1959 | 1959 | ||
1960 | /* Special feature for tricky drivers */ | 1960 | /* Special feature for tricky drivers */ |
1961 | if (drv_callback && drv_callback(rq)) | 1961 | if (drv_callback && drv_callback(rq)) |
1962 | return 1; | 1962 | return 1; |
1963 | 1963 | ||
1964 | add_disk_randomness(rq->rq_disk); | 1964 | add_disk_randomness(rq->rq_disk); |
1965 | 1965 | ||
1966 | spin_lock_irqsave(q->queue_lock, flags); | 1966 | spin_lock_irqsave(q->queue_lock, flags); |
1967 | end_that_request_last(rq, error); | 1967 | end_that_request_last(rq, error); |
1968 | spin_unlock_irqrestore(q->queue_lock, flags); | 1968 | spin_unlock_irqrestore(q->queue_lock, flags); |
1969 | 1969 | ||
1970 | return 0; | 1970 | return 0; |
1971 | } | 1971 | } |
1972 | 1972 | ||
1973 | /** | 1973 | /** |
1974 | * blk_end_request - Helper function for drivers to complete the request. | 1974 | * blk_end_request - Helper function for drivers to complete the request. |
1975 | * @rq: the request being processed | 1975 | * @rq: the request being processed |
1976 | * @error: %0 for success, < %0 for error | 1976 | * @error: %0 for success, < %0 for error |
1977 | * @nr_bytes: number of bytes to complete | 1977 | * @nr_bytes: number of bytes to complete |
1978 | * | 1978 | * |
1979 | * Description: | 1979 | * Description: |
1980 | * Ends I/O on a number of bytes attached to @rq. | 1980 | * Ends I/O on a number of bytes attached to @rq. |
1981 | * If @rq has leftover, sets it up for the next range of segments. | 1981 | * If @rq has leftover, sets it up for the next range of segments. |
1982 | * | 1982 | * |
1983 | * Return: | 1983 | * Return: |
1984 | * %0 - we are done with this request | 1984 | * %0 - we are done with this request |
1985 | * %1 - still buffers pending for this request | 1985 | * %1 - still buffers pending for this request |
1986 | **/ | 1986 | **/ |
1987 | int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 1987 | int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) |
1988 | { | 1988 | { |
1989 | return blk_end_io(rq, error, nr_bytes, 0, NULL); | 1989 | return blk_end_io(rq, error, nr_bytes, 0, NULL); |
1990 | } | 1990 | } |
1991 | EXPORT_SYMBOL_GPL(blk_end_request); | 1991 | EXPORT_SYMBOL_GPL(blk_end_request); |
1992 | 1992 | ||
1993 | /** | 1993 | /** |
1994 | * __blk_end_request - Helper function for drivers to complete the request. | 1994 | * __blk_end_request - Helper function for drivers to complete the request. |
1995 | * @rq: the request being processed | 1995 | * @rq: the request being processed |
1996 | * @error: %0 for success, < %0 for error | 1996 | * @error: %0 for success, < %0 for error |
1997 | * @nr_bytes: number of bytes to complete | 1997 | * @nr_bytes: number of bytes to complete |
1998 | * | 1998 | * |
1999 | * Description: | 1999 | * Description: |
2000 | * Must be called with queue lock held unlike blk_end_request(). | 2000 | * Must be called with queue lock held unlike blk_end_request(). |
2001 | * | 2001 | * |
2002 | * Return: | 2002 | * Return: |
2003 | * %0 - we are done with this request | 2003 | * %0 - we are done with this request |
2004 | * %1 - still buffers pending for this request | 2004 | * %1 - still buffers pending for this request |
2005 | **/ | 2005 | **/ |
2006 | int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 2006 | int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) |
2007 | { | 2007 | { |
2008 | if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) | 2008 | if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) |
2009 | return 1; | 2009 | return 1; |
2010 | 2010 | ||
2011 | add_disk_randomness(rq->rq_disk); | 2011 | add_disk_randomness(rq->rq_disk); |
2012 | 2012 | ||
2013 | end_that_request_last(rq, error); | 2013 | end_that_request_last(rq, error); |
2014 | 2014 | ||
2015 | return 0; | 2015 | return 0; |
2016 | } | 2016 | } |
2017 | EXPORT_SYMBOL_GPL(__blk_end_request); | 2017 | EXPORT_SYMBOL_GPL(__blk_end_request); |
2018 | 2018 | ||
2019 | /** | 2019 | /** |
2020 | * blk_end_bidi_request - Helper function for drivers to complete bidi request. | 2020 | * blk_end_bidi_request - Helper function for drivers to complete bidi request. |
2021 | * @rq: the bidi request being processed | 2021 | * @rq: the bidi request being processed |
2022 | * @error: %0 for success, < %0 for error | 2022 | * @error: %0 for success, < %0 for error |
2023 | * @nr_bytes: number of bytes to complete @rq | 2023 | * @nr_bytes: number of bytes to complete @rq |
2024 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 2024 | * @bidi_bytes: number of bytes to complete @rq->next_rq |
2025 | * | 2025 | * |
2026 | * Description: | 2026 | * Description: |
2027 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | 2027 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. |
2028 | * | 2028 | * |
2029 | * Return: | 2029 | * Return: |
2030 | * %0 - we are done with this request | 2030 | * %0 - we are done with this request |
2031 | * %1 - still buffers pending for this request | 2031 | * %1 - still buffers pending for this request |
2032 | **/ | 2032 | **/ |
2033 | int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, | 2033 | int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, |
2034 | unsigned int bidi_bytes) | 2034 | unsigned int bidi_bytes) |
2035 | { | 2035 | { |
2036 | return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); | 2036 | return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); |
2037 | } | 2037 | } |
2038 | EXPORT_SYMBOL_GPL(blk_end_bidi_request); | 2038 | EXPORT_SYMBOL_GPL(blk_end_bidi_request); |
2039 | 2039 | ||
2040 | /** | 2040 | /** |
2041 | * blk_update_request - Special helper function for request stacking drivers | 2041 | * blk_update_request - Special helper function for request stacking drivers |
2042 | * @rq: the request being processed | 2042 | * @rq: the request being processed |
2043 | * @error: %0 for success, < %0 for error | 2043 | * @error: %0 for success, < %0 for error |
2044 | * @nr_bytes: number of bytes to complete @rq | 2044 | * @nr_bytes: number of bytes to complete @rq |
2045 | * | 2045 | * |
2046 | * Description: | 2046 | * Description: |
2047 | * Ends I/O on a number of bytes attached to @rq, but doesn't complete | 2047 | * Ends I/O on a number of bytes attached to @rq, but doesn't complete |
2048 | * the request structure even if @rq doesn't have leftover. | 2048 | * the request structure even if @rq doesn't have leftover. |
2049 | * If @rq has leftover, sets it up for the next range of segments. | 2049 | * If @rq has leftover, sets it up for the next range of segments. |
2050 | * | 2050 | * |
2051 | * This special helper function is only for request stacking drivers | 2051 | * This special helper function is only for request stacking drivers |
2052 | * (e.g. request-based dm) so that they can handle partial completion. | 2052 | * (e.g. request-based dm) so that they can handle partial completion. |
2053 | * Actual device drivers should use blk_end_request instead. | 2053 | * Actual device drivers should use blk_end_request instead. |
2054 | */ | 2054 | */ |
2055 | void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) | 2055 | void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) |
2056 | { | 2056 | { |
2057 | if (!end_that_request_data(rq, error, nr_bytes, 0)) { | 2057 | if (!end_that_request_data(rq, error, nr_bytes, 0)) { |
2058 | /* | 2058 | /* |
2059 | * These members are not updated in end_that_request_data() | 2059 | * These members are not updated in end_that_request_data() |
2060 | * when all bios are completed. | 2060 | * when all bios are completed. |
2061 | * Update them so that the request stacking driver can find | 2061 | * Update them so that the request stacking driver can find |
2062 | * how many bytes remain in the request later. | 2062 | * how many bytes remain in the request later. |
2063 | */ | 2063 | */ |
2064 | rq->nr_sectors = rq->hard_nr_sectors = 0; | 2064 | rq->nr_sectors = rq->hard_nr_sectors = 0; |
2065 | rq->current_nr_sectors = rq->hard_cur_sectors = 0; | 2065 | rq->current_nr_sectors = rq->hard_cur_sectors = 0; |
2066 | } | 2066 | } |
2067 | } | 2067 | } |
2068 | EXPORT_SYMBOL_GPL(blk_update_request); | 2068 | EXPORT_SYMBOL_GPL(blk_update_request); |
2069 | 2069 | ||
2070 | /** | 2070 | /** |
2071 | * blk_end_request_callback - Special helper function for tricky drivers | 2071 | * blk_end_request_callback - Special helper function for tricky drivers |
2072 | * @rq: the request being processed | 2072 | * @rq: the request being processed |
2073 | * @error: %0 for success, < %0 for error | 2073 | * @error: %0 for success, < %0 for error |
2074 | * @nr_bytes: number of bytes to complete | 2074 | * @nr_bytes: number of bytes to complete |
2075 | * @drv_callback: function called between completion of bios in the request | 2075 | * @drv_callback: function called between completion of bios in the request |
2076 | * and completion of the request. | 2076 | * and completion of the request. |
2077 | * If the callback returns non %0, this helper returns without | 2077 | * If the callback returns non %0, this helper returns without |
2078 | * completion of the request. | 2078 | * completion of the request. |
2079 | * | 2079 | * |
2080 | * Description: | 2080 | * Description: |
2081 | * Ends I/O on a number of bytes attached to @rq. | 2081 | * Ends I/O on a number of bytes attached to @rq. |
2082 | * If @rq has leftover, sets it up for the next range of segments. | 2082 | * If @rq has leftover, sets it up for the next range of segments. |
2083 | * | 2083 | * |
2084 | * This special helper function is used only for existing tricky drivers. | 2084 | * This special helper function is used only for existing tricky drivers. |
2085 | * (e.g. cdrom_newpc_intr() of ide-cd) | 2085 | * (e.g. cdrom_newpc_intr() of ide-cd) |
2086 | * This interface will be removed when such drivers are rewritten. | 2086 | * This interface will be removed when such drivers are rewritten. |
2087 | * Don't use this interface in other places anymore. | 2087 | * Don't use this interface in other places anymore. |
2088 | * | 2088 | * |
2089 | * Return: | 2089 | * Return: |
2090 | * %0 - we are done with this request | 2090 | * %0 - we are done with this request |
2091 | * %1 - this request is not freed yet. | 2091 | * %1 - this request is not freed yet. |
2092 | * this request still has pending buffers or | 2092 | * this request still has pending buffers or |
2093 | * the driver doesn't want to finish this request yet. | 2093 | * the driver doesn't want to finish this request yet. |
2094 | **/ | 2094 | **/ |
2095 | int blk_end_request_callback(struct request *rq, int error, | 2095 | int blk_end_request_callback(struct request *rq, int error, |
2096 | unsigned int nr_bytes, | 2096 | unsigned int nr_bytes, |
2097 | int (drv_callback)(struct request *)) | 2097 | int (drv_callback)(struct request *)) |
2098 | { | 2098 | { |
2099 | return blk_end_io(rq, error, nr_bytes, 0, drv_callback); | 2099 | return blk_end_io(rq, error, nr_bytes, 0, drv_callback); |
2100 | } | 2100 | } |
2101 | EXPORT_SYMBOL_GPL(blk_end_request_callback); | 2101 | EXPORT_SYMBOL_GPL(blk_end_request_callback); |
2102 | 2102 | ||
2103 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 2103 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
2104 | struct bio *bio) | 2104 | struct bio *bio) |
2105 | { | 2105 | { |
2106 | /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and | 2106 | /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and |
2107 | we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */ | 2107 | we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */ |
2108 | rq->cmd_flags |= (bio->bi_rw & 3); | 2108 | rq->cmd_flags |= (bio->bi_rw & 3); |
2109 | 2109 | ||
2110 | if (bio_has_data(bio)) { | 2110 | if (bio_has_data(bio)) { |
2111 | rq->nr_phys_segments = bio_phys_segments(q, bio); | 2111 | rq->nr_phys_segments = bio_phys_segments(q, bio); |
2112 | rq->buffer = bio_data(bio); | 2112 | rq->buffer = bio_data(bio); |
2113 | } | 2113 | } |
2114 | rq->current_nr_sectors = bio_cur_sectors(bio); | 2114 | rq->current_nr_sectors = bio_cur_sectors(bio); |
2115 | rq->hard_cur_sectors = rq->current_nr_sectors; | 2115 | rq->hard_cur_sectors = rq->current_nr_sectors; |
2116 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | 2116 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); |
2117 | rq->data_len = bio->bi_size; | 2117 | rq->data_len = bio->bi_size; |
2118 | 2118 | ||
2119 | rq->bio = rq->biotail = bio; | 2119 | rq->bio = rq->biotail = bio; |
2120 | 2120 | ||
2121 | if (bio->bi_bdev) | 2121 | if (bio->bi_bdev) |
2122 | rq->rq_disk = bio->bi_bdev->bd_disk; | 2122 | rq->rq_disk = bio->bi_bdev->bd_disk; |
2123 | } | 2123 | } |
2124 | 2124 | ||
2125 | /** | 2125 | /** |
2126 | * blk_lld_busy - Check if underlying low-level drivers of a device are busy | 2126 | * blk_lld_busy - Check if underlying low-level drivers of a device are busy |
2127 | * @q : the queue of the device being checked | 2127 | * @q : the queue of the device being checked |
2128 | * | 2128 | * |
2129 | * Description: | 2129 | * Description: |
2130 | * Check if underlying low-level drivers of a device are busy. | 2130 | * Check if underlying low-level drivers of a device are busy. |
2131 | * If the drivers want to export their busy state, they must set own | 2131 | * If the drivers want to export their busy state, they must set own |
2132 | * exporting function using blk_queue_lld_busy() first. | 2132 | * exporting function using blk_queue_lld_busy() first. |
2133 | * | 2133 | * |
2134 | * Basically, this function is used only by request stacking drivers | 2134 | * Basically, this function is used only by request stacking drivers |
2135 | * to stop dispatching requests to underlying devices when underlying | 2135 | * to stop dispatching requests to underlying devices when underlying |
2136 | * devices are busy. This behavior helps more I/O merging on the queue | 2136 | * devices are busy. This behavior helps more I/O merging on the queue |
2137 | * of the request stacking driver and prevents I/O throughput regression | 2137 | * of the request stacking driver and prevents I/O throughput regression |
2138 | * on burst I/O load. | 2138 | * on burst I/O load. |
2139 | * | 2139 | * |
2140 | * Return: | 2140 | * Return: |
2141 | * 0 - Not busy (The request stacking driver should dispatch request) | 2141 | * 0 - Not busy (The request stacking driver should dispatch request) |
2142 | * 1 - Busy (The request stacking driver should stop dispatching request) | 2142 | * 1 - Busy (The request stacking driver should stop dispatching request) |
2143 | */ | 2143 | */ |
2144 | int blk_lld_busy(struct request_queue *q) | 2144 | int blk_lld_busy(struct request_queue *q) |
2145 | { | 2145 | { |
2146 | if (q->lld_busy_fn) | 2146 | if (q->lld_busy_fn) |
2147 | return q->lld_busy_fn(q); | 2147 | return q->lld_busy_fn(q); |
2148 | 2148 | ||
2149 | return 0; | 2149 | return 0; |
2150 | } | 2150 | } |
2151 | EXPORT_SYMBOL_GPL(blk_lld_busy); | 2151 | EXPORT_SYMBOL_GPL(blk_lld_busy); |
2152 | 2152 | ||
2153 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | 2153 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) |
2154 | { | 2154 | { |
2155 | return queue_work(kblockd_workqueue, work); | 2155 | return queue_work(kblockd_workqueue, work); |
2156 | } | 2156 | } |
2157 | EXPORT_SYMBOL(kblockd_schedule_work); | 2157 | EXPORT_SYMBOL(kblockd_schedule_work); |
2158 | 2158 | ||
2159 | int __init blk_dev_init(void) | 2159 | int __init blk_dev_init(void) |
2160 | { | 2160 | { |
2161 | kblockd_workqueue = create_workqueue("kblockd"); | 2161 | kblockd_workqueue = create_workqueue("kblockd"); |
2162 | if (!kblockd_workqueue) | 2162 | if (!kblockd_workqueue) |
2163 | panic("Failed to create kblockd\n"); | 2163 | panic("Failed to create kblockd\n"); |
2164 | 2164 | ||
2165 | request_cachep = kmem_cache_create("blkdev_requests", | 2165 | request_cachep = kmem_cache_create("blkdev_requests", |
2166 | sizeof(struct request), 0, SLAB_PANIC, NULL); | 2166 | sizeof(struct request), 0, SLAB_PANIC, NULL); |
2167 | 2167 | ||
2168 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", | 2168 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", |
2169 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | 2169 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); |
2170 | 2170 | ||
2171 | return 0; | 2171 | return 0; |
2172 | } | 2172 | } |
2173 | 2173 ||
2174 | 2174 ||
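
The two-line change above is easier to reason about with the vector walk pulled out on its own. Below is a minimal user-space sketch, not kernel code: toy_bio, toy_vec and toy_complete() are invented stand-ins for the real structures, and the scenario (a bio whose bi_size claims more data than its remaining vectors hold) is the kind of inconsistent state the defensive check exists to catch. Because bi_idx never moves inside the loop while next_idx does, only idx = bi_idx + next_idx can actually run past bi_vcnt, which is why the test has to use idx.

#include <stdio.h>

struct toy_vec {
	unsigned int bv_len;			/* bytes in this vector */
};

struct toy_bio {
	unsigned short bi_idx;			/* first un-completed vector */
	unsigned short bi_vcnt;			/* number of valid vectors */
	unsigned int bi_size;			/* bytes the bio claims remain */
	struct toy_vec vec[4];
};

/*
 * Walk the remaining vectors the way the "not the whole bio" branch of
 * __end_that_request_first() does; returns the bytes actually accounted.
 */
static unsigned int toy_complete(struct toy_bio *bio, unsigned int nr_bytes)
{
	unsigned int done = 0;
	int next_idx = 0;

	while (nr_bytes && nr_bytes < bio->bi_size) {
		int idx = bio->bi_idx + next_idx;

		/* the fixed test: idx, not bio->bi_idx */
		if (idx >= bio->bi_vcnt) {
			fprintf(stderr, "bio idx %d >= vcnt %d\n",
				idx, bio->bi_vcnt);
			break;
		}

		if (bio->vec[idx].bv_len > nr_bytes)
			break;			/* partial vector, stop here */

		done += bio->vec[idx].bv_len;
		nr_bytes -= bio->vec[idx].bv_len;
		next_idx++;			/* bi_idx stays put, idx advances */
	}
	return done;
}

int main(void)
{
	/*
	 * Two 1024-byte vectors remain (idx 1 and 2), but bi_size has been
	 * corrupted to claim 4096 outstanding bytes.
	 */
	struct toy_bio bio = {
		.bi_idx = 1, .bi_vcnt = 3, .bi_size = 4096,
		.vec = { { 1024 }, { 1024 }, { 1024 }, { 0 } },
	};

	printf("accounted %u bytes\n", toy_complete(&bio, 3072));
	return 0;
}

Built with a plain cc invocation, the sketch accounts 2048 bytes and then trips the bounds message on the third pass (idx == 3 == bi_vcnt); with the old test on bio->bi_idx (1 < 3, always true here) the walk would have kept indexing past the last valid vector unnoticed.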
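
A smaller detail worth calling out is the uptodate convention kept alive by end_request(): a positive value means success, zero means a generic failure, and a negative value is already an errno to pass through. The snippet below is a stand-alone restatement of that two-line conversion for illustration only; uptodate_to_error() is a made-up name used here, not a kernel helper.

#include <errno.h>
#include <stdio.h>

/* Same mapping end_request() applies before calling __blk_end_request(). */
static int uptodate_to_error(int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;
	return error;
}

int main(void)
{
	int samples[] = { 1, 0, -ENOMEM };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("uptodate %d -> error %d\n",
		       samples[i], uptodate_to_error(samples[i]));
	return 0;
}

On a typical glibc system this prints 0, -5 (EIO) and -12 (ENOMEM), matching the error argument the completion helpers above expect.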