Commit a6b3f7614ca690e49e934c291f707b0c19312194

Authored by Jan Kara
Committed by Jens Axboe
1 parent a5faeaf910

block: Reserve only one queue tag for sync IO if only 3 tags are available

When a device has three tags available, we still reserve two of them
for sync IO. That leaves only a single tag for async IO, such as
writeback from the flusher thread, which results in poor performance.

Allow async IO to consume two tags when the queue has three tags
available, so that we get decent async write performance.

This patch improves streaming write performance on a machine with such
a disk from ~21 MB/s to ~52 MB/s. Postmark throughput in the presence
of a streaming writer also improves from 8 to 12 transactions per
second, so sync IO does not appear to be harmed by a heavy async
writer.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Showing 1 changed file with 9 additions and 2 deletions
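Before the diff itself, a minimal sketch of what the change means for the
adjusted depth limit that blk_queue_start_tag() applies to async IO.
async_tag_limit() is a hypothetical helper written purely for illustration;
the actual patch adjusts max_depth inline, as shown in the hunk below.

	/*
	 * Illustration only (not part of the patch): depth limit applied
	 * to async IO for small tag counts.
	 *
	 *   bqt->max_depth   before the patch   after the patch
	 *         2                 1                  1
	 *         3                 1                  2   <-- the case fixed here
	 *         4                 2                  2
	 */
	static unsigned int async_tag_limit(unsigned int max_depth)
	{
		switch (max_depth) {
		case 2:
			return 1;
		case 3:
			return 2;	/* one tag kept for sync IO, two usable by async */
		default:
			return max_depth - 2;	/* two tags kept for sync IO */
		}
	}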

/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt. Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For externally managed @bqt frees the map. Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing to a device, yet leave
 *    queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have large enough real_max_depth.  just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *    queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
-		max_depth -= 2;
-		if (!max_depth)
+		switch (max_depth) {
+		case 2:
			max_depth = 1;
+			break;
+		case 3:
+			max_depth = 2;
+			break;
+		default:
+			max_depth -= 2;
+		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    readd all requests to the request queue in the right order.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);