Commit 86072d8112595ea1b6beeb33f578e7c2839e014e
Committed by
Jens Axboe
1 parent
a91a5ac685
Exists in
master
and in
20 other branches
block: drop custom queue draining used by scsi_transport_{iscsi|fc}
iscsi_remove_host() uses bsg_remove_queue() which implements custom queue draining. fc_bsg_remove() open-codes mostly identical logic. The draining logic isn't correct in that blk_stop_queue() doesn't prevent new requests from being queued - it just stops processing, so nothing prevents new requests from being queued after the logic determines that the queue is drained. blk_cleanup_queue() now implements proper queue draining and these custom draining logics aren't necessary. Drop them and use bsg_unregister_queue() + blk_cleanup_queue() instead. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Mike Christie <michaelc@cs.wisc.edu> Acked-by: Vivek Goyal <vgoyal@redhat.com> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@emulex.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Showing 4 changed files with 1 addition and 93 deletions Side-by-side Diff
block/bsg-lib.c
... | ... | @@ -243,57 +243,4 @@ |
243 | 243 | return 0; |
244 | 244 | } |
245 | 245 | EXPORT_SYMBOL_GPL(bsg_setup_queue); |
246 | - | |
247 | -/** | |
248 | - * bsg_remove_queue - Deletes the bsg dev from the q | |
249 | - * @q: the request_queue that is to be torn down. | |
250 | - * | |
251 | - * Notes: | |
252 | - * Before unregistering the queue empty any requests that are blocked | |
253 | - */ | |
254 | -void bsg_remove_queue(struct request_queue *q) | |
255 | -{ | |
256 | - struct request *req; /* block request */ | |
257 | - int counts; /* totals for request_list count and starved */ | |
258 | - | |
259 | - if (!q) | |
260 | - return; | |
261 | - | |
262 | - /* Stop taking in new requests */ | |
263 | - spin_lock_irq(q->queue_lock); | |
264 | - blk_stop_queue(q); | |
265 | - | |
266 | - /* drain all requests in the queue */ | |
267 | - while (1) { | |
268 | - /* need the lock to fetch a request | |
269 | - * this may fetch the same reqeust as the previous pass | |
270 | - */ | |
271 | - req = blk_fetch_request(q); | |
272 | - /* save requests in use and starved */ | |
273 | - counts = q->rq.count[0] + q->rq.count[1] + | |
274 | - q->rq.starved[0] + q->rq.starved[1]; | |
275 | - spin_unlock_irq(q->queue_lock); | |
276 | - /* any requests still outstanding? */ | |
277 | - if (counts == 0) | |
278 | - break; | |
279 | - | |
280 | - /* This may be the same req as the previous iteration, | |
281 | - * always send the blk_end_request_all after a prefetch. | |
282 | - * It is not okay to not end the request because the | |
283 | - * prefetch started the request. | |
284 | - */ | |
285 | - if (req) { | |
286 | - /* return -ENXIO to indicate that this queue is | |
287 | - * going away | |
288 | - */ | |
289 | - req->errors = -ENXIO; | |
290 | - blk_end_request_all(req, -ENXIO); | |
291 | - } | |
292 | - | |
293 | - msleep(200); /* allow bsg to possibly finish */ | |
294 | - spin_lock_irq(q->queue_lock); | |
295 | - } | |
296 | - bsg_unregister_queue(q); | |
297 | -} | |
298 | -EXPORT_SYMBOL_GPL(bsg_remove_queue); |
drivers/scsi/scsi_transport_fc.c
... | ... | @@ -4130,45 +4130,7 @@ |
4130 | 4130 | static void |
4131 | 4131 | fc_bsg_remove(struct request_queue *q) |
4132 | 4132 | { |
4133 | - struct request *req; /* block request */ | |
4134 | - int counts; /* totals for request_list count and starved */ | |
4135 | - | |
4136 | 4133 | if (q) { |
4137 | - /* Stop taking in new requests */ | |
4138 | - spin_lock_irq(q->queue_lock); | |
4139 | - blk_stop_queue(q); | |
4140 | - | |
4141 | - /* drain all requests in the queue */ | |
4142 | - while (1) { | |
4143 | - /* need the lock to fetch a request | |
4144 | - * this may fetch the same reqeust as the previous pass | |
4145 | - */ | |
4146 | - req = blk_fetch_request(q); | |
4147 | - /* save requests in use and starved */ | |
4148 | - counts = q->rq.count[0] + q->rq.count[1] + | |
4149 | - q->rq.starved[0] + q->rq.starved[1]; | |
4150 | - spin_unlock_irq(q->queue_lock); | |
4151 | - /* any requests still outstanding? */ | |
4152 | - if (counts == 0) | |
4153 | - break; | |
4154 | - | |
4155 | - /* This may be the same req as the previous iteration, | |
4156 | - * always send the blk_end_request_all after a prefetch. | |
4157 | - * It is not okay to not end the request because the | |
4158 | - * prefetch started the request. | |
4159 | - */ | |
4160 | - if (req) { | |
4161 | - /* return -ENXIO to indicate that this queue is | |
4162 | - * going away | |
4163 | - */ | |
4164 | - req->errors = -ENXIO; | |
4165 | - blk_end_request_all(req, -ENXIO); | |
4166 | - } | |
4167 | - | |
4168 | - msleep(200); /* allow bsg to possibly finish */ | |
4169 | - spin_lock_irq(q->queue_lock); | |
4170 | - } | |
4171 | - | |
4172 | 4134 | bsg_unregister_queue(q); |
4173 | 4135 | blk_cleanup_queue(q); |
4174 | 4136 | } |
drivers/scsi/scsi_transport_iscsi.c
include/linux/bsg-lib.h
... | ... | @@ -67,7 +67,6 @@ |
67 | 67 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, |
68 | 68 | bsg_job_fn *job_fn, int dd_job_size); |
69 | 69 | void bsg_request_fn(struct request_queue *q); |
70 | -void bsg_remove_queue(struct request_queue *q); | |
71 | 70 | void bsg_goose_queue(struct request_queue *q); |
72 | 71 | |
73 | 72 | #endif |