Commit 71e75c97f97a9645d25fbf3d8e4165a558f18747

Authored by Christoph Hellwig
1 parent 7466501608

scsi: convert device_busy to atomic_t

Avoid taking the queue_lock to check the per-device queue limit.  Instead
we do an atomic_inc_return early on to grab our slot in the queue,
and if necessary decrement it after finishing all checks.

Unlike the host and target busy counters this doesn't allow us to avoid the
queue_lock in the request_fn due to the way the interface works, but it'll
allow us to prepare for using the blk-mq code, which doesn't use the
queue_lock at all, and it at least avoids a queue_lock round trip in
scsi_device_unbusy, which is still important given how busy the queue_lock
is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>

Showing 5 changed files with 40 additions and 28 deletions Side-by-side Diff

drivers/message/fusion/mptsas.c
... ... @@ -3763,7 +3763,7 @@
3763 3763 printk(MYIOC_s_DEBUG_FMT
3764 3764 "SDEV OUTSTANDING CMDS"
3765 3765 "%d\n", ioc->name,
3766   - sdev->device_busy));
  3766 + atomic_read(&sdev->device_busy)));
3767 3767 }
3768 3768  
3769 3769 }
drivers/scsi/scsi_lib.c
... ... @@ -302,9 +302,7 @@
302 302 spin_unlock_irqrestore(shost->host_lock, flags);
303 303 }
304 304  
305   - spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
306   - sdev->device_busy--;
307   - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
  305 + atomic_dec(&sdev->device_busy);
308 306 }
309 307  
310 308 /*
311 309  
... ... @@ -355,9 +353,9 @@
355 353  
356 354 static inline int scsi_device_is_busy(struct scsi_device *sdev)
357 355 {
358   - if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
  356 + if (atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
  357 + sdev->device_blocked)
359 358 return 1;
360   -
361 359 return 0;
362 360 }
363 361  
... ... @@ -1204,7 +1202,7 @@
1204 1202 * queue must be restarted, so we schedule a callback to happen
1205 1203 * shortly.
1206 1204 */
1207   - if (sdev->device_busy == 0)
  1205 + if (atomic_read(&sdev->device_busy) == 0)
1208 1206 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1209 1207 break;
1210 1208 default:
1211 1209  
1212 1210  
1213 1211  
1214 1212  
1215 1213  
1216 1214  
1217 1215  
... ... @@ -1255,26 +1253,33 @@
1255 1253 static inline int scsi_dev_queue_ready(struct request_queue *q,
1256 1254 struct scsi_device *sdev)
1257 1255 {
1258   - if (sdev->device_busy == 0 && sdev->device_blocked) {
  1256 + unsigned int busy;
  1257 +
  1258 + busy = atomic_inc_return(&sdev->device_busy) - 1;
  1259 + if (sdev->device_blocked) {
  1260 + if (busy)
  1261 + goto out_dec;
  1262 +
1259 1263 /*
1260 1264 * unblock after device_blocked iterates to zero
1261 1265 */
1262   - if (--sdev->device_blocked == 0) {
1263   - SCSI_LOG_MLQUEUE(3,
1264   - sdev_printk(KERN_INFO, sdev,
1265   - "unblocking device at zero depth\n"));
1266   - } else {
  1266 + if (--sdev->device_blocked != 0) {
1267 1267 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1268   - return 0;
  1268 + goto out_dec;
1269 1269 }
  1270 + SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
  1271 + "unblocking device at zero depth\n"));
1270 1272 }
1271   - if (scsi_device_is_busy(sdev))
1272   - return 0;
1273 1273  
  1274 + if (busy >= sdev->queue_depth)
  1275 + goto out_dec;
  1276 +
1274 1277 return 1;
  1278 +out_dec:
  1279 + atomic_dec(&sdev->device_busy);
  1280 + return 0;
1275 1281 }
1276 1282  
1277   -
1278 1283 /*
1279 1284 * scsi_target_queue_ready: checks if there we can send commands to target
1280 1285 * @sdev: scsi device on starget to check.
... ... @@ -1448,7 +1453,7 @@
1448 1453 * bump busy counts. To bump the counters, we need to dance
1449 1454 * with the locks as normal issue path does.
1450 1455 */
1451   - sdev->device_busy++;
  1456 + atomic_inc(&sdev->device_busy);
1452 1457 atomic_inc(&shost->host_busy);
1453 1458 atomic_inc(&starget->target_busy);
1454 1459  
... ... @@ -1544,7 +1549,7 @@
1544 1549 * accept it.
1545 1550 */
1546 1551 req = blk_peek_request(q);
1547   - if (!req || !scsi_dev_queue_ready(q, sdev))
  1552 + if (!req)
1548 1553 break;
1549 1554  
1550 1555 if (unlikely(!scsi_device_online(sdev))) {
1551 1556  
... ... @@ -1554,13 +1559,14 @@
1554 1559 continue;
1555 1560 }
1556 1561  
  1562 + if (!scsi_dev_queue_ready(q, sdev))
  1563 + break;
1557 1564  
1558 1565 /*
1559 1566 * Remove the request from the request list.
1560 1567 */
1561 1568 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1562 1569 blk_start_request(req);
1563   - sdev->device_busy++;
1564 1570  
1565 1571 spin_unlock_irq(q->queue_lock);
1566 1572 cmd = req->special;
1567 1573  
... ... @@ -1630,9 +1636,9 @@
1630 1636 */
1631 1637 spin_lock_irq(q->queue_lock);
1632 1638 blk_requeue_request(q, req);
1633   - sdev->device_busy--;
  1639 + atomic_dec(&sdev->device_busy);
1634 1640 out_delay:
1635   - if (sdev->device_busy == 0 && !scsi_device_blocked(sdev))
  1641 + if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
1636 1642 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1637 1643 }
1638 1644  
... ... @@ -2371,7 +2377,7 @@
2371 2377 return err;
2372 2378  
2373 2379 scsi_run_queue(sdev->request_queue);
2374   - while (sdev->device_busy) {
  2380 + while (atomic_read(&sdev->device_busy)) {
2375 2381 msleep_interruptible(200);
2376 2382 scsi_run_queue(sdev->request_queue);
2377 2383 }
drivers/scsi/scsi_sysfs.c
... ... @@ -585,12 +585,20 @@
585 585 * Create the actual show/store functions and data structures.
586 586 */
587 587 sdev_rd_attr (device_blocked, "%d\n");
588   -sdev_rd_attr (device_busy, "%d\n");
589 588 sdev_rd_attr (type, "%d\n");
590 589 sdev_rd_attr (scsi_level, "%d\n");
591 590 sdev_rd_attr (vendor, "%.8s\n");
592 591 sdev_rd_attr (model, "%.16s\n");
593 592 sdev_rd_attr (rev, "%.4s\n");
  593 +
  594 +static ssize_t
  595 +sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
  596 + char *buf)
  597 +{
  598 + struct scsi_device *sdev = to_scsi_device(dev);
  599 + return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
  600 +}
  601 +static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
594 602  
595 603 /*
596 604 * TODO: can we make these symlinks to the block layer ones?
drivers/scsi/sg.c
... ... @@ -2574,7 +2574,7 @@
2574 2574 scsidp->id, scsidp->lun, (int) scsidp->type,
2575 2575 1,
2576 2576 (int) scsidp->queue_depth,
2577   - (int) scsidp->device_busy,
  2577 + (int) atomic_read(&scsidp->device_busy),
2578 2578 (int) scsi_device_online(scsidp));
2579 2579 }
2580 2580 read_unlock_irqrestore(&sg_index_lock, iflags);
include/scsi/scsi_device.h
... ... @@ -81,9 +81,7 @@
81 81 struct list_head siblings; /* list of all devices on this host */
82 82 struct list_head same_target_siblings; /* just the devices sharing same target id */
83 83  
84   - /* this is now protected by the request_queue->queue_lock */
85   - unsigned int device_busy; /* commands actually active on
86   - * low-level. protected by queue_lock. */
  84 + atomic_t device_busy; /* commands actually active on LLDD */
87 85 spinlock_t list_lock;
88 86 struct list_head cmd_list; /* queue of in use SCSI Command structures */
89 87 struct list_head starved_entry;