Commit 74665016086615bbaa3fa6f83af410a0a4e029ee

Authored by Christoph Hellwig
1 parent 7ae65c0f96

scsi: convert host_busy to atomic_t

Avoid taking the host-wide host_lock to check the per-host queue limit.
Instead we do an atomic_inc_return early on to grab our slot in the queue,
and if necessary decrement it after finishing all checks.
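The pattern is the optimistic claim-then-verify used below in scsi_host_queue_ready(): bump the counter first, and only undo the increment (or fall back to host_lock) when a later check fails. A minimal userspace sketch of that idea, using C11 atomics rather than the kernel's atomic_t API, with illustrative names (queue_ready, MAX_QUEUE, host_busy here is a local stand-in, not the Scsi_Host field):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_QUEUE 4                     /* illustrative per-host limit */

    static atomic_uint host_busy;           /* commands in flight, starts at 0 */

    /* Claim a slot optimistically; back the increment out if over the limit. */
    static bool queue_ready(void)
    {
            /* atomic_fetch_add() returns the value *before* the increment */
            unsigned int busy = atomic_fetch_add(&host_busy, 1);

            if (busy >= MAX_QUEUE) {
                    atomic_fetch_sub(&host_busy, 1);    /* undo our claim */
                    return false;
            }
            return true;                    /* slot claimed without a lock */
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++)
                    printf("request %d: %s\n", i,
                           queue_ready() ? "queued" : "rejected");
            return 0;
    }

Note that atomic_fetch_add() returns the pre-increment value, which is why the patch itself uses atomic_inc_return(&shost->host_busy) - 1 to obtain the equivalent count of commands that were busy before this one.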

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>

Showing 9 changed files with 69 additions and 48 deletions

drivers/scsi/advansys.c
... ... @@ -2512,7 +2512,7 @@
2512 2512  
2513 2513 printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
2514 2514 printk(" host_busy %u, host_no %d,\n",
2515   - s->host_busy, s->host_no);
  2515 + atomic_read(&s->host_busy), s->host_no);
2516 2516  
2517 2517 printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
2518 2518 (ulong)s->base, (ulong)s->io_port, boardp->irq);
... ... @@ -3346,7 +3346,7 @@
3346 3346  
3347 3347 seq_printf(m,
3348 3348 " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
3349   - shost->host_busy, shost->max_id,
  3349 + atomic_read(&shost->host_busy), shost->max_id,
3350 3350 shost->max_lun, shost->max_channel);
3351 3351  
3352 3352 seq_printf(m,
drivers/scsi/libiscsi.c
... ... @@ -2971,7 +2971,7 @@
2971 2971 */
2972 2972 for (;;) {
2973 2973 spin_lock_irqsave(session->host->host_lock, flags);
2974   - if (!session->host->host_busy) { /* OK for ERL == 0 */
  2974 + if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
2975 2975 spin_unlock_irqrestore(session->host->host_lock, flags);
2976 2976 break;
2977 2977 }
... ... @@ -2979,7 +2979,7 @@
2979 2979 msleep_interruptible(500);
2980 2980 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2981 2981 "host_busy %d host_failed %d\n",
2982   - session->host->host_busy,
  2982 + atomic_read(&session->host->host_busy),
2983 2983 session->host->host_failed);
2984 2984 /*
2985 2985 * force eh_abort() to unblock
drivers/scsi/libsas/sas_scsi_host.c
... ... @@ -813,7 +813,7 @@
813 813 spin_unlock_irq(shost->host_lock);
814 814  
815 815 SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
816   - __func__, shost->host_busy, shost->host_failed);
  816 + __func__, atomic_read(&shost->host_busy), shost->host_failed);
817 817 /*
818 818 * Deal with commands that still have SAS tasks (i.e. they didn't
819 819 * complete via the normal sas_task completion mechanism),
... ... @@ -858,7 +858,8 @@
858 858 goto retry;
859 859  
860 860 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
861   - __func__, shost->host_busy, shost->host_failed, tries);
  861 + __func__, atomic_read(&shost->host_busy),
  862 + shost->host_failed, tries);
862 863 }
863 864  
864 865 enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
drivers/scsi/qlogicpti.c
... ... @@ -959,7 +959,7 @@
959 959 /* Temporary workaround until bug is found and fixed (one bug has been found
960 960 already, but fixing it makes things even worse) -jj */
961 961 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
962   - host->can_queue = host->host_busy + num_free;
  962 + host->can_queue = atomic_read(&host->host_busy) + num_free;
963 963 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
964 964 }
965 965  
drivers/scsi/scsi.c
... ... @@ -600,7 +600,7 @@
600 600 if (level > 3)
601 601 scmd_printk(KERN_INFO, cmd,
602 602 "scsi host busy %d failed %d\n",
603   - cmd->device->host->host_busy,
  603 + atomic_read(&cmd->device->host->host_busy),
604 604 cmd->device->host->host_failed);
605 605 }
606 606 }
drivers/scsi/scsi_error.c
... ... @@ -59,7 +59,7 @@
59 59 /* called with shost->host_lock held */
60 60 void scsi_eh_wakeup(struct Scsi_Host *shost)
61 61 {
62   - if (shost->host_busy == shost->host_failed) {
  62 + if (atomic_read(&shost->host_busy) == shost->host_failed) {
63 63 trace_scsi_eh_wakeup(shost);
64 64 wake_up_process(shost->ehandler);
65 65 SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
... ... @@ -2164,7 +2164,7 @@
2164 2164 while (!kthread_should_stop()) {
2165 2165 set_current_state(TASK_INTERRUPTIBLE);
2166 2166 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2167   - shost->host_failed != shost->host_busy) {
  2167 + shost->host_failed != atomic_read(&shost->host_busy)) {
2168 2168 SCSI_LOG_ERROR_RECOVERY(1,
2169 2169 shost_printk(KERN_INFO, shost,
2170 2170 "scsi_eh_%d: sleeping\n",
... ... @@ -2178,7 +2178,8 @@
2178 2178 shost_printk(KERN_INFO, shost,
2179 2179 "scsi_eh_%d: waking up %d/%d/%d\n",
2180 2180 shost->host_no, shost->host_eh_scheduled,
2181   - shost->host_failed, shost->host_busy));
  2181 + shost->host_failed,
  2182 + atomic_read(&shost->host_busy)));
2182 2183  
2183 2184 /*
2184 2185 * We have a host that is failing for some reason. Figure out
drivers/scsi/scsi_lib.c
... ... @@ -292,14 +292,17 @@
292 292 struct scsi_target *starget = scsi_target(sdev);
293 293 unsigned long flags;
294 294  
295   - spin_lock_irqsave(shost->host_lock, flags);
296   - shost->host_busy--;
  295 + atomic_dec(&shost->host_busy);
297 296 atomic_dec(&starget->target_busy);
  297 +
298 298 if (unlikely(scsi_host_in_recovery(shost) &&
299   - (shost->host_failed || shost->host_eh_scheduled)))
  299 + (shost->host_failed || shost->host_eh_scheduled))) {
  300 + spin_lock_irqsave(shost->host_lock, flags);
300 301 scsi_eh_wakeup(shost);
301   - spin_unlock(shost->host_lock);
302   - spin_lock(sdev->request_queue->queue_lock);
  302 + spin_unlock_irqrestore(shost->host_lock, flags);
  303 + }
  304 +
  305 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
303 306 sdev->device_busy--;
304 307 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
305 308 }
... ... @@ -367,7 +370,8 @@
367 370  
368 371 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
369 372 {
370   - if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
  373 + if ((shost->can_queue > 0 &&
  374 + atomic_read(&shost->host_busy) >= shost->can_queue) ||
371 375 shost->host_blocked || shost->host_self_blocked)
372 376 return 1;
373 377  
... ... @@ -1334,38 +1338,54 @@
1334 1338 struct Scsi_Host *shost,
1335 1339 struct scsi_device *sdev)
1336 1340 {
1337   - int ret = 0;
  1341 + unsigned int busy;
1338 1342  
1339   - spin_lock_irq(shost->host_lock);
1340   -
1341 1343 if (scsi_host_in_recovery(shost))
1342   - goto out;
1343   - if (shost->host_busy == 0 && shost->host_blocked) {
  1344 + return 0;
  1345 +
  1346 + busy = atomic_inc_return(&shost->host_busy) - 1;
  1347 + if (shost->host_blocked) {
  1348 + if (busy)
  1349 + goto starved;
  1350 +
1344 1351 /*
1345 1352 * unblock after host_blocked iterates to zero
1346 1353 */
1347   - if (--shost->host_blocked != 0)
1348   - goto out;
  1354 + spin_lock_irq(shost->host_lock);
  1355 + if (--shost->host_blocked != 0) {
  1356 + spin_unlock_irq(shost->host_lock);
  1357 + goto out_dec;
  1358 + }
  1359 + spin_unlock_irq(shost->host_lock);
1349 1360  
1350 1361 SCSI_LOG_MLQUEUE(3,
1351 1362 shost_printk(KERN_INFO, shost,
1352 1363 "unblocking host at zero depth\n"));
1353 1364 }
1354   - if (scsi_host_is_busy(shost)) {
1355   - if (list_empty(&sdev->starved_entry))
1356   - list_add_tail(&sdev->starved_entry, &shost->starved_list);
1357   - goto out;
1358   - }
1359 1365  
  1366 + if (shost->can_queue > 0 && busy >= shost->can_queue)
  1367 + goto starved;
  1368 + if (shost->host_self_blocked)
  1369 + goto starved;
  1370 +
1360 1371 /* We're OK to process the command, so we can't be starved */
1361   - if (!list_empty(&sdev->starved_entry))
1362   - list_del_init(&sdev->starved_entry);
  1372 + if (!list_empty(&sdev->starved_entry)) {
  1373 + spin_lock_irq(shost->host_lock);
  1374 + if (!list_empty(&sdev->starved_entry))
  1375 + list_del_init(&sdev->starved_entry);
  1376 + spin_unlock_irq(shost->host_lock);
  1377 + }
1363 1378  
1364   - shost->host_busy++;
1365   - ret = 1;
1366   -out:
  1379 + return 1;
  1380 +
  1381 +starved:
  1382 + spin_lock_irq(shost->host_lock);
  1383 + if (list_empty(&sdev->starved_entry))
  1384 + list_add_tail(&sdev->starved_entry, &shost->starved_list);
1367 1385 spin_unlock_irq(shost->host_lock);
1368   - return ret;
  1386 +out_dec:
  1387 + atomic_dec(&shost->host_busy);
  1388 + return 0;
1369 1389 }
1370 1390  
1371 1391 /*
... ... @@ -1429,12 +1449,8 @@
1429 1449 * with the locks as normal issue path does.
1430 1450 */
1431 1451 sdev->device_busy++;
1432   - spin_unlock(sdev->request_queue->queue_lock);
1433   - spin_lock(shost->host_lock);
1434   - shost->host_busy++;
  1452 + atomic_inc(&shost->host_busy);
1435 1453 atomic_inc(&starget->target_busy);
1436   - spin_unlock(shost->host_lock);
1437   - spin_lock(sdev->request_queue->queue_lock);
1438 1454  
1439 1455 blk_complete_request(req);
1440 1456 }
drivers/scsi/scsi_sysfs.c
... ... @@ -334,7 +334,6 @@
334 334 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
335 335  
336 336 shost_rd_attr(unique_id, "%u\n");
337   -shost_rd_attr(host_busy, "%hu\n");
338 337 shost_rd_attr(cmd_per_lun, "%hd\n");
339 338 shost_rd_attr(can_queue, "%hd\n");
340 339 shost_rd_attr(sg_tablesize, "%hu\n");
... ... @@ -343,6 +342,14 @@
343 342 shost_rd_attr(prot_capabilities, "%u\n");
344 343 shost_rd_attr(prot_guard_type, "%hd\n");
345 344 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
  345 +
  346 +static ssize_t
  347 +show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
  348 +{
  349 + struct Scsi_Host *shost = class_to_shost(dev);
  350 + return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
  351 +}
  352 +static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
346 353  
347 354 static struct attribute *scsi_sysfs_shost_attrs[] = {
348 355 &dev_attr_unique_id.attr,
include/scsi/scsi_host.h
... ... @@ -582,13 +582,9 @@
582 582 */
583 583 struct blk_queue_tag *bqt;
584 584  
585   - /*
586   - * The following two fields are protected with host_lock;
587   - * however, eh routines can safely access during eh processing
588   - * without acquiring the lock.
589   - */
590   - unsigned int host_busy; /* commands actually active on low-level */
591   - unsigned int host_failed; /* commands that failed. */
  585 + atomic_t host_busy; /* commands actually active on low-level */
  586 + unsigned int host_failed; /* commands that failed.
  587 + protected by host_lock */
592 588 unsigned int host_eh_scheduled; /* EH scheduled without command */
593 589  
594 590 unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */