Commit 5befb98b30cce19bdf2221ea48c39f1fec5c4568

Authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is a set of small bug fixes for lpfc and zfcp and a fix for a
  fairly nasty bug in sg where a process which cancels I/O completes in
  a kernel thread which would then try to write back to the now gone
  userspace and end up writing to a random kernel address instead"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  [SCSI] zfcp: remove access control tables interface (keep sysfs files)
  [SCSI] zfcp: fix schedule-inside-lock in scsi_device list loops
  [SCSI] zfcp: fix lock imbalance by reworking request queue locking
  [SCSI] sg: Fix user memory corruption when SG_IO is interrupted by a signal
  [SCSI] lpfc: Don't force CONFIG_GENERIC_CSUM on

Showing 6 changed files (side-by-side diff)

drivers/s390/scsi/zfcp_erp.c
... ... @@ -102,10 +102,13 @@
102 102  
103 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
104 104 zfcp_erp_action_dismiss(&port->erp_action);
105   - else
106   - shost_for_each_device(sdev, port->adapter->scsi_host)
  105 + else {
  106 + spin_lock(port->adapter->scsi_host->host_lock);
  107 + __shost_for_each_device(sdev, port->adapter->scsi_host)
107 108 if (sdev_to_zfcp(sdev)->port == port)
108 109 zfcp_erp_action_dismiss_lun(sdev);
  110 + spin_unlock(port->adapter->scsi_host->host_lock);
  111 + }
109 112 }
110 113  
111 114 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
112 115  
... ... @@ -592,9 +595,11 @@
592 595 {
593 596 struct scsi_device *sdev;
594 597  
595   - shost_for_each_device(sdev, port->adapter->scsi_host)
  598 + spin_lock(port->adapter->scsi_host->host_lock);
  599 + __shost_for_each_device(sdev, port->adapter->scsi_host)
596 600 if (sdev_to_zfcp(sdev)->port == port)
597 601 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
  602 + spin_unlock(port->adapter->scsi_host->host_lock);
598 603 }
599 604  
600 605 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
601 606  
... ... @@ -1434,8 +1439,10 @@
1434 1439 atomic_set_mask(common_mask, &port->status);
1435 1440 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1436 1441  
1437   - shost_for_each_device(sdev, adapter->scsi_host)
  1442 + spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
  1443 + __shost_for_each_device(sdev, adapter->scsi_host)
1438 1444 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
  1445 + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1439 1446 }
1440 1447  
1441 1448 /**
1442 1449  
... ... @@ -1469,11 +1476,13 @@
1469 1476 }
1470 1477 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1471 1478  
1472   - shost_for_each_device(sdev, adapter->scsi_host) {
  1479 + spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
  1480 + __shost_for_each_device(sdev, adapter->scsi_host) {
1473 1481 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1474 1482 if (clear_counter)
1475 1483 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1476 1484 }
  1485 + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1477 1486 }
1478 1487  
1479 1488 /**
1480 1489  
1481 1490  
... ... @@ -1487,16 +1496,19 @@
1487 1496 {
1488 1497 struct scsi_device *sdev;
1489 1498 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
  1499 + unsigned long flags;
1490 1500  
1491 1501 atomic_set_mask(mask, &port->status);
1492 1502  
1493 1503 if (!common_mask)
1494 1504 return;
1495 1505  
1496   - shost_for_each_device(sdev, port->adapter->scsi_host)
  1506 + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
  1507 + __shost_for_each_device(sdev, port->adapter->scsi_host)
1497 1508 if (sdev_to_zfcp(sdev)->port == port)
1498 1509 atomic_set_mask(common_mask,
1499 1510 &sdev_to_zfcp(sdev)->status);
  1511 + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1500 1512 }
1501 1513  
1502 1514 /**
... ... @@ -1511,6 +1523,7 @@
1511 1523 struct scsi_device *sdev;
1512 1524 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1513 1525 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
  1526 + unsigned long flags;
1514 1527  
1515 1528 atomic_clear_mask(mask, &port->status);
1516 1529  
1517 1530  
... ... @@ -1520,13 +1533,15 @@
1520 1533 if (clear_counter)
1521 1534 atomic_set(&port->erp_counter, 0);
1522 1535  
1523   - shost_for_each_device(sdev, port->adapter->scsi_host)
  1536 + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
  1537 + __shost_for_each_device(sdev, port->adapter->scsi_host)
1524 1538 if (sdev_to_zfcp(sdev)->port == port) {
1525 1539 atomic_clear_mask(common_mask,
1526 1540 &sdev_to_zfcp(sdev)->status);
1527 1541 if (clear_counter)
1528 1542 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1529 1543 }
  1544 + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1530 1545 }
1531 1546  
1532 1547 /**
drivers/s390/scsi/zfcp_qdio.c
... ... @@ -224,11 +224,9 @@
224 224  
225 225 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
226 226 {
227   - spin_lock_irq(&qdio->req_q_lock);
228 227 if (atomic_read(&qdio->req_q_free) ||
229 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
230 229 return 1;
231   - spin_unlock_irq(&qdio->req_q_lock);
232 230 return 0;
233 231 }
234 232  
... ... @@ -246,9 +244,8 @@
246 244 {
247 245 long ret;
248 246  
249   - spin_unlock_irq(&qdio->req_q_lock);
250   - ret = wait_event_interruptible_timeout(qdio->req_q_wq,
251   - zfcp_qdio_sbal_check(qdio), 5 * HZ);
  247 + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
  248 + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
252 249  
253 250 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
254 251 return -EIO;
... ... @@ -262,7 +259,6 @@
262 259 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
263 260 }
264 261  
265   - spin_lock_irq(&qdio->req_q_lock);
266 262 return -EIO;
267 263 }
268 264  
drivers/s390/scsi/zfcp_sysfs.c
... ... @@ -27,6 +27,16 @@
27 27 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
28 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
29 29  
  30 +#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
  31 +static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
  32 + struct device_attribute *at,\
  33 + char *buf) \
  34 +{ \
  35 + return sprintf(buf, _format, _value); \
  36 +} \
  37 +static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
  38 + zfcp_sysfs_##_feat##_##_name##_show, NULL);
  39 +
30 40 #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
31 41 static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
32 42 struct device_attribute *at,\
... ... @@ -75,6 +85,8 @@
75 85 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 86 (zfcp_unit_sdev_status(unit) &
77 87 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
  88 +ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
  89 +ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
78 90  
79 91 static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
80 92 struct device_attribute *attr,
... ... @@ -347,6 +359,8 @@
347 359 &dev_attr_unit_in_recovery.attr,
348 360 &dev_attr_unit_status.attr,
349 361 &dev_attr_unit_access_denied.attr,
  362 + &dev_attr_unit_access_shared.attr,
  363 + &dev_attr_unit_access_readonly.attr,
350 364 NULL
351 365 };
352 366 static struct attribute_group zfcp_unit_attr_group = {
drivers/scsi/Kconfig
... ... @@ -1353,7 +1353,6 @@
1353 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 1354 depends on PCI && SCSI
1355 1355 select SCSI_FC_ATTRS
1356   - select GENERIC_CSUM
1357 1356 select CRC_T10DIF
1358 1357 help
1359 1358 This lpfc driver supports the Emulex LightPulse
fs/bio.c
... ... @@ -1045,12 +1045,22 @@
1045 1045 int bio_uncopy_user(struct bio *bio)
1046 1046 {
1047 1047 struct bio_map_data *bmd = bio->bi_private;
1048   - int ret = 0;
  1048 + struct bio_vec *bvec;
  1049 + int ret = 0, i;
1049 1050  
1050   - if (!bio_flagged(bio, BIO_NULL_MAPPED))
1051   - ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1052   - bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1053   - 0, bmd->is_our_pages);
  1051 + if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
  1052 + /*
  1053 + * if we're in a workqueue, the request is orphaned, so
  1054 + * don't copy into a random user address space, just free.
  1055 + */
  1056 + if (current->mm)
  1057 + ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
  1058 + bmd->nr_sgvecs, bio_data_dir(bio) == READ,
  1059 + 0, bmd->is_our_pages);
  1060 + else if (bmd->is_our_pages)
  1061 + bio_for_each_segment_all(bvec, bio, i)
  1062 + __free_page(bvec->bv_page);
  1063 + }
1054 1064 bio_free_map_data(bmd);
1055 1065 bio_put(bio);
1056 1066 return ret;
include/linux/wait.h
... ... @@ -811,6 +811,63 @@
811 811 __ret; \
812 812 })
813 813  
  814 +#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  815 + lock, ret) \
  816 +do { \
  817 + DEFINE_WAIT(__wait); \
  818 + \
  819 + for (;;) { \
  820 + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
  821 + if (condition) \
  822 + break; \
  823 + if (signal_pending(current)) { \
  824 + ret = -ERESTARTSYS; \
  825 + break; \
  826 + } \
  827 + spin_unlock_irq(&lock); \
  828 + ret = schedule_timeout(ret); \
  829 + spin_lock_irq(&lock); \
  830 + if (!ret) \
  831 + break; \
  832 + } \
  833 + finish_wait(&wq, &__wait); \
  834 +} while (0)
  835 +
  836 +/**
  837 + * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
  838 + * The condition is checked under the lock. This is expected
  839 + * to be called with the lock taken.
  840 + * @wq: the waitqueue to wait on
  841 + * @condition: a C expression for the event to wait for
  842 + * @lock: a locked spinlock_t, which will be released before schedule()
  843 + * and reacquired afterwards.
  844 + * @timeout: timeout, in jiffies
  845 + *
  846 + * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  847 + * @condition evaluates to true or signal is received. The @condition is
  848 + * checked each time the waitqueue @wq is woken up.
  849 + *
  850 + * wake_up() has to be called after changing any variable that could
  851 + * change the result of the wait condition.
  852 + *
  853 + * This is supposed to be called while holding the lock. The lock is
  854 + * dropped before going to sleep and is reacquired afterwards.
  855 + *
  856 + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  857 + * was interrupted by a signal, and the remaining jiffies otherwise
  858 + * if the condition evaluated to true before the timeout elapsed.
  859 + */
  860 +#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
  861 + timeout) \
  862 +({ \
  863 + int __ret = timeout; \
  864 + \
  865 + if (!(condition)) \
  866 + __wait_event_interruptible_lock_irq_timeout( \
  867 + wq, condition, lock, __ret); \
  868 + __ret; \
  869 +})
  870 +
814 871  
815 872 /*
816 873 * These are the old interfaces to sleep waiting for an event.