Commit c104f1fa1ecf4ee0fc06e31b1f77630b2551be81

Authored by Linus Torvalds

Merge branch 'for-3.4/drivers' of git://git.kernel.dk/linux-block

Pull block driver bits from Jens Axboe:

 - A series of fixes for mtip32xx.  Most from Asai at Micron, but also
   one from Greg, getting rid of the dependency on HOTPLUG_PCI_PCIE.

 - A few bug fixes for xen-blkfront, and blkback.

 - A virtio-blk fix for Vivek, making resize actually work.

 - Two fixes from Stephen, making larger transfers possible on cciss.
   This is needed for tape drive support.

* 'for-3.4/drivers' of git://git.kernel.dk/linux-block:
  block: mtip32xx: remove HOTPLUG_PCI_PCIE dependancy
  mtip32xx: dump tagmap on failure
  mtip32xx: fix handling of commands in various scenarios
  mtip32xx: Shorten macro names
  mtip32xx: misc changes
  mtip32xx: Add new sysfs entry 'status'
  mtip32xx: make setting comp_time as common
  mtip32xx: Add new bitwise flag 'dd_flag'
  mtip32xx: fix error handling in mtip_init()
  virtio-blk: Call revalidate_disk() upon online disk resize
  xen/blkback: Make optional features be really optional.
  xen/blkback: Squash the discard support for 'file' and 'phy' type.
  mtip32xx: fix incorrect value set for drv_cleanup_done, and re-initialize and start port in mtip_restart_port()
  cciss: Fix scsi tape io with more than 255 scatter gather elements
  cciss: Initialize scsi host max_sectors for tape drive support
  xen-blkfront: make blkif_io_lock spinlock per-device
  xen/blkfront: don't put bdev right after getting it
  xen-blkfront: use bitmap_set() and bitmap_clear()
  xen/blkback: Enable blkback on HVM guests
  xen/blkback: use grant-table.c hypercall wrappers

Showing 10 changed files Side-by-side Diff

Documentation/ABI/testing/sysfs-block-rssd
  1 +What: /sys/block/rssd*/registers
  2 +Date: March 2012
  3 +KernelVersion: 3.3
  4 +Contact: Asai Thambi S P <asamymuthupa@micron.com>
  5 +Description: This is a read-only file. Dumps below driver information and
  6 + hardware registers.
  7 + - S ACTive
  8 + - Command Issue
  9 + - Allocated
  10 + - Completed
  11 + - PORT IRQ STAT
  12 + - HOST IRQ STAT
  13 +
  14 +What: /sys/block/rssd*/status
  15 +Date: April 2012
  16 +KernelVersion: 3.4
  17 +Contact: Asai Thambi S P <asamymuthupa@micron.com>
  18 +Description: This is a read-only file. Indicates the status of the device.
drivers/block/cciss_scsi.c
... ... @@ -866,6 +866,7 @@
866 866 sh->can_queue = cciss_tape_cmds;
867 867 sh->sg_tablesize = h->maxsgentries;
868 868 sh->max_cmd_len = MAX_COMMAND_SIZE;
  869 + sh->max_sectors = h->cciss_max_sectors;
869 870  
870 871 ((struct cciss_scsi_adapter_data_t *)
871 872 h->scsi_ctlr)->scsi_host = sh;
... ... @@ -1410,7 +1411,7 @@
1410 1411 /* track how many SG entries we are using */
1411 1412 if (request_nsgs > h->maxSG)
1412 1413 h->maxSG = request_nsgs;
1413   - c->Header.SGTotal = (__u8) request_nsgs + chained;
  1414 + c->Header.SGTotal = (u16) request_nsgs + chained;
1414 1415 if (request_nsgs > h->max_cmd_sgentries)
1415 1416 c->Header.SGList = h->max_cmd_sgentries;
1416 1417 else
drivers/block/mtip32xx/Kconfig
... ... @@ -4,7 +4,7 @@
4 4  
5 5 config BLK_DEV_PCIESSD_MTIP32XX
6 6 tristate "Block Device Driver for Micron PCIe SSDs"
7   - depends on HOTPLUG_PCI_PCIE
  7 + depends on PCI
8 8 help
9 9 This enables the block driver for Micron PCIe SSDs.
drivers/block/mtip32xx/mtip32xx.c
Changes suppressed. Click to show
... ... @@ -36,6 +36,7 @@
36 36 #include <linux/idr.h>
37 37 #include <linux/kthread.h>
38 38 #include <../drivers/ata/ahci.h>
  39 +#include <linux/export.h>
39 40 #include "mtip32xx.h"
40 41  
41 42 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
... ... @@ -44,6 +45,7 @@
44 45 #define HW_PORT_PRIV_DMA_SZ \
45 46 (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
46 47  
  48 +#define HOST_CAP_NZDMA (1 << 19)
47 49 #define HOST_HSORG 0xFC
48 50 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
49 51 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
50 52  
... ... @@ -139,7 +141,13 @@
139 141 int group = 0, commandslot = 0, commandindex = 0;
140 142 struct mtip_cmd *command;
141 143 struct mtip_port *port = dd->port;
  144 + static int in_progress;
142 145  
  146 + if (in_progress)
  147 + return;
  148 +
  149 + in_progress = 1;
  150 +
143 151 for (group = 0; group < 4; group++) {
144 152 for (commandslot = 0; commandslot < 32; commandslot++) {
145 153 if (!(port->allocated[group] & (1 << commandslot)))
... ... @@ -165,7 +173,8 @@
165 173  
166 174 up(&port->cmd_slot);
167 175  
168   - atomic_set(&dd->drv_cleanup_done, true);
  176 + set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
  177 + in_progress = 0;
169 178 }
170 179  
171 180 /*
... ... @@ -262,6 +271,9 @@
262 271 && time_before(jiffies, timeout))
263 272 mdelay(1);
264 273  
  274 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
  275 + return -1;
  276 +
265 277 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
266 278 return -1;
267 279  
... ... @@ -294,6 +306,10 @@
294 306 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
295 307  
296 308 spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
  309 +
  310 + /* Set the command's timeout value.*/
  311 + port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
  312 + MTIP_NCQ_COMMAND_TIMEOUT_MS);
297 313 }
298 314  
299 315 /*
300 316  
... ... @@ -420,8 +436,13 @@
420 436 writel(0xFFFFFFFF, port->completed[i]);
421 437  
422 438 /* Clear any pending interrupts for this port */
423   - writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
  439 + writel(readl(port->dd->mmio + PORT_IRQ_STAT),
  440 + port->dd->mmio + PORT_IRQ_STAT);
424 441  
  442 + /* Clear any pending interrupts on the HBA. */
  443 + writel(readl(port->dd->mmio + HOST_IRQ_STAT),
  444 + port->dd->mmio + HOST_IRQ_STAT);
  445 +
425 446 /* Enable port interrupts */
426 447 writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
427 448 }
... ... @@ -447,6 +468,9 @@
447 468 && time_before(jiffies, timeout))
448 469 ;
449 470  
  471 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
  472 + return;
  473 +
450 474 /*
451 475 * Chip quirk: escalate to hba reset if
452 476 * PxCMD.CR not clear after 500 ms
... ... @@ -475,6 +499,9 @@
475 499 while (time_before(jiffies, timeout))
476 500 ;
477 501  
  502 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
  503 + return;
  504 +
478 505 /* Clear PxSCTL.DET */
479 506 writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
480 507 port->mmio + PORT_SCR_CTL);
481 508  
482 509  
483 510  
... ... @@ -486,18 +513,38 @@
486 513 && time_before(jiffies, timeout))
487 514 ;
488 515  
  516 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
  517 + return;
  518 +
489 519 if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
490 520 dev_warn(&port->dd->pdev->dev,
491 521 "COM reset failed\n");
492 522  
493   - /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
494   - writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
  523 + mtip_init_port(port);
  524 + mtip_start_port(port);
495 525  
496   - /* Enable the DMA engine */
497   - mtip_enable_engine(port, 1);
498 526 }
499 527  
500 528 /*
  529 + * Helper function for tag logging
  530 + */
  531 +static void print_tags(struct driver_data *dd,
  532 + char *msg,
  533 + unsigned long *tagbits,
  534 + int cnt)
  535 +{
  536 + unsigned char tagmap[128];
  537 + int group, tagmap_len = 0;
  538 +
  539 + memset(tagmap, 0, sizeof(tagmap));
  540 + for (group = SLOTBITS_IN_LONGS; group > 0; group--)
  541 + tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
  542 + tagbits[group-1]);
  543 + dev_warn(&dd->pdev->dev,
  544 + "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
  545 +}
  546 +
  547 +/*
501 548 * Called periodically to see if any read/write commands are
502 549 * taking too long to complete.
503 550 *
504 551  
505 552  
... ... @@ -514,15 +561,18 @@
514 561 int tag, cmdto_cnt = 0;
515 562 unsigned int bit, group;
516 563 unsigned int num_command_slots = port->dd->slot_groups * 32;
  564 + unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
517 565  
518 566 if (unlikely(!port))
519 567 return;
520 568  
521   - if (atomic_read(&port->dd->resumeflag) == true) {
  569 + if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
522 570 mod_timer(&port->cmd_timer,
523 571 jiffies + msecs_to_jiffies(30000));
524 572 return;
525 573 }
  574 + /* clear the tag accumulator */
  575 + memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
526 576  
527 577 for (tag = 0; tag < num_command_slots; tag++) {
528 578 /*
529 579  
... ... @@ -540,12 +590,10 @@
540 590 command = &port->commands[tag];
541 591 fis = (struct host_to_dev_fis *) command->command;
542 592  
543   - dev_warn(&port->dd->pdev->dev,
544   - "Timeout for command tag %d\n", tag);
545   -
  593 + set_bit(tag, tagaccum);
546 594 cmdto_cnt++;
547 595 if (cmdto_cnt == 1)
548   - set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
  596 + set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
549 597  
550 598 /*
551 599 * Clear the completed bit. This should prevent
552 600  
553 601  
... ... @@ -578,15 +626,29 @@
578 626 }
579 627 }
580 628  
581   - if (cmdto_cnt) {
582   - dev_warn(&port->dd->pdev->dev,
583   - "%d commands timed out: restarting port",
584   - cmdto_cnt);
  629 + if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
  630 + print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
  631 +
585 632 mtip_restart_port(port);
586   - clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
  633 + clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
587 634 wake_up_interruptible(&port->svc_wait);
588 635 }
589 636  
  637 + if (port->ic_pause_timer) {
  638 + to = port->ic_pause_timer + msecs_to_jiffies(1000);
  639 + if (time_after(jiffies, to)) {
  640 + if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
  641 + port->ic_pause_timer = 0;
  642 + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
  643 + clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
  644 + clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
  645 + wake_up_interruptible(&port->svc_wait);
  646 + }
  647 +
  648 +
  649 + }
  650 + }
  651 +
590 652 /* Restart the timer */
591 653 mod_timer(&port->cmd_timer,
592 654 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
593 655  
594 656  
... ... @@ -681,23 +743,18 @@
681 743 complete(waiting);
682 744 }
683 745  
684   -/*
685   - * Helper function for tag logging
686   - */
687   -static void print_tags(struct driver_data *dd,
688   - char *msg,
689   - unsigned long *tagbits)
  746 +static void mtip_null_completion(struct mtip_port *port,
  747 + int tag,
  748 + void *data,
  749 + int status)
690 750 {
691   - unsigned int tag, count = 0;
692   -
693   - for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
694   - if (test_bit(tag, tagbits))
695   - count++;
696   - }
697   - if (count)
698   - dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
  751 + return;
699 752 }
700 753  
  754 +static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
  755 + dma_addr_t buffer_dma, unsigned int sectors);
  756 +static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
  757 + struct smart_attr *attrib);
701 758 /*
702 759 * Handle an error.
703 760 *
704 761  
705 762  
... ... @@ -708,12 +765,16 @@
708 765 */
709 766 static void mtip_handle_tfe(struct driver_data *dd)
710 767 {
711   - int group, tag, bit, reissue;
  768 + int group, tag, bit, reissue, rv;
712 769 struct mtip_port *port;
713   - struct mtip_cmd *command;
  770 + struct mtip_cmd *cmd;
714 771 u32 completed;
715 772 struct host_to_dev_fis *fis;
716 773 unsigned long tagaccum[SLOTBITS_IN_LONGS];
  774 + unsigned int cmd_cnt = 0;
  775 + unsigned char *buf;
  776 + char *fail_reason = NULL;
  777 + int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
717 778  
718 779 dev_warn(&dd->pdev->dev, "Taskfile error\n");
719 780  
720 781  
... ... @@ -722,8 +783,11 @@
722 783 /* Stop the timer to prevent command timeouts. */
723 784 del_timer(&port->cmd_timer);
724 785  
  786 + /* clear the tag accumulator */
  787 + memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
  788 +
725 789 /* Set eh_active */
726   - set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
  790 + set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
727 791  
728 792 /* Loop through all the groups */
729 793 for (group = 0; group < dd->slot_groups; group++) {
... ... @@ -732,9 +796,6 @@
732 796 /* clear completed status register in the hardware.*/
733 797 writel(completed, port->completed[group]);
734 798  
735   - /* clear the tag accumulator */
736   - memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
737   -
738 799 /* Process successfully completed commands */
739 800 for (bit = 0; bit < 32 && completed; bit++) {
740 801 if (!(completed & (1<<bit)))
741 802  
742 803  
... ... @@ -745,13 +806,14 @@
745 806 if (tag == MTIP_TAG_INTERNAL)
746 807 continue;
747 808  
748   - command = &port->commands[tag];
749   - if (likely(command->comp_func)) {
  809 + cmd = &port->commands[tag];
  810 + if (likely(cmd->comp_func)) {
750 811 set_bit(tag, tagaccum);
751   - atomic_set(&port->commands[tag].active, 0);
752   - command->comp_func(port,
  812 + cmd_cnt++;
  813 + atomic_set(&cmd->active, 0);
  814 + cmd->comp_func(port,
753 815 tag,
754   - command->comp_data,
  816 + cmd->comp_data,
755 817 0);
756 818 } else {
757 819 dev_err(&port->dd->pdev->dev,
758 820  
759 821  
... ... @@ -765,12 +827,45 @@
765 827 }
766 828 }
767 829 }
768   - print_tags(dd, "TFE tags completed:", tagaccum);
769 830  
  831 + print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
  832 +
770 833 /* Restart the port */
771 834 mdelay(20);
772 835 mtip_restart_port(port);
773 836  
  837 + /* Trying to determine the cause of the error */
  838 + rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
  839 + dd->port->log_buf,
  840 + dd->port->log_buf_dma, 1);
  841 + if (rv) {
  842 + dev_warn(&dd->pdev->dev,
  843 + "Error in READ LOG EXT (10h) command\n");
  844 + /* non-critical error, don't fail the load */
  845 + } else {
  846 + buf = (unsigned char *)dd->port->log_buf;
  847 + if (buf[259] & 0x1) {
  848 + dev_info(&dd->pdev->dev,
  849 + "Write protect bit is set.\n");
  850 + set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
  851 + fail_all_ncq_write = 1;
  852 + fail_reason = "write protect";
  853 + }
  854 + if (buf[288] == 0xF7) {
  855 + dev_info(&dd->pdev->dev,
  856 + "Exceeded Tmax, drive in thermal shutdown.\n");
  857 + set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
  858 + fail_all_ncq_cmds = 1;
  859 + fail_reason = "thermal shutdown";
  860 + }
  861 + if (buf[288] == 0xBF) {
  862 + dev_info(&dd->pdev->dev,
  863 + "Drive indicates rebuild has failed.\n");
  864 + fail_all_ncq_cmds = 1;
  865 + fail_reason = "rebuild failed";
  866 + }
  867 + }
  868 +
774 869 /* clear the tag accumulator */
775 870 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
776 871  
777 872  
778 873  
779 874  
780 875  
781 876  
... ... @@ -779,32 +874,47 @@
779 874 for (bit = 0; bit < 32; bit++) {
780 875 reissue = 1;
781 876 tag = (group << 5) + bit;
  877 + cmd = &port->commands[tag];
782 878  
783 879 /* If the active bit is set re-issue the command */
784   - if (atomic_read(&port->commands[tag].active) == 0)
  880 + if (atomic_read(&cmd->active) == 0)
785 881 continue;
786 882  
787   - fis = (struct host_to_dev_fis *)
788   - port->commands[tag].command;
  883 + fis = (struct host_to_dev_fis *)cmd->command;
789 884  
790 885 /* Should re-issue? */
791 886 if (tag == MTIP_TAG_INTERNAL ||
792 887 fis->command == ATA_CMD_SET_FEATURES)
793 888 reissue = 0;
  889 + else {
  890 + if (fail_all_ncq_cmds ||
  891 + (fail_all_ncq_write &&
  892 + fis->command == ATA_CMD_FPDMA_WRITE)) {
  893 + dev_warn(&dd->pdev->dev,
  894 + " Fail: %s w/tag %d [%s].\n",
  895 + fis->command == ATA_CMD_FPDMA_WRITE ?
  896 + "write" : "read",
  897 + tag,
  898 + fail_reason != NULL ?
  899 + fail_reason : "unknown");
  900 + atomic_set(&cmd->active, 0);
  901 + if (cmd->comp_func) {
  902 + cmd->comp_func(port, tag,
  903 + cmd->comp_data,
  904 + -ENODATA);
  905 + }
  906 + continue;
  907 + }
  908 + }
794 909  
795 910 /*
796 911 * First check if this command has
797 912 * exceeded its retries.
798 913 */
799   - if (reissue &&
800   - (port->commands[tag].retries-- > 0)) {
  914 + if (reissue && (cmd->retries-- > 0)) {
801 915  
802 916 set_bit(tag, tagaccum);
803 917  
804   - /* Update the timeout value. */
805   - port->commands[tag].comp_time =
806   - jiffies + msecs_to_jiffies(
807   - MTIP_NCQ_COMMAND_TIMEOUT_MS);
808 918 /* Re-issue the command. */
809 919 mtip_issue_ncq_command(port, tag);
810 920  
811 921  
812 922  
... ... @@ -814,13 +924,13 @@
814 924 /* Retire a command that will not be reissued */
815 925 dev_warn(&port->dd->pdev->dev,
816 926 "retiring tag %d\n", tag);
817   - atomic_set(&port->commands[tag].active, 0);
  927 + atomic_set(&cmd->active, 0);
818 928  
819   - if (port->commands[tag].comp_func)
820   - port->commands[tag].comp_func(
  929 + if (cmd->comp_func)
  930 + cmd->comp_func(
821 931 port,
822 932 tag,
823   - port->commands[tag].comp_data,
  933 + cmd->comp_data,
824 934 PORT_IRQ_TF_ERR);
825 935 else
826 936 dev_warn(&port->dd->pdev->dev,
827 937  
... ... @@ -828,10 +938,10 @@
828 938 tag);
829 939 }
830 940 }
831   - print_tags(dd, "TFE tags reissued:", tagaccum);
  941 + print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
832 942  
833 943 /* clear eh_active */
834   - clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
  944 + clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
835 945 wake_up_interruptible(&port->svc_wait);
836 946  
837 947 mod_timer(&port->cmd_timer,
... ... @@ -899,7 +1009,7 @@
899 1009 struct mtip_port *port = dd->port;
900 1010 struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
901 1011  
902   - if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
  1012 + if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
903 1013 (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
904 1014 & (1 << MTIP_TAG_INTERNAL))) {
905 1015 if (cmd->comp_func) {
... ... @@ -911,8 +1021,6 @@
911 1021 }
912 1022 }
913 1023  
914   - dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
915   -
916 1024 return;
917 1025 }
918 1026  
... ... @@ -968,6 +1076,9 @@
968 1076 /* don't proceed further */
969 1077 return IRQ_HANDLED;
970 1078 }
  1079 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  1080 + &dd->dd_flag))
  1081 + return rv;
971 1082  
972 1083 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
973 1084 }
... ... @@ -1015,6 +1126,39 @@
1015 1126 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
1016 1127 }
1017 1128  
  1129 +static bool mtip_pause_ncq(struct mtip_port *port,
  1130 + struct host_to_dev_fis *fis)
  1131 +{
  1132 + struct host_to_dev_fis *reply;
  1133 + unsigned long task_file_data;
  1134 +
  1135 + reply = port->rxfis + RX_FIS_D2H_REG;
  1136 + task_file_data = readl(port->mmio+PORT_TFDATA);
  1137 +
  1138 + if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
  1139 + return false;
  1140 +
  1141 + if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
  1142 + set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
  1143 + port->ic_pause_timer = jiffies;
  1144 + return true;
  1145 + } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
  1146 + (fis->features == 0x03)) {
  1147 + set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
  1148 + port->ic_pause_timer = jiffies;
  1149 + return true;
  1150 + } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
  1151 + ((fis->command == 0xFC) &&
  1152 + (fis->features == 0x27 || fis->features == 0x72 ||
  1153 + fis->features == 0x62 || fis->features == 0x26))) {
  1154 + /* Com reset after secure erase or lowlevel format */
  1155 + mtip_restart_port(port);
  1156 + return false;
  1157 + }
  1158 +
  1159 + return false;
  1160 +}
  1161 +
1018 1162 /*
1019 1163 * Wait for port to quiesce
1020 1164 *
1021 1165  
... ... @@ -1033,11 +1177,13 @@
1033 1177  
1034 1178 to = jiffies + msecs_to_jiffies(timeout);
1035 1179 do {
1036   - if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
1037   - test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
  1180 + if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
  1181 + test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
1038 1182 msleep(20);
1039 1183 continue; /* svc thd is actively issuing commands */
1040 1184 }
  1185 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
  1186 + return -EFAULT;
1041 1187 /*
1042 1188 * Ignore s_active bit 0 of array element 0.
1043 1189 * This bit will always be set
... ... @@ -1074,7 +1220,7 @@
1074 1220 * -EAGAIN Time out waiting for command to complete.
1075 1221 */
1076 1222 static int mtip_exec_internal_command(struct mtip_port *port,
1077   - void *fis,
  1223 + struct host_to_dev_fis *fis,
1078 1224 int fis_len,
1079 1225 dma_addr_t buffer,
1080 1226 int buf_len,
1081 1227  
... ... @@ -1084,8 +1230,9 @@
1084 1230 {
1085 1231 struct mtip_cmd_sg *command_sg;
1086 1232 DECLARE_COMPLETION_ONSTACK(wait);
1087   - int rv = 0;
  1233 + int rv = 0, ready2go = 1;
1088 1234 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
  1235 + unsigned long to;
1089 1236  
1090 1237 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1091 1238 if (buffer & 0x00000007) {
1092 1239  
1093 1240  
1094 1241  
1095 1242  
... ... @@ -1094,23 +1241,38 @@
1094 1241 return -EFAULT;
1095 1242 }
1096 1243  
1097   - /* Only one internal command should be running at a time */
1098   - if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
  1244 + to = jiffies + msecs_to_jiffies(timeout);
  1245 + do {
  1246 + ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
  1247 + port->allocated);
  1248 + if (ready2go)
  1249 + break;
  1250 + mdelay(100);
  1251 + } while (time_before(jiffies, to));
  1252 + if (!ready2go) {
1099 1253 dev_warn(&port->dd->pdev->dev,
1100   - "Internal command already active\n");
  1254 + "Internal cmd active. new cmd [%02X]\n", fis->command);
1101 1255 return -EBUSY;
1102 1256 }
1103   - set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
  1257 + set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
  1258 + port->ic_pause_timer = 0;
1104 1259  
  1260 + if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
  1261 + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
  1262 + else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
  1263 + clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
  1264 +
1105 1265 if (atomic == GFP_KERNEL) {
1106   - /* wait for io to complete if non atomic */
1107   - if (mtip_quiesce_io(port, 5000) < 0) {
1108   - dev_warn(&port->dd->pdev->dev,
1109   - "Failed to quiesce IO\n");
1110   - release_slot(port, MTIP_TAG_INTERNAL);
1111   - clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
1112   - wake_up_interruptible(&port->svc_wait);
1113   - return -EBUSY;
  1266 + if (fis->command != ATA_CMD_STANDBYNOW1) {
  1267 + /* wait for io to complete if non atomic */
  1268 + if (mtip_quiesce_io(port, 5000) < 0) {
  1269 + dev_warn(&port->dd->pdev->dev,
  1270 + "Failed to quiesce IO\n");
  1271 + release_slot(port, MTIP_TAG_INTERNAL);
  1272 + clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
  1273 + wake_up_interruptible(&port->svc_wait);
  1274 + return -EBUSY;
  1275 + }
1114 1276 }
1115 1277  
1116 1278 /* Set the completion function and data for the command. */
... ... @@ -1120,7 +1282,7 @@
1120 1282 } else {
1121 1283 /* Clear completion - we're going to poll */
1122 1284 int_cmd->comp_data = NULL;
1123   - int_cmd->comp_func = NULL;
  1285 + int_cmd->comp_func = mtip_null_completion;
1124 1286 }
1125 1287  
1126 1288 /* Copy the command to the command table */
... ... @@ -1159,6 +1321,12 @@
1159 1321 "Internal command did not complete [%d] "
1160 1322 "within timeout of %lu ms\n",
1161 1323 atomic, timeout);
  1324 + if (mtip_check_surprise_removal(port->dd->pdev) ||
  1325 + test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  1326 + &port->dd->dd_flag)) {
  1327 + rv = -ENXIO;
  1328 + goto exec_ic_exit;
  1329 + }
1162 1330 rv = -EAGAIN;
1163 1331 }
1164 1332  
1165 1333  
1166 1334  
1167 1335  
1168 1336  
1169 1337  
1170 1338  
... ... @@ -1166,31 +1334,59 @@
1166 1334 & (1 << MTIP_TAG_INTERNAL)) {
1167 1335 dev_warn(&port->dd->pdev->dev,
1168 1336 "Retiring internal command but CI is 1.\n");
  1337 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  1338 + &port->dd->dd_flag)) {
  1339 + hba_reset_nosleep(port->dd);
  1340 + rv = -ENXIO;
  1341 + } else {
  1342 + mtip_restart_port(port);
  1343 + rv = -EAGAIN;
  1344 + }
  1345 + goto exec_ic_exit;
1169 1346 }
1170 1347  
1171 1348 } else {
1172 1349 /* Spin for <timeout> checking if command still outstanding */
1173 1350 timeout = jiffies + msecs_to_jiffies(timeout);
  1351 + while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
  1352 + & (1 << MTIP_TAG_INTERNAL))
  1353 + && time_before(jiffies, timeout)) {
  1354 + if (mtip_check_surprise_removal(port->dd->pdev)) {
  1355 + rv = -ENXIO;
  1356 + goto exec_ic_exit;
  1357 + }
  1358 + if ((fis->command != ATA_CMD_STANDBYNOW1) &&
  1359 + test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  1360 + &port->dd->dd_flag)) {
  1361 + rv = -ENXIO;
  1362 + goto exec_ic_exit;
  1363 + }
  1364 + }
1174 1365  
1175   - while ((readl(
1176   - port->cmd_issue[MTIP_TAG_INTERNAL])
1177   - & (1 << MTIP_TAG_INTERNAL))
1178   - && time_before(jiffies, timeout))
1179   - ;
1180   -
1181 1366 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1182 1367 & (1 << MTIP_TAG_INTERNAL)) {
1183 1368 dev_err(&port->dd->pdev->dev,
1184   - "Internal command did not complete [%d]\n",
1185   - atomic);
  1369 + "Internal command did not complete [atomic]\n");
1186 1370 rv = -EAGAIN;
  1371 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  1372 + &port->dd->dd_flag)) {
  1373 + hba_reset_nosleep(port->dd);
  1374 + rv = -ENXIO;
  1375 + } else {
  1376 + mtip_restart_port(port);
  1377 + rv = -EAGAIN;
  1378 + }
1187 1379 }
1188 1380 }
1189   -
  1381 +exec_ic_exit:
1190 1382 /* Clear the allocated and active bits for the internal command. */
1191 1383 atomic_set(&int_cmd->active, 0);
1192 1384 release_slot(port, MTIP_TAG_INTERNAL);
1193   - clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
  1385 + if (rv >= 0 && mtip_pause_ncq(port, fis)) {
  1386 + /* NCQ paused */
  1387 + return rv;
  1388 + }
  1389 + clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1194 1390 wake_up_interruptible(&port->svc_wait);
1195 1391  
1196 1392 return rv;
... ... @@ -1240,6 +1436,9 @@
1240 1436 int rv = 0;
1241 1437 struct host_to_dev_fis fis;
1242 1438  
  1439 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
  1440 + return -EFAULT;
  1441 +
1243 1442 /* Build the FIS. */
1244 1443 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1245 1444 fis.type = 0x27;
... ... @@ -1313,6 +1512,7 @@
1313 1512 {
1314 1513 int rv;
1315 1514 struct host_to_dev_fis fis;
  1515 + unsigned long start;
1316 1516  
1317 1517 /* Build the FIS. */
1318 1518 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1319 1519  
1320 1520  
1321 1521  
... ... @@ -1320,20 +1520,155 @@
1320 1520 fis.opts = 1 << 7;
1321 1521 fis.command = ATA_CMD_STANDBYNOW1;
1322 1522  
1323   - /* Execute the command. Use a 15-second timeout for large drives. */
  1523 + start = jiffies;
1324 1524 rv = mtip_exec_internal_command(port,
1325 1525 &fis,
1326 1526 5,
1327 1527 0,
1328 1528 0,
1329 1529 0,
1330   - GFP_KERNEL,
  1530 + GFP_ATOMIC,
1331 1531 15000);
  1532 + dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
  1533 + jiffies_to_msecs(jiffies - start));
  1534 + if (rv)
  1535 + dev_warn(&port->dd->pdev->dev,
  1536 + "STANDBY IMMEDIATE command failed.\n");
1332 1537  
1333 1538 return rv;
1334 1539 }
1335 1540  
1336 1541 /*
  1542 + * Issue a READ LOG EXT command to the device.
  1543 + *
  1544 + * @port pointer to the port structure.
  1545 + * @page page number to fetch
  1546 + * @buffer pointer to buffer
  1547 + * @buffer_dma dma address corresponding to @buffer
  1548 + * @sectors page length to fetch, in sectors
  1549 + *
  1550 + * return value
  1551 + * @rv return value from mtip_exec_internal_command()
  1552 + */
  1553 +static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
  1554 + dma_addr_t buffer_dma, unsigned int sectors)
  1555 +{
  1556 + struct host_to_dev_fis fis;
  1557 +
  1558 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1559 + fis.type = 0x27;
  1560 + fis.opts = 1 << 7;
  1561 + fis.command = ATA_CMD_READ_LOG_EXT;
  1562 + fis.sect_count = sectors & 0xFF;
  1563 + fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
  1564 + fis.lba_low = page;
  1565 + fis.lba_mid = 0;
  1566 + fis.device = ATA_DEVICE_OBS;
  1567 +
  1568 + memset(buffer, 0, sectors * ATA_SECT_SIZE);
  1569 +
  1570 + return mtip_exec_internal_command(port,
  1571 + &fis,
  1572 + 5,
  1573 + buffer_dma,
  1574 + sectors * ATA_SECT_SIZE,
  1575 + 0,
  1576 + GFP_ATOMIC,
  1577 + MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
  1578 +}
  1579 +
  1580 +/*
  1581 + * Issue a SMART READ DATA command to the device.
  1582 + *
  1583 + * @port pointer to the port structure.
  1584 + * @buffer pointer to buffer
  1585 + * @buffer_dma dma address corresponding to @buffer
  1586 + *
  1587 + * return value
  1588 + * @rv return value from mtip_exec_internal_command()
  1589 + */
  1590 +static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
  1591 + dma_addr_t buffer_dma)
  1592 +{
  1593 + struct host_to_dev_fis fis;
  1594 +
  1595 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1596 + fis.type = 0x27;
  1597 + fis.opts = 1 << 7;
  1598 + fis.command = ATA_CMD_SMART;
  1599 + fis.features = 0xD0;
  1600 + fis.sect_count = 1;
  1601 + fis.lba_mid = 0x4F;
  1602 + fis.lba_hi = 0xC2;
  1603 + fis.device = ATA_DEVICE_OBS;
  1604 +
  1605 + return mtip_exec_internal_command(port,
  1606 + &fis,
  1607 + 5,
  1608 + buffer_dma,
  1609 + ATA_SECT_SIZE,
  1610 + 0,
  1611 + GFP_ATOMIC,
  1612 + 15000);
  1613 +}
  1614 +
  1615 +/*
  1616 + * Get the value of a smart attribute
  1617 + *
  1618 + * @port pointer to the port structure
  1619 + * @id attribute number
  1620 + * @attrib pointer to return attrib information corresponding to @id
  1621 + *
  1622 + * return value
  1623 + * -EINVAL NULL buffer passed or unsupported attribute @id.
  1624 + * -EPERM Identify data not valid, SMART not supported or not enabled
  1625 + */
  1626 +static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
  1627 + struct smart_attr *attrib)
  1628 +{
  1629 + int rv, i;
  1630 + struct smart_attr *pattr;
  1631 +
  1632 + if (!attrib)
  1633 + return -EINVAL;
  1634 +
  1635 + if (!port->identify_valid) {
  1636 + dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
  1637 + return -EPERM;
  1638 + }
  1639 + if (!(port->identify[82] & 0x1)) {
  1640 + dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
  1641 + return -EPERM;
  1642 + }
  1643 + if (!(port->identify[85] & 0x1)) {
  1644 + dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
  1645 + return -EPERM;
  1646 + }
  1647 +
  1648 + memset(port->smart_buf, 0, ATA_SECT_SIZE);
  1649 + rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
  1650 + if (rv) {
  1651 + dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n");
  1652 + return rv;
  1653 + }
  1654 +
  1655 + pattr = (struct smart_attr *)(port->smart_buf + 2);
  1656 + for (i = 0; i < 29; i++, pattr++)
  1657 + if (pattr->attr_id == id) {
  1658 + memcpy(attrib, pattr, sizeof(struct smart_attr));
  1659 + break;
  1660 + }
  1661 +
  1662 + if (i == 29) {
  1663 + dev_warn(&port->dd->pdev->dev,
  1664 + "Query for invalid SMART attribute ID\n");
  1665 + rv = -EINVAL;
  1666 + }
  1667 +
  1668 + return rv;
  1669 +}
  1670 +
  1671 +/*
1337 1672 * Get the drive capacity.
1338 1673 *
1339 1674 * @dd Pointer to the device data structure.
... ... @@ -1504,10 +1839,7 @@
1504 1839 fis.cyl_hi = command[5];
1505 1840 fis.device = command[6] & ~0x10; /* Clear the dev bit*/
1506 1841  
1507   -
1508   - dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
1509   - "nsect %x, sect %x, lcyl %x, "
1510   - "hcyl %x, sel %x\n",
  1842 + dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
1511 1843 __func__,
1512 1844 command[0],
1513 1845 command[1],
... ... @@ -1534,8 +1866,7 @@
1534 1866 command[4] = reply->cyl_low;
1535 1867 command[5] = reply->cyl_hi;
1536 1868  
1537   - dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
1538   - "err %x , cyl_lo %x cyl_hi %x\n",
  1869 + dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
1539 1870 __func__,
1540 1871 command[0],
1541 1872 command[1],
... ... @@ -1578,7 +1909,7 @@
1578 1909 }
1579 1910  
1580 1911 dbg_printk(MTIP_DRV_NAME
1581   - "%s: User Command: cmd %x, sect %x, "
  1912 + " %s: User Command: cmd %x, sect %x, "
1582 1913 "feat %x, sectcnt %x\n",
1583 1914 __func__,
1584 1915 command[0],
... ... @@ -1607,7 +1938,7 @@
1607 1938 command[2] = command[3];
1608 1939  
1609 1940 dbg_printk(MTIP_DRV_NAME
1610   - "%s: Completion Status: stat %x, "
  1941 + " %s: Completion Status: stat %x, "
1611 1942 "err %x, cmd %x\n",
1612 1943 __func__,
1613 1944 command[0],
1614 1945  
... ... @@ -1810,9 +2141,10 @@
1810 2141 }
1811 2142  
1812 2143 dbg_printk(MTIP_DRV_NAME
1813   - "taskfile: cmd %x, feat %x, nsect %x,"
  2144 + " %s: cmd %x, feat %x, nsect %x,"
1814 2145 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
1815 2146 " head/dev %x\n",
  2147 + __func__,
1816 2148 fis.command,
1817 2149 fis.features,
1818 2150 fis.sect_count,
... ... @@ -1823,8 +2155,8 @@
1823 2155  
1824 2156 switch (fis.command) {
1825 2157 case ATA_CMD_DOWNLOAD_MICRO:
1826   - /* Change timeout for Download Microcode to 60 seconds.*/
1827   - timeout = 60000;
  2158 + /* Change timeout for Download Microcode to 2 minutes */
  2159 + timeout = 120000;
1828 2160 break;
1829 2161 case ATA_CMD_SEC_ERASE_UNIT:
1830 2162 /* Change timeout for Security Erase Unit to 4 minutes.*/
... ... @@ -1840,8 +2172,8 @@
1840 2172 timeout = 10000;
1841 2173 break;
1842 2174 case ATA_CMD_SMART:
1843   - /* Change timeout for vendor unique command to 10 secs */
1844   - timeout = 10000;
  2175 + /* Change timeout for vendor unique command to 15 secs */
  2176 + timeout = 15000;
1845 2177 break;
1846 2178 default:
1847 2179 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1848 2180  
... ... @@ -1903,18 +2235,8 @@
1903 2235 req_task->hob_ports[1] = reply->features_ex;
1904 2236 req_task->hob_ports[2] = reply->sect_cnt_ex;
1905 2237 }
1906   -
1907   - /* Com rest after secure erase or lowlevel format */
1908   - if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
1909   - ((fis.command == 0xFC) &&
1910   - (fis.features == 0x27 || fis.features == 0x72 ||
1911   - fis.features == 0x62 || fis.features == 0x26))) &&
1912   - !(reply->command & 1)) {
1913   - mtip_restart_port(dd->port);
1914   - }
1915   -
1916 2238 dbg_printk(MTIP_DRV_NAME
1917   - "%s: Completion: stat %x,"
  2239 + " %s: Completion: stat %x,"
1918 2240 "err %x, sect_cnt %x, lbalo %x,"
1919 2241 "lbamid %x, lbahi %x, dev %x\n",
1920 2242 __func__,
1921 2243  
... ... @@ -2080,14 +2402,10 @@
2080 2402 struct host_to_dev_fis *fis;
2081 2403 struct mtip_port *port = dd->port;
2082 2404 struct mtip_cmd *command = &port->commands[tag];
  2405 + int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2083 2406  
2084 2407 /* Map the scatter list for DMA access */
2085   - if (dir == READ)
2086   - nents = dma_map_sg(&dd->pdev->dev, command->sg,
2087   - nents, DMA_FROM_DEVICE);
2088   - else
2089   - nents = dma_map_sg(&dd->pdev->dev, command->sg,
2090   - nents, DMA_TO_DEVICE);
  2408 + nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2091 2409  
2092 2410 command->scatter_ents = nents;
2093 2411  
... ... @@ -2127,7 +2445,7 @@
2127 2445 */
2128 2446 command->comp_data = dd;
2129 2447 command->comp_func = mtip_async_complete;
2130   - command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
  2448 + command->direction = dma_dir;
2131 2449  
2132 2450 /*
2133 2451 * Set the completion function and data for the command passed
2134 2452  
2135 2453  
... ... @@ -2140,19 +2458,16 @@
2140 2458 * To prevent this command from being issued
2141 2459 * if an internal command is in progress or error handling is active.
2142 2460 */
2143   - if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
2144   - test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
  2461 + if (port->flags & MTIP_PF_PAUSE_IO) {
2145 2462 set_bit(tag, port->cmds_to_issue);
2146   - set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
  2463 + set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2147 2464 return;
2148 2465 }
2149 2466  
2150 2467 /* Issue the command to the hardware */
2151 2468 mtip_issue_ncq_command(port, tag);
2152 2469  
2153   - /* Set the command's timeout value.*/
2154   - port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
2155   - MTIP_NCQ_COMMAND_TIMEOUT_MS);
  2470 + return;
2156 2471 }
2157 2472  
2158 2473 /*
... ... @@ -2191,6 +2506,10 @@
2191 2506 down(&dd->port->cmd_slot);
2192 2507 *tag = get_slot(dd->port);
2193 2508  
  2509 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
  2510 + up(&dd->port->cmd_slot);
  2511 + return NULL;
  2512 + }
2194 2513 if (unlikely(*tag < 0))
2195 2514 return NULL;
2196 2515  
... ... @@ -2207,7 +2526,7 @@
2207 2526 * return value
2208 2527 * The size, in bytes, of the data copied into buf.
2209 2528 */
2210   -static ssize_t hw_show_registers(struct device *dev,
  2529 +static ssize_t mtip_hw_show_registers(struct device *dev,
2211 2530 struct device_attribute *attr,
2212 2531 char *buf)
2213 2532 {
... ... @@ -2216,7 +2535,7 @@
2216 2535 int size = 0;
2217 2536 int n;
2218 2537  
2219   - size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
  2538 + size += sprintf(&buf[size], "S ACTive:\n");
2220 2539  
2221 2540 for (n = 0; n < dd->slot_groups; n++)
2222 2541 size += sprintf(&buf[size], "0x%08x\n",
2223 2542  
2224 2543  
2225 2544  
2226 2545  
... ... @@ -2240,21 +2559,40 @@
2240 2559 group_allocated);
2241 2560 }
2242 2561  
2243   - size += sprintf(&buf[size], "completed:\n");
  2562 + size += sprintf(&buf[size], "Completed:\n");
2244 2563  
2245 2564 for (n = 0; n < dd->slot_groups; n++)
2246 2565 size += sprintf(&buf[size], "0x%08x\n",
2247 2566 readl(dd->port->completed[n]));
2248 2567  
2249   - size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
  2568 + size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
2250 2569 readl(dd->port->mmio + PORT_IRQ_STAT));
2251   - size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
  2570 + size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
2252 2571 readl(dd->mmio + HOST_IRQ_STAT));
2253 2572  
2254 2573 return size;
2255 2574 }
2256   -static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
2257 2575  
  2576 +static ssize_t mtip_hw_show_status(struct device *dev,
  2577 + struct device_attribute *attr,
  2578 + char *buf)
  2579 +{
  2580 + struct driver_data *dd = dev_to_disk(dev)->private_data;
  2581 + int size = 0;
  2582 +
  2583 + if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
  2584 + size += sprintf(buf, "%s", "thermal_shutdown\n");
  2585 + else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
  2586 + size += sprintf(buf, "%s", "write_protect\n");
  2587 + else
  2588 + size += sprintf(buf, "%s", "online\n");
  2589 +
  2590 + return size;
  2591 +}
  2592 +
  2593 +static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
  2594 +static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
  2595 +
2258 2596 /*
2259 2597 * Create the sysfs related attributes.
2260 2598 *
... ... @@ -2272,7 +2610,10 @@
2272 2610  
2273 2611 if (sysfs_create_file(kobj, &dev_attr_registers.attr))
2274 2612 dev_warn(&dd->pdev->dev,
2275   - "Error creating registers sysfs entry\n");
  2613 + "Error creating 'registers' sysfs entry\n");
  2614 + if (sysfs_create_file(kobj, &dev_attr_status.attr))
  2615 + dev_warn(&dd->pdev->dev,
  2616 + "Error creating 'status' sysfs entry\n");
2276 2617 return 0;
2277 2618 }
2278 2619  
... ... @@ -2292,6 +2633,7 @@
2292 2633 return -EINVAL;
2293 2634  
2294 2635 sysfs_remove_file(kobj, &dev_attr_registers.attr);
  2636 + sysfs_remove_file(kobj, &dev_attr_status.attr);
2295 2637  
2296 2638 return 0;
2297 2639 }
2298 2640  
... ... @@ -2384,10 +2726,12 @@
2384 2726 "FTL rebuild in progress. Polling for completion.\n");
2385 2727  
2386 2728 start = jiffies;
2387   - dd->ftlrebuildflag = 1;
2388 2729 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
2389 2730  
2390 2731 do {
  2732 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  2733 + &dd->dd_flag)))
  2734 + return -EFAULT;
2391 2735 if (mtip_check_surprise_removal(dd->pdev))
2392 2736 return -EFAULT;
2393 2737  
2394 2738  
2395 2739  
2396 2740  
... ... @@ -2408,22 +2752,17 @@
2408 2752 dev_warn(&dd->pdev->dev,
2409 2753 "FTL rebuild complete (%d secs).\n",
2410 2754 jiffies_to_msecs(jiffies - start) / 1000);
2411   - dd->ftlrebuildflag = 0;
2412 2755 mtip_block_initialize(dd);
2413   - break;
  2756 + return 0;
2414 2757 }
2415 2758 ssleep(10);
2416 2759 } while (time_before(jiffies, timeout));
2417 2760  
2418 2761 /* Check for timeout */
2419   - if (dd->ftlrebuildflag) {
2420   - dev_err(&dd->pdev->dev,
  2762 + dev_err(&dd->pdev->dev,
2421 2763 "Timed out waiting for FTL rebuild to complete (%d secs).\n",
2422 2764 jiffies_to_msecs(jiffies - start) / 1000);
2423   - return -EFAULT;
2424   - }
2425   -
2426   - return 0;
  2765 + return -EFAULT;
2427 2766 }
2428 2767  
2429 2768 /*
2430 2769  
... ... @@ -2448,14 +2787,17 @@
2448 2787 * is in progress nor error handling is active
2449 2788 */
2450 2789 wait_event_interruptible(port->svc_wait, (port->flags) &&
2451   - !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
2452   - !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
  2790 + !(port->flags & MTIP_PF_PAUSE_IO));
2453 2791  
2454 2792 if (kthread_should_stop())
2455 2793 break;
2456 2794  
2457   - set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
2458   - if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
  2795 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  2796 + &dd->dd_flag)))
  2797 + break;
  2798 +
  2799 + set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
  2800 + if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
2459 2801 slot = 1;
2460 2802 /* used to restrict the loop to one iteration */
2461 2803 slot_start = num_cmd_slots;
2462 2804  
2463 2805  
2464 2806  
... ... @@ -2480,21 +2822,19 @@
2480 2822 /* Issue the command to the hardware */
2481 2823 mtip_issue_ncq_command(port, slot);
2482 2824  
2483   - /* Set the command's timeout value.*/
2484   - port->commands[slot].comp_time = jiffies +
2485   - msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
2486   -
2487 2825 clear_bit(slot, port->cmds_to_issue);
2488 2826 }
2489 2827  
2490   - clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
2491   - } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
2492   - mtip_ftl_rebuild_poll(dd);
2493   - clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
  2828 + clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
  2829 + } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
  2830 + if (!mtip_ftl_rebuild_poll(dd))
  2831 + set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
  2832 + &dd->dd_flag);
  2833 + clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2494 2834 }
2495   - clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
  2835 + clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2496 2836  
2497   - if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
  2837 + if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2498 2838 break;
2499 2839 }
2500 2840 return 0;
... ... @@ -2513,6 +2853,9 @@
2513 2853 int i;
2514 2854 int rv;
2515 2855 unsigned int num_command_slots;
  2856 + unsigned long timeout, timetaken;
  2857 + unsigned char *buf;
  2858 + struct smart_attr attr242;
2516 2859  
2517 2860 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
2518 2861  
... ... @@ -2547,7 +2890,7 @@
2547 2890 /* Allocate memory for the command list. */
2548 2891 dd->port->command_list =
2549 2892 dmam_alloc_coherent(&dd->pdev->dev,
2550   - HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  2893 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2551 2894 &dd->port->command_list_dma,
2552 2895 GFP_KERNEL);
2553 2896 if (!dd->port->command_list) {
... ... @@ -2560,7 +2903,7 @@
2560 2903 /* Clear the memory we have allocated. */
2561 2904 memset(dd->port->command_list,
2562 2905 0,
2563   - HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
  2906 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
2564 2907  
2565 2908 /* Setup the addresse of the RX FIS. */
2566 2909 dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
2567 2910  
... ... @@ -2576,10 +2919,19 @@
2576 2919 dd->port->identify_dma = dd->port->command_tbl_dma +
2577 2920 HW_CMD_TBL_AR_SZ;
2578 2921  
2579   - /* Setup the address of the sector buffer. */
  2922 + /* Setup the address of the sector buffer - for some non-ncq cmds */
2580 2923 dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
2581 2924 dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
2582 2925  
  2926 + /* Setup the address of the log buf - for read log command */
  2927 + dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
  2928 + dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
  2929 +
  2930 + /* Setup the address of the smart buf - for smart read data command */
  2931 + dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
  2932 + dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
  2933 +
  2934 +
2583 2935 /* Point the command headers at the command tables. */
2584 2936 for (i = 0; i < num_command_slots; i++) {
2585 2937 dd->port->commands[i].command_header =
2586 2938  
... ... @@ -2623,14 +2975,43 @@
2623 2975 dd->port->mmio + i*0x80 + PORT_SDBV;
2624 2976 }
2625 2977  
2626   - /* Reset the HBA. */
2627   - if (mtip_hba_reset(dd) < 0) {
2628   - dev_err(&dd->pdev->dev,
2629   - "Card did not reset within timeout\n");
2630   - rv = -EIO;
  2978 + timetaken = jiffies;
  2979 + timeout = jiffies + msecs_to_jiffies(30000);
  2980 + while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
  2981 + time_before(jiffies, timeout)) {
  2982 + mdelay(100);
  2983 + }
  2984 + if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
  2985 + timetaken = jiffies - timetaken;
  2986 + dev_warn(&dd->pdev->dev,
  2987 + "Surprise removal detected at %u ms\n",
  2988 + jiffies_to_msecs(timetaken));
  2989 + rv = -ENODEV;
  2990 + goto out2 ;
  2991 + }
  2992 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
  2993 + timetaken = jiffies - timetaken;
  2994 + dev_warn(&dd->pdev->dev,
  2995 + "Removal detected at %u ms\n",
  2996 + jiffies_to_msecs(timetaken));
  2997 + rv = -EFAULT;
2631 2998 goto out2;
2632 2999 }
2633 3000  
  3001 + /* Conditionally reset the HBA. */
  3002 + if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
  3003 + if (mtip_hba_reset(dd) < 0) {
  3004 + dev_err(&dd->pdev->dev,
  3005 + "Card did not reset within timeout\n");
  3006 + rv = -EIO;
  3007 + goto out2;
  3008 + }
  3009 + } else {
  3010 + /* Clear any pending interrupts on the HBA */
  3011 + writel(readl(dd->mmio + HOST_IRQ_STAT),
  3012 + dd->mmio + HOST_IRQ_STAT);
  3013 + }
  3014 +
2634 3015 mtip_init_port(dd->port);
2635 3016 mtip_start_port(dd->port);
2636 3017  
... ... @@ -2660,6 +3041,12 @@
2660 3041 mod_timer(&dd->port->cmd_timer,
2661 3042 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
2662 3043  
  3044 +
  3045 + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
  3046 + rv = -EFAULT;
  3047 + goto out3;
  3048 + }
  3049 +
2663 3050 if (mtip_get_identify(dd->port, NULL) < 0) {
2664 3051 rv = -EFAULT;
2665 3052 goto out3;
2666 3053  
... ... @@ -2667,10 +3054,47 @@
2667 3054  
2668 3055 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2669 3056 MTIP_FTL_REBUILD_MAGIC) {
2670   - set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
  3057 + set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
2671 3058 return MTIP_FTL_REBUILD_MAGIC;
2672 3059 }
2673 3060 mtip_dump_identify(dd->port);
  3061 +
  3062 + /* check write protect, over temp and rebuild statuses */
  3063 + rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
  3064 + dd->port->log_buf,
  3065 + dd->port->log_buf_dma, 1);
  3066 + if (rv) {
  3067 + dev_warn(&dd->pdev->dev,
  3068 + "Error in READ LOG EXT (10h) command\n");
  3069 + /* non-critical error, don't fail the load */
  3070 + } else {
  3071 + buf = (unsigned char *)dd->port->log_buf;
  3072 + if (buf[259] & 0x1) {
  3073 + dev_info(&dd->pdev->dev,
  3074 + "Write protect bit is set.\n");
  3075 + set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
  3076 + }
  3077 + if (buf[288] == 0xF7) {
  3078 + dev_info(&dd->pdev->dev,
  3079 + "Exceeded Tmax, drive in thermal shutdown.\n");
  3080 + set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
  3081 + }
  3082 + if (buf[288] == 0xBF) {
  3083 + dev_info(&dd->pdev->dev,
  3084 + "Drive indicates rebuild has failed.\n");
  3085 + /* TODO */
  3086 + }
  3087 + }
  3088 +
  3089 + /* get write protect progess */
  3090 + memset(&attr242, 0, sizeof(struct smart_attr));
  3091 + if (mtip_get_smart_attr(dd->port, 242, &attr242))
  3092 + dev_warn(&dd->pdev->dev,
  3093 + "Unable to check write protect progress\n");
  3094 + else
  3095 + dev_info(&dd->pdev->dev,
  3096 + "Write protect progress: %d%% (%d blocks)\n",
  3097 + attr242.cur, attr242.data);
2674 3098 return rv;
2675 3099  
2676 3100 out3:
... ... @@ -2688,7 +3112,7 @@
2688 3112  
2689 3113 /* Free the command/command header memory. */
2690 3114 dmam_free_coherent(&dd->pdev->dev,
2691   - HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  3115 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2692 3116 dd->port->command_list,
2693 3117 dd->port->command_list_dma);
2694 3118 out1:
2695 3119  
... ... @@ -2712,9 +3136,12 @@
2712 3136 * Send standby immediate (E0h) to the drive so that it
2713 3137 * saves its state.
2714 3138 */
2715   - if (atomic_read(&dd->drv_cleanup_done) != true) {
  3139 + if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
2716 3140  
2717   - mtip_standby_immediate(dd->port);
  3141 + if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
  3142 + if (mtip_standby_immediate(dd->port))
  3143 + dev_warn(&dd->pdev->dev,
  3144 + "STANDBY IMMEDIATE failed\n");
2718 3145  
2719 3146 /* de-initialize the port. */
2720 3147 mtip_deinit_port(dd->port);
... ... @@ -2734,7 +3161,7 @@
2734 3161  
2735 3162 /* Free the command/command header memory. */
2736 3163 dmam_free_coherent(&dd->pdev->dev,
2737   - HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  3164 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2738 3165 dd->port->command_list,
2739 3166 dd->port->command_list_dma);
2740 3167 /* Free the memory allocated for the for structure. */
... ... @@ -2892,6 +3319,9 @@
2892 3319 if (!dd)
2893 3320 return -ENOTTY;
2894 3321  
  3322 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
  3323 + return -ENOTTY;
  3324 +
2895 3325 switch (cmd) {
2896 3326 case BLKFLSBUF:
2897 3327 return -ENOTTY;
... ... @@ -2927,6 +3357,9 @@
2927 3357 if (!dd)
2928 3358 return -ENOTTY;
2929 3359  
  3360 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
  3361 + return -ENOTTY;
  3362 +
2930 3363 switch (cmd) {
2931 3364 case BLKFLSBUF:
2932 3365 return -ENOTTY;
... ... @@ -3049,6 +3482,24 @@
3049 3482 int nents = 0;
3050 3483 int tag = 0;
3051 3484  
  3485 + if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
  3486 + if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
  3487 + &dd->dd_flag))) {
  3488 + bio_endio(bio, -ENXIO);
  3489 + return;
  3490 + }
  3491 + if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
  3492 + bio_endio(bio, -ENODATA);
  3493 + return;
  3494 + }
  3495 + if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
  3496 + &dd->dd_flag) &&
  3497 + bio_data_dir(bio))) {
  3498 + bio_endio(bio, -ENODATA);
  3499 + return;
  3500 + }
  3501 + }
  3502 +
3052 3503 if (unlikely(!bio_has_data(bio))) {
3053 3504 blk_queue_flush(queue, 0);
3054 3505 bio_endio(bio, 0);
... ... @@ -3061,7 +3512,7 @@
3061 3512  
3062 3513 if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
3063 3514 dev_warn(&dd->pdev->dev,
3064   - "Maximum number of SGL entries exceeded");
  3515 + "Maximum number of SGL entries exceeded\n");
3065 3516 bio_io_error(bio);
3066 3517 mtip_hw_release_scatterlist(dd, tag);
3067 3518 return;
3068 3519  
... ... @@ -3210,8 +3661,10 @@
3210 3661 kobject_put(kobj);
3211 3662 }
3212 3663  
3213   - if (dd->mtip_svc_handler)
  3664 + if (dd->mtip_svc_handler) {
  3665 + set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3214 3666 return rv; /* service thread created for handling rebuild */
  3667 + }
3215 3668  
3216 3669 start_service_thread:
3217 3670 sprintf(thd_name, "mtip_svc_thd_%02d", index);
3218 3671  
... ... @@ -3220,12 +3673,15 @@
3220 3673 dd, thd_name);
3221 3674  
3222 3675 if (IS_ERR(dd->mtip_svc_handler)) {
3223   - printk(KERN_ERR "mtip32xx: service thread failed to start\n");
  3676 + dev_err(&dd->pdev->dev, "service thread failed to start\n");
3224 3677 dd->mtip_svc_handler = NULL;
3225 3678 rv = -EFAULT;
3226 3679 goto kthread_run_error;
3227 3680 }
3228 3681  
  3682 + if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
  3683 + rv = wait_for_rebuild;
  3684 +
3229 3685 return rv;
3230 3686  
3231 3687 kthread_run_error:
3232 3688  
... ... @@ -3266,16 +3722,18 @@
3266 3722 struct kobject *kobj;
3267 3723  
3268 3724 if (dd->mtip_svc_handler) {
3269   - set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
  3725 + set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
3270 3726 wake_up_interruptible(&dd->port->svc_wait);
3271 3727 kthread_stop(dd->mtip_svc_handler);
3272 3728 }
3273 3729  
3274   - /* Clean up the sysfs attributes managed by the protocol layer. */
3275   - kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3276   - if (kobj) {
3277   - mtip_hw_sysfs_exit(dd, kobj);
3278   - kobject_put(kobj);
  3730 + /* Clean up the sysfs attributes, if created */
  3731 + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
  3732 + kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
  3733 + if (kobj) {
  3734 + mtip_hw_sysfs_exit(dd, kobj);
  3735 + kobject_put(kobj);
  3736 + }
3279 3737 }
3280 3738  
3281 3739 /*
... ... @@ -3283,6 +3741,11 @@
3283 3741 * from /dev
3284 3742 */
3285 3743 del_gendisk(dd->disk);
  3744 +
  3745 + spin_lock(&rssd_index_lock);
  3746 + ida_remove(&rssd_index_ida, dd->index);
  3747 + spin_unlock(&rssd_index_lock);
  3748 +
3286 3749 blk_cleanup_queue(dd->queue);
3287 3750 dd->disk = NULL;
3288 3751 dd->queue = NULL;
... ... @@ -3312,6 +3775,11 @@
3312 3775  
3313 3776 /* Delete our gendisk structure, and cleanup the blk queue. */
3314 3777 del_gendisk(dd->disk);
  3778 +
  3779 + spin_lock(&rssd_index_lock);
  3780 + ida_remove(&rssd_index_ida, dd->index);
  3781 + spin_unlock(&rssd_index_lock);
  3782 +
3315 3783 blk_cleanup_queue(dd->queue);
3316 3784 dd->disk = NULL;
3317 3785 dd->queue = NULL;
... ... @@ -3359,11 +3827,6 @@
3359 3827 return -ENOMEM;
3360 3828 }
3361 3829  
3362   - /* Set the atomic variable as 1 in case of SRSI */
3363   - atomic_set(&dd->drv_cleanup_done, true);
3364   -
3365   - atomic_set(&dd->resumeflag, false);
3366   -
3367 3830 /* Attach the private data to this PCI device. */
3368 3831 pci_set_drvdata(pdev, dd);
3369 3832  
... ... @@ -3420,7 +3883,8 @@
3420 3883 * instance number.
3421 3884 */
3422 3885 instance++;
3423   -
  3886 + if (rv != MTIP_FTL_REBUILD_MAGIC)
  3887 + set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3424 3888 goto done;
3425 3889  
3426 3890 block_initialize_err:
... ... @@ -3434,9 +3898,6 @@
3434 3898 pci_set_drvdata(pdev, NULL);
3435 3899 return rv;
3436 3900 done:
3437   - /* Set the atomic variable as 0 in case of SRSI */
3438   - atomic_set(&dd->drv_cleanup_done, true);
3439   -
3440 3901 return rv;
3441 3902 }
3442 3903  
3443 3904  
... ... @@ -3452,8 +3913,10 @@
3452 3913 struct driver_data *dd = pci_get_drvdata(pdev);
3453 3914 int counter = 0;
3454 3915  
  3916 + set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
  3917 +
3455 3918 if (mtip_check_surprise_removal(pdev)) {
3456   - while (atomic_read(&dd->drv_cleanup_done) == false) {
  3919 + while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
3457 3920 counter++;
3458 3921 msleep(20);
3459 3922 if (counter == 10) {
... ... @@ -3463,8 +3926,6 @@
3463 3926 }
3464 3927 }
3465 3928 }
3466   - /* Set the atomic variable as 1 in case of SRSI */
3467   - atomic_set(&dd->drv_cleanup_done, true);
3468 3929  
3469 3930 /* Clean up the block layer. */
3470 3931 mtip_block_remove(dd);
... ... @@ -3493,7 +3954,7 @@
3493 3954 return -EFAULT;
3494 3955 }
3495 3956  
3496   - atomic_set(&dd->resumeflag, true);
  3957 + set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
3497 3958  
3498 3959 /* Disable ports & interrupts then send standby immediate */
3499 3960 rv = mtip_block_suspend(dd);
... ... @@ -3559,7 +4020,7 @@
3559 4020 dev_err(&pdev->dev, "Unable to resume\n");
3560 4021  
3561 4022 err:
3562   - atomic_set(&dd->resumeflag, false);
  4023 + clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
3563 4024  
3564 4025 return rv;
3565 4026 }
3566 4027  
3567 4028  
3568 4029  
3569 4030  
... ... @@ -3608,18 +4069,25 @@
3608 4069 */
3609 4070 static int __init mtip_init(void)
3610 4071 {
  4072 + int error;
  4073 +
3611 4074 printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
3612 4075  
3613 4076 /* Allocate a major block device number to use with this driver. */
3614   - mtip_major = register_blkdev(0, MTIP_DRV_NAME);
3615   - if (mtip_major < 0) {
  4077 + error = register_blkdev(0, MTIP_DRV_NAME);
  4078 + if (error <= 0) {
3616 4079 printk(KERN_ERR "Unable to register block device (%d)\n",
3617   - mtip_major);
  4080 + error);
3618 4081 return -EBUSY;
3619 4082 }
  4083 + mtip_major = error;
3620 4084  
3621 4085 /* Register our PCI operations. */
3622   - return pci_register_driver(&mtip_pci_driver);
  4086 + error = pci_register_driver(&mtip_pci_driver);
  4087 + if (error)
  4088 + unregister_blkdev(mtip_major, MTIP_DRV_NAME);
  4089 +
  4090 + return error;
3623 4091 }
3624 4092  
3625 4093 /*
drivers/block/mtip32xx/mtip32xx.h
... ... @@ -34,8 +34,8 @@
34 34 /* offset of Device Control register in PCIe extended capabilites space */
35 35 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
36 36  
37   -/* # of times to retry timed out IOs */
38   -#define MTIP_MAX_RETRIES 5
  37 +/* # of times to retry timed out/failed IOs */
  38 +#define MTIP_MAX_RETRIES 2
39 39  
40 40 /* Various timeout values in ms */
41 41 #define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
42 42  
... ... @@ -114,13 +114,42 @@
114 114 #define __force_bit2int (unsigned int __force)
115 115  
116 116 /* below are bit numbers in 'flags' defined in mtip_port */
117   -#define MTIP_FLAG_IC_ACTIVE_BIT 0
118   -#define MTIP_FLAG_EH_ACTIVE_BIT 1
119   -#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
120   -#define MTIP_FLAG_ISSUE_CMDS_BIT 4
121   -#define MTIP_FLAG_REBUILD_BIT 5
122   -#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
  117 +#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
  118 +#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
  119 +#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
  120 +#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */
  121 +#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
  122 + (1 << MTIP_PF_EH_ACTIVE_BIT) | \
  123 + (1 << MTIP_PF_SE_ACTIVE_BIT) | \
  124 + (1 << MTIP_PF_DM_ACTIVE_BIT))
123 125  
  126 +#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
  127 +#define MTIP_PF_ISSUE_CMDS_BIT 5
  128 +#define MTIP_PF_REBUILD_BIT 6
  129 +#define MTIP_PF_SVC_THD_STOP_BIT 8
  130 +
  131 +/* below are bit numbers in 'dd_flag' defined in driver_data */
  132 +#define MTIP_DDF_REMOVE_PENDING_BIT 1
  133 +#define MTIP_DDF_OVER_TEMP_BIT 2
  134 +#define MTIP_DDF_WRITE_PROTECT_BIT 3
  135 +#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
  136 + (1 << MTIP_DDF_OVER_TEMP_BIT) | \
  137 + (1 << MTIP_DDF_WRITE_PROTECT_BIT))
  138 +
  139 +#define MTIP_DDF_CLEANUP_BIT 5
  140 +#define MTIP_DDF_RESUME_BIT 6
  141 +#define MTIP_DDF_INIT_DONE_BIT 7
  142 +#define MTIP_DDF_REBUILD_FAILED_BIT 8
  143 +
  144 +__packed struct smart_attr{
  145 + u8 attr_id;
  146 + u16 flags;
  147 + u8 cur;
  148 + u8 worst;
  149 + u32 data;
  150 + u8 res[3];
  151 +};
  152 +
124 153 /* Register Frame Information Structure (FIS), host to device. */
125 154 struct host_to_dev_fis {
126 155 /*
... ... @@ -345,6 +374,12 @@
345 374 * when the command slot and all associated data structures
346 375 * are no longer needed.
347 376 */
  377 + u16 *log_buf;
  378 + dma_addr_t log_buf_dma;
  379 +
  380 + u8 *smart_buf;
  381 + dma_addr_t smart_buf_dma;
  382 +
348 383 unsigned long allocated[SLOTBITS_IN_LONGS];
349 384 /*
350 385 * used to queue commands when an internal command is in progress
... ... @@ -368,6 +403,7 @@
368 403 * Timer used to complete commands that have been active for too long.
369 404 */
370 405 struct timer_list cmd_timer;
  406 + unsigned long ic_pause_timer;
371 407 /*
372 408 * Semaphore used to block threads if there are no
373 409 * command slots available.
374 410  
... ... @@ -404,13 +440,9 @@
404 440  
405 441 unsigned slot_groups; /* number of slot groups the product supports */
406 442  
407   - atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
408   -
409 443 unsigned long index; /* Index to determine the disk name */
410 444  
411   - unsigned int ftlrebuildflag; /* FTL rebuild flag */
412   -
413   - atomic_t resumeflag; /* Atomic variable to track suspend/resume */
  445 + unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
414 446  
415 447 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
416 448 };
drivers/block/virtio_blk.c
... ... @@ -351,6 +351,7 @@
351 351 cap_str_10, cap_str_2);
352 352  
353 353 set_capacity(vblk->disk, capacity);
  354 + revalidate_disk(vblk->disk);
354 355 done:
355 356 mutex_unlock(&vblk->config_lock);
356 357 }
drivers/block/xen-blkback/blkback.c
... ... @@ -321,6 +321,7 @@
321 321 static void xen_blkbk_unmap(struct pending_req *req)
322 322 {
323 323 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
  324 + struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
324 325 unsigned int i, invcount = 0;
325 326 grant_handle_t handle;
326 327 int ret;
327 328  
328 329  
... ... @@ -332,25 +333,12 @@
332 333 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
333 334 GNTMAP_host_map, handle);
334 335 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
  336 + pages[invcount] = virt_to_page(vaddr(req, i));
335 337 invcount++;
336 338 }
337 339  
338   - ret = HYPERVISOR_grant_table_op(
339   - GNTTABOP_unmap_grant_ref, unmap, invcount);
  340 + ret = gnttab_unmap_refs(unmap, pages, invcount, false);
340 341 BUG_ON(ret);
341   - /*
342   - * Note, we use invcount, so nr->pages, so we can't index
343   - * using vaddr(req, i).
344   - */
345   - for (i = 0; i < invcount; i++) {
346   - ret = m2p_remove_override(
347   - virt_to_page(unmap[i].host_addr), false);
348   - if (ret) {
349   - pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
350   - (unsigned long)unmap[i].host_addr);
351   - continue;
352   - }
353   - }
354 342 }
355 343  
356 344 static int xen_blkbk_map(struct blkif_request *req,
... ... @@ -378,7 +366,7 @@
378 366 pending_req->blkif->domid);
379 367 }
380 368  
381   - ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
  369 + ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
382 370 BUG_ON(ret);
383 371  
384 372 /*
... ... @@ -398,15 +386,6 @@
398 386 if (ret)
399 387 continue;
400 388  
401   - ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
402   - blkbk->pending_page(pending_req, i), NULL);
403   - if (ret) {
404   - pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
405   - (unsigned long)map[i].dev_bus_addr, ret);
406   - /* We could switch over to GNTTABOP_copy */
407   - continue;
408   - }
409   -
410 389 seg[i].buf = map[i].dev_bus_addr |
411 390 (req->u.rw.seg[i].first_sect << 9);
412 391 }
413 392  
414 393  
... ... @@ -419,22 +398,19 @@
419 398 int err = 0;
420 399 int status = BLKIF_RSP_OKAY;
421 400 struct block_device *bdev = blkif->vbd.bdev;
  401 + unsigned long secure;
422 402  
423 403 blkif->st_ds_req++;
424 404  
425 405 xen_blkif_get(blkif);
426   - if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
427   - blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
428   - unsigned long secure = (blkif->vbd.discard_secure &&
429   - (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
430   - BLKDEV_DISCARD_SECURE : 0;
431   - err = blkdev_issue_discard(bdev,
432   - req->u.discard.sector_number,
433   - req->u.discard.nr_sectors,
434   - GFP_KERNEL, secure);
435   - } else
436   - err = -EOPNOTSUPP;
  406 + secure = (blkif->vbd.discard_secure &&
  407 + (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
  408 + BLKDEV_DISCARD_SECURE : 0;
437 409  
  410 + err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
  411 + req->u.discard.nr_sectors,
  412 + GFP_KERNEL, secure);
  413 +
438 414 if (err == -EOPNOTSUPP) {
439 415 pr_debug(DRV_PFX "discard op failed, not supported\n");
440 416 status = BLKIF_RSP_EOPNOTSUPP;
... ... @@ -830,7 +806,7 @@
830 806 int i, mmap_pages;
831 807 int rc = 0;
832 808  
833   - if (!xen_pv_domain())
  809 + if (!xen_domain())
834 810 return -ENODEV;
835 811  
836 812 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
drivers/block/xen-blkback/common.h
... ... @@ -146,11 +146,6 @@
146 146 BLKIF_PROTOCOL_X86_64 = 3,
147 147 };
148 148  
149   -enum blkif_backend_type {
150   - BLKIF_BACKEND_PHY = 1,
151   - BLKIF_BACKEND_FILE = 2,
152   -};
153   -
154 149 struct xen_vbd {
155 150 /* What the domain refers to this vbd as. */
156 151 blkif_vdev_t handle;
... ... @@ -177,7 +172,6 @@
177 172 unsigned int irq;
178 173 /* Comms information. */
179 174 enum blkif_protocol blk_protocol;
180   - enum blkif_backend_type blk_backend_type;
181 175 union blkif_back_rings blk_rings;
182 176 void *blk_ring;
183 177 /* The VBD attached to this interface. */
drivers/block/xen-blkback/xenbus.c
... ... @@ -381,72 +381,49 @@
381 381 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
382 382 "%d", state);
383 383 if (err)
384   - xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
  384 + dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
385 385  
386 386 return err;
387 387 }
388 388  
389   -int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
  389 +static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
390 390 {
391 391 struct xenbus_device *dev = be->dev;
392 392 struct xen_blkif *blkif = be->blkif;
393   - char *type;
394 393 int err;
395 394 int state = 0;
  395 + struct block_device *bdev = be->blkif->vbd.bdev;
  396 + struct request_queue *q = bdev_get_queue(bdev);
396 397  
397   - type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
398   - if (!IS_ERR(type)) {
399   - if (strncmp(type, "file", 4) == 0) {
400   - state = 1;
401   - blkif->blk_backend_type = BLKIF_BACKEND_FILE;
  398 + if (blk_queue_discard(q)) {
  399 + err = xenbus_printf(xbt, dev->nodename,
  400 + "discard-granularity", "%u",
  401 + q->limits.discard_granularity);
  402 + if (err) {
  403 + dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
  404 + return;
402 405 }
403   - if (strncmp(type, "phy", 3) == 0) {
404   - struct block_device *bdev = be->blkif->vbd.bdev;
405   - struct request_queue *q = bdev_get_queue(bdev);
406   - if (blk_queue_discard(q)) {
407   - err = xenbus_printf(xbt, dev->nodename,
408   - "discard-granularity", "%u",
409   - q->limits.discard_granularity);
410   - if (err) {
411   - xenbus_dev_fatal(dev, err,
412   - "writing discard-granularity");
413   - goto kfree;
414   - }
415   - err = xenbus_printf(xbt, dev->nodename,
416   - "discard-alignment", "%u",
417   - q->limits.discard_alignment);
418   - if (err) {
419   - xenbus_dev_fatal(dev, err,
420   - "writing discard-alignment");
421   - goto kfree;
422   - }
423   - state = 1;
424   - blkif->blk_backend_type = BLKIF_BACKEND_PHY;
425   - }
426   - /* Optional. */
427   - err = xenbus_printf(xbt, dev->nodename,
428   - "discard-secure", "%d",
429   - blkif->vbd.discard_secure);
430   - if (err) {
431   - xenbus_dev_fatal(dev, err,
432   - "writting discard-secure");
433   - goto kfree;
434   - }
  406 + err = xenbus_printf(xbt, dev->nodename,
  407 + "discard-alignment", "%u",
  408 + q->limits.discard_alignment);
  409 + if (err) {
  410 + dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
  411 + return;
435 412 }
436   - } else {
437   - err = PTR_ERR(type);
438   - xenbus_dev_fatal(dev, err, "reading type");
439   - goto out;
  413 + state = 1;
  414 + /* Optional. */
  415 + err = xenbus_printf(xbt, dev->nodename,
  416 + "discard-secure", "%d",
  417 + blkif->vbd.discard_secure);
  418 + if (err) {
  419 + dev_warn(&dev->dev, "writing discard-secure (%d)", err);
  420 + return;
  421 + }
440 422 }
441   -
442 423 err = xenbus_printf(xbt, dev->nodename, "feature-discard",
443 424 "%d", state);
444 425 if (err)
445   - xenbus_dev_fatal(dev, err, "writing feature-discard");
446   -kfree:
447   - kfree(type);
448   -out:
449   - return err;
  426 + dev_warn(&dev->dev, "writing feature-discard (%d)", err);
450 427 }
451 428 int xen_blkbk_barrier(struct xenbus_transaction xbt,
452 429 struct backend_info *be, int state)
... ... @@ -457,7 +434,7 @@
457 434 err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
458 435 "%d", state);
459 436 if (err)
460   - xenbus_dev_fatal(dev, err, "writing feature-barrier");
  437 + dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
461 438  
462 439 return err;
463 440 }
464 441  
465 442  
... ... @@ -689,14 +666,12 @@
689 666 return;
690 667 }
691 668  
692   - err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
693   - if (err)
694   - goto abort;
  669 + /* If we can't advertise it is OK. */
  670 + xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
695 671  
696   - err = xen_blkbk_discard(xbt, be);
  672 + xen_blkbk_discard(xbt, be);
697 673  
698   - /* If we can't advertise it is OK. */
699   - err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
  674 + xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
700 675  
701 676 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
702 677 (unsigned long long)vbd_sz(&be->blkif->vbd));
drivers/block/xen-blkfront.c
... ... @@ -43,6 +43,7 @@
43 43 #include <linux/slab.h>
44 44 #include <linux/mutex.h>
45 45 #include <linux/scatterlist.h>
  46 +#include <linux/bitmap.h>
46 47  
47 48 #include <xen/xen.h>
48 49 #include <xen/xenbus.h>
... ... @@ -81,6 +82,7 @@
81 82 */
82 83 struct blkfront_info
83 84 {
  85 + spinlock_t io_lock;
84 86 struct mutex mutex;
85 87 struct xenbus_device *xbdev;
86 88 struct gendisk *gd;
... ... @@ -105,8 +107,6 @@
105 107 int is_ready;
106 108 };
107 109  
108   -static DEFINE_SPINLOCK(blkif_io_lock);
109   -
110 110 static unsigned int nr_minors;
111 111 static unsigned long *minors;
112 112 static DEFINE_SPINLOCK(minor_lock);
... ... @@ -177,8 +177,7 @@
177 177  
178 178 spin_lock(&minor_lock);
179 179 if (find_next_bit(minors, end, minor) >= end) {
180   - for (; minor < end; ++minor)
181   - __set_bit(minor, minors);
  180 + bitmap_set(minors, minor, nr);
182 181 rc = 0;
183 182 } else
184 183 rc = -EBUSY;
... ... @@ -193,8 +192,7 @@
193 192  
194 193 BUG_ON(end > nr_minors);
195 194 spin_lock(&minor_lock);
196   - for (; minor < end; ++minor)
197   - __clear_bit(minor, minors);
  195 + bitmap_clear(minors, minor, nr);
198 196 spin_unlock(&minor_lock);
199 197 }
200 198  
... ... @@ -419,7 +417,7 @@
419 417 struct request_queue *rq;
420 418 struct blkfront_info *info = gd->private_data;
421 419  
422   - rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
  420 + rq = blk_init_queue(do_blkif_request, &info->io_lock);
423 421 if (rq == NULL)
424 422 return -1;
425 423  
426 424  
... ... @@ -636,14 +634,14 @@
636 634 if (info->rq == NULL)
637 635 return;
638 636  
639   - spin_lock_irqsave(&blkif_io_lock, flags);
  637 + spin_lock_irqsave(&info->io_lock, flags);
640 638  
641 639 /* No more blkif_request(). */
642 640 blk_stop_queue(info->rq);
643 641  
644 642 /* No more gnttab callback work. */
645 643 gnttab_cancel_free_callback(&info->callback);
646   - spin_unlock_irqrestore(&blkif_io_lock, flags);
  644 + spin_unlock_irqrestore(&info->io_lock, flags);
647 645  
648 646 /* Flush gnttab callback work. Must be done with no locks held. */
649 647 flush_work_sync(&info->work);
650 648  
651 649  
... ... @@ -675,16 +673,16 @@
675 673 {
676 674 struct blkfront_info *info = container_of(work, struct blkfront_info, work);
677 675  
678   - spin_lock_irq(&blkif_io_lock);
  676 + spin_lock_irq(&info->io_lock);
679 677 if (info->connected == BLKIF_STATE_CONNECTED)
680 678 kick_pending_request_queues(info);
681   - spin_unlock_irq(&blkif_io_lock);
  679 + spin_unlock_irq(&info->io_lock);
682 680 }
683 681  
684 682 static void blkif_free(struct blkfront_info *info, int suspend)
685 683 {
686 684 /* Prevent new requests being issued until we fix things up. */
687   - spin_lock_irq(&blkif_io_lock);
  685 + spin_lock_irq(&info->io_lock);
688 686 info->connected = suspend ?
689 687 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
690 688 /* No more blkif_request(). */
... ... @@ -692,7 +690,7 @@
692 690 blk_stop_queue(info->rq);
693 691 /* No more gnttab callback work. */
694 692 gnttab_cancel_free_callback(&info->callback);
695   - spin_unlock_irq(&blkif_io_lock);
  693 + spin_unlock_irq(&info->io_lock);
696 694  
697 695 /* Flush gnttab callback work. Must be done with no locks held. */
698 696 flush_work_sync(&info->work);
699 697  
... ... @@ -728,10 +726,10 @@
728 726 struct blkfront_info *info = (struct blkfront_info *)dev_id;
729 727 int error;
730 728  
731   - spin_lock_irqsave(&blkif_io_lock, flags);
  729 + spin_lock_irqsave(&info->io_lock, flags);
732 730  
733 731 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
734   - spin_unlock_irqrestore(&blkif_io_lock, flags);
  732 + spin_unlock_irqrestore(&info->io_lock, flags);
735 733 return IRQ_HANDLED;
736 734 }
737 735  
... ... @@ -816,7 +814,7 @@
816 814  
817 815 kick_pending_request_queues(info);
818 816  
819   - spin_unlock_irqrestore(&blkif_io_lock, flags);
  817 + spin_unlock_irqrestore(&info->io_lock, flags);
820 818  
821 819 return IRQ_HANDLED;
822 820 }
... ... @@ -991,6 +989,7 @@
991 989 }
992 990  
993 991 mutex_init(&info->mutex);
  992 + spin_lock_init(&info->io_lock);
994 993 info->xbdev = dev;
995 994 info->vdevice = vdevice;
996 995 info->connected = BLKIF_STATE_DISCONNECTED;
... ... @@ -1068,7 +1067,7 @@
1068 1067  
1069 1068 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1070 1069  
1071   - spin_lock_irq(&blkif_io_lock);
  1070 + spin_lock_irq(&info->io_lock);
1072 1071  
1073 1072 /* Now safe for us to use the shared ring */
1074 1073 info->connected = BLKIF_STATE_CONNECTED;
... ... @@ -1079,7 +1078,7 @@
1079 1078 /* Kick any other new requests queued since we resumed */
1080 1079 kick_pending_request_queues(info);
1081 1080  
1082   - spin_unlock_irq(&blkif_io_lock);
  1081 + spin_unlock_irq(&info->io_lock);
1083 1082  
1084 1083 return 0;
1085 1084 }
1086 1085  
... ... @@ -1277,10 +1276,10 @@
1277 1276 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1278 1277  
1279 1278 /* Kick pending requests. */
1280   - spin_lock_irq(&blkif_io_lock);
  1279 + spin_lock_irq(&info->io_lock);
1281 1280 info->connected = BLKIF_STATE_CONNECTED;
1282 1281 kick_pending_request_queues(info);
1283   - spin_unlock_irq(&blkif_io_lock);
  1282 + spin_unlock_irq(&info->io_lock);
1284 1283  
1285 1284 add_disk(info->gd);
1286 1285  
... ... @@ -1410,7 +1409,6 @@
1410 1409 mutex_lock(&blkfront_mutex);
1411 1410  
1412 1411 bdev = bdget_disk(disk, 0);
1413   - bdput(bdev);
1414 1412  
1415 1413 if (bdev->bd_openers)
1416 1414 goto out;
... ... @@ -1441,6 +1439,7 @@
1441 1439 }
1442 1440  
1443 1441 out:
  1442 + bdput(bdev);
1444 1443 mutex_unlock(&blkfront_mutex);
1445 1444 return 0;
1446 1445 }