Commit 88523a61558a040546bf7d8b079ae0755d8e7005

Authored by Sam Bradshaw
Committed by Jens Axboe
1 parent 5c8a0fbba5

block: Add driver for Micron RealSSD pcie flash cards

This adds mtip32xx, a driver supporting Micron's line of
PCI Express flash storage cards.

Signed-off-by: Asai Thambi S P <asamymuthupa@micron.com>
Signed-off-by: Sam Bradshaw <sbradshaw@micron.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Showing 6 changed files with 4043 additions and 0 deletions Side-by-side Diff

drivers/block/Kconfig
... ... @@ -116,6 +116,8 @@
116 116  
117 117 source "drivers/block/paride/Kconfig"
118 118  
  119 +source "drivers/block/mtip32xx/Kconfig"
  120 +
119 121 config BLK_CPQ_DA
120 122 tristate "Compaq SMART2 support"
121 123 depends on PCI && VIRT_TO_BUS
drivers/block/Makefile
... ... @@ -39,6 +39,7 @@
39 39 obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
40 40 obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
41 41 obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
  42 +obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
42 43  
43 44 swim_mod-y := swim.o swim_asm.o
drivers/block/mtip32xx/Kconfig
  1 +#
  2 +# mtip32xx device driver configuration
  3 +#
  4 +
  5 +config BLK_DEV_PCIESSD_MTIP32XX
  6 + tristate "Block Device Driver for Micron PCIe SSDs"
  7 + depends on HOTPLUG_PCI_PCIE
  8 + help
  9 + This enables the block driver for Micron PCIe SSDs.
drivers/block/mtip32xx/Makefile
  1 +#
  2 +# Makefile for Block device driver for Micron PCIe SSD
  3 +#
  4 +
  5 +obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx.o
drivers/block/mtip32xx/mtip32xx.c
Changes suppressed. Click to show
  1 +/*
  2 + * Driver for the Micron P320 SSD
  3 + * Copyright (C) 2011 Micron Technology, Inc.
  4 + *
  5 + * Portions of this code were derived from works subjected to the
  6 + * following copyright:
  7 + * Copyright (C) 2009 Integrated Device Technology, Inc.
  8 + *
  9 + * This program is free software; you can redistribute it and/or modify
  10 + * it under the terms of the GNU General Public License as published by
  11 + * the Free Software Foundation; either version 2 of the License, or
  12 + * (at your option) any later version.
  13 + *
  14 + * This program is distributed in the hope that it will be useful,
  15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17 + * GNU General Public License for more details.
  18 + *
  19 + */
  20 +
  21 +#include <linux/pci.h>
  22 +#include <linux/interrupt.h>
  23 +#include <linux/ata.h>
  24 +#include <linux/delay.h>
  25 +#include <linux/hdreg.h>
  26 +#include <linux/uaccess.h>
  27 +#include <linux/random.h>
  28 +#include <linux/smp.h>
  29 +#include <linux/compat.h>
  30 +#include <linux/fs.h>
  31 +#include <linux/genhd.h>
  32 +#include <linux/blkdev.h>
  33 +#include <linux/bio.h>
  34 +#include <linux/dma-mapping.h>
  35 +#include <linux/idr.h>
  36 +#include <../drivers/ata/ahci.h>
  37 +#include "mtip32xx.h"
  38 +
  39 +#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
  40 +#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
  41 +#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
  42 +#define HW_PORT_PRIV_DMA_SZ \
  43 + (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
  44 +
  45 +#define HOST_HSORG 0xFC
  46 +#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
  47 +#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
  48 +#define HSORG_HWREV 0xFF00
  49 +#define HSORG_STYLE 0x8
  50 +#define HSORG_SLOTGROUPS 0x7
  51 +
  52 +#define PORT_COMMAND_ISSUE 0x38
  53 +#define PORT_SDBV 0x7C
  54 +
  55 +#define PORT_OFFSET 0x100
  56 +#define PORT_MEM_SIZE 0x80
  57 +
  58 +#define PORT_IRQ_ERR \
  59 + (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
  60 + PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
  61 + PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
  62 + PORT_IRQ_OVERFLOW)
  63 +#define PORT_IRQ_LEGACY \
  64 + (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
  65 +#define PORT_IRQ_HANDLED \
  66 + (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
  67 + PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
  68 + PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
  69 +#define DEF_PORT_IRQ \
  70 + (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
  71 +
  72 +/* product numbers */
  73 +#define MTIP_PRODUCT_UNKNOWN 0x00
  74 +#define MTIP_PRODUCT_ASICFPGA 0x11
  75 +
  76 +/* Device instance number, incremented each time a device is probed. */
  77 +static int instance;
  78 +
  79 +/*
  80 + * Global variable used to hold the major block device number
  81 + * allocated in mtip_init().
  82 + */
  83 +int mtip_major;
  84 +
  85 +static DEFINE_SPINLOCK(rssd_index_lock);
  86 +static DEFINE_IDA(rssd_index_ida);
  87 +
  88 +struct mtip_compat_ide_task_request_s {
  89 + __u8 io_ports[8];
  90 + __u8 hob_ports[8];
  91 + ide_reg_valid_t out_flags;
  92 + ide_reg_valid_t in_flags;
  93 + int data_phase;
  94 + int req_cmd;
  95 + compat_ulong_t out_size;
  96 + compat_ulong_t in_size;
  97 +};
  98 +
  99 +static int mtip_exec_internal_command(struct mtip_port *port,
  100 + void *fis,
  101 + int fisLen,
  102 + dma_addr_t buffer,
  103 + int bufLen,
  104 + u32 opts,
  105 + gfp_t atomic,
  106 + unsigned long timeout);
  107 +
  108 +/*
  109 + * Obtain an empty command slot.
  110 + *
  111 + * This function needs to be reentrant since it could be called
  112 + * at the same time on multiple CPUs. The allocation of the
  113 + * command slot must be atomic.
  114 + *
  115 + * @port Pointer to the port data structure.
  116 + *
  117 + * return value
  118 + * >= 0 Index of command slot obtained.
  119 + * -1 No command slots available.
  120 + */
  121 +static int get_slot(struct mtip_port *port)
  122 +{
  123 + int slot, i;
  124 + unsigned int num_command_slots = port->dd->slot_groups * 32;
  125 +
  126 + /*
  127 + * Try 10 times, because there is a small race here.
  128 + * that's ok, because it's still cheaper than a lock.
  129 + *
  130 + * Race: Since this section is not protected by lock, same bit
  131 + * could be chosen by different process contexts running in
  132 + * different processor. So instead of costly lock, we are going
  133 + * with loop.
  134 + */
  135 + for (i = 0; i < 10; i++) {
  136 + slot = find_next_zero_bit(port->allocated,
  137 + num_command_slots, 1);
  138 + if ((slot < num_command_slots) &&
  139 + (!test_and_set_bit(slot, port->allocated)))
  140 + return slot;
  141 + }
  142 + dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
  143 +
  144 + if (mtip_check_surprise_removal(port->dd->pdev)) {
  145 + /* Device not present, clean outstanding commands */
  146 + mtip_command_cleanup(port->dd);
  147 + }
  148 + return -1;
  149 +}
  150 +
  151 +/*
  152 + * Release a command slot.
  153 + *
  154 + * @port Pointer to the port data structure.
  155 + * @tag Tag of command to release
  156 + *
  157 + * return value
  158 + * None
  159 + */
  160 +static inline void release_slot(struct mtip_port *port, int tag)
  161 +{
  162 + smp_mb__before_clear_bit();
  163 + clear_bit(tag, port->allocated);
  164 + smp_mb__after_clear_bit();
  165 +}
  166 +
  167 +/*
  168 + * Issue a command to the hardware.
  169 + *
  170 + * Set the appropriate bit in the s_active and Command Issue hardware
  171 + * registers, causing hardware command processing to begin.
  172 + *
  173 + * @port Pointer to the port structure.
  174 + * @tag The tag of the command to be issued.
  175 + *
  176 + * return value
  177 + * None
  178 + */
  179 +static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
  180 +{
  181 + unsigned long flags = 0;
  182 +
  183 + atomic_set(&port->commands[tag].active, 1);
  184 +
  185 + spin_lock_irqsave(&port->cmd_issue_lock, flags);
  186 +
  187 + writel((1 << MTIP_TAG_BIT(tag)),
  188 + port->s_active[MTIP_TAG_INDEX(tag)]);
  189 + writel((1 << MTIP_TAG_BIT(tag)),
  190 + port->cmd_issue[MTIP_TAG_INDEX(tag)]);
  191 +
  192 + spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
  193 +}
  194 +
  195 +/*
  196 + * Called periodically to see if any read/write commands are
  197 + * taking too long to complete.
  198 + *
  199 + * @data Pointer to the PORT data structure.
  200 + *
  201 + * return value
  202 + * None
  203 + */
  204 +void mtip_timeout_function(unsigned long int data)
  205 +{
  206 + struct mtip_port *port = (struct mtip_port *) data;
  207 + struct host_to_dev_fis *fis;
  208 + struct mtip_cmd *command;
  209 + int tag, cmdto_cnt = 0;
  210 + unsigned int bit, group;
  211 + unsigned int num_command_slots = port->dd->slot_groups * 32;
  212 +
  213 + if (unlikely(!port))
  214 + return;
  215 +
  216 + if (atomic_read(&port->dd->resumeflag) == true) {
  217 + mod_timer(&port->cmd_timer,
  218 + jiffies + msecs_to_jiffies(30000));
  219 + return;
  220 + }
  221 +
  222 + for (tag = 0; tag < num_command_slots; tag++) {
  223 + /*
  224 + * Skip internal command slot as it has
  225 + * its own timeout mechanism
  226 + */
  227 + if (tag == MTIP_TAG_INTERNAL)
  228 + continue;
  229 +
  230 + if (atomic_read(&port->commands[tag].active) &&
  231 + (time_after(jiffies, port->commands[tag].comp_time))) {
  232 + group = tag >> 5;
  233 + bit = tag & 0x1f;
  234 +
  235 + command = &port->commands[tag];
  236 + fis = (struct host_to_dev_fis *) command->command;
  237 +
  238 + dev_warn(&port->dd->pdev->dev,
  239 + "Timeout for command tag %d\n", tag);
  240 +
  241 + cmdto_cnt++;
  242 + if (cmdto_cnt == 1)
  243 + atomic_inc(&port->dd->eh_active);
  244 +
  245 + /*
  246 + * Clear the completed bit. This should prevent
  247 + * any interrupt handlers from trying to retire
  248 + * the command.
  249 + */
  250 + writel(1 << bit, port->completed[group]);
  251 +
  252 + /* Call the async completion callback. */
  253 + if (likely(command->async_callback))
  254 + command->async_callback(command->async_data,
  255 + -EIO);
  256 + command->async_callback = NULL;
  257 + command->comp_func = NULL;
  258 +
  259 + /* Unmap the DMA scatter list entries */
  260 + dma_unmap_sg(&port->dd->pdev->dev,
  261 + command->sg,
  262 + command->scatter_ents,
  263 + command->direction);
  264 +
  265 + /*
  266 + * Clear the allocated bit and active tag for the
  267 + * command.
  268 + */
  269 + atomic_set(&port->commands[tag].active, 0);
  270 + release_slot(port, tag);
  271 +
  272 + up(&port->cmd_slot);
  273 + }
  274 + }
  275 +
  276 + if (cmdto_cnt) {
  277 + dev_warn(&port->dd->pdev->dev,
  278 + "%d commands timed out: restarting port",
  279 + cmdto_cnt);
  280 + mtip_restart_port(port);
  281 + atomic_dec(&port->dd->eh_active);
  282 + }
  283 +
  284 + /* Restart the timer */
  285 + mod_timer(&port->cmd_timer,
  286 + jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
  287 +}
  288 +
  289 +/*
  290 + * IO completion function.
  291 + *
  292 + * This completion function is called by the driver ISR when a
  293 + * command that was issued by the kernel completes. It first calls the
  294 + * asynchronous completion function which normally calls back into the block
  295 + * layer passing the asynchronous callback data, then unmaps the
  296 + * scatter list associated with the completed command, and finally
  297 + * clears the allocated bit associated with the completed command.
  298 + *
  299 + * @port Pointer to the port data structure.
  300 + * @tag Tag of the command.
  301 + * @data Pointer to driver_data.
  302 + * @status Completion status.
  303 + *
  304 + * return value
  305 + * None
  306 + */
  307 +static void mtip_async_complete(struct mtip_port *port,
  308 + int tag,
  309 + void *data,
  310 + int status)
  311 +{
  312 + struct mtip_cmd *command;
  313 + struct driver_data *dd = data;
  314 + int cb_status = status ? -EIO : 0;
  315 +
  316 + if (unlikely(!dd) || unlikely(!port))
  317 + return;
  318 +
  319 + command = &port->commands[tag];
  320 +
  321 + if (unlikely(status == PORT_IRQ_TF_ERR)) {
  322 + dev_warn(&port->dd->pdev->dev,
  323 + "Command tag %d failed due to TFE\n", tag);
  324 + }
  325 +
  326 + /* Upper layer callback */
  327 + if (likely(command->async_callback))
  328 + command->async_callback(command->async_data, cb_status);
  329 +
  330 + command->async_callback = NULL;
  331 + command->comp_func = NULL;
  332 +
  333 + /* Unmap the DMA scatter list entries */
  334 + dma_unmap_sg(&dd->pdev->dev,
  335 + command->sg,
  336 + command->scatter_ents,
  337 + command->direction);
  338 +
  339 + /* Clear the allocated and active bits for the command */
  340 + atomic_set(&port->commands[tag].active, 0);
  341 + release_slot(port, tag);
  342 +
  343 + up(&port->cmd_slot);
  344 +}
  345 +
  346 +/*
  347 + * Internal command completion callback function.
  348 + *
  349 + * This function is normally called by the driver ISR when an internal
  350 + * command completed. This function signals the command completion by
  351 + * calling complete().
  352 + *
  353 + * @port Pointer to the port data structure.
  354 + * @tag Tag of the command that has completed.
  355 + * @data Pointer to a completion structure.
  356 + * @status Completion status.
  357 + *
  358 + * return value
  359 + * None
  360 + */
  361 +static void mtip_completion(struct mtip_port *port,
  362 + int tag,
  363 + void *data,
  364 + int status)
  365 +{
  366 + struct mtip_cmd *command = &port->commands[tag];
  367 + struct completion *waiting = data;
  368 + if (unlikely(status == PORT_IRQ_TF_ERR))
  369 + dev_warn(&port->dd->pdev->dev,
  370 + "Internal command %d completed with TFE\n", tag);
  371 +
  372 + command->async_callback = NULL;
  373 + command->comp_func = NULL;
  374 +
  375 + complete(waiting);
  376 +}
  377 +
  378 +/*
  379 + * Enable/disable the reception of FIS
  380 + *
  381 + * @port Pointer to the port data structure
  382 + * @enable 1 to enable, 0 to disable
  383 + *
  384 + * return value
  385 + * Previous state: 1 enabled, 0 disabled
  386 + */
  387 +static int mtip_enable_fis(struct mtip_port *port, int enable)
  388 +{
  389 + u32 tmp;
  390 +
  391 + /* enable FIS reception */
  392 + tmp = readl(port->mmio + PORT_CMD);
  393 + if (enable)
  394 + writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
  395 + else
  396 + writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
  397 +
  398 + /* Flush */
  399 + readl(port->mmio + PORT_CMD);
  400 +
  401 + return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
  402 +}
  403 +
  404 +/*
  405 + * Enable/disable the DMA engine
  406 + *
  407 + * @port Pointer to the port data structure
  408 + * @enable 1 to enable, 0 to disable
  409 + *
  410 + * return value
  411 + * Previous state: 1 enabled, 0 disabled.
  412 + */
  413 +static int mtip_enable_engine(struct mtip_port *port, int enable)
  414 +{
  415 + u32 tmp;
  416 +
  417 + /* enable FIS reception */
  418 + tmp = readl(port->mmio + PORT_CMD);
  419 + if (enable)
  420 + writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
  421 + else
  422 + writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
  423 +
  424 + readl(port->mmio + PORT_CMD);
  425 + return (((tmp & PORT_CMD_START) == PORT_CMD_START));
  426 +}
  427 +
  428 +/*
  429 + * Enables the port DMA engine and FIS reception.
  430 + *
  431 + * return value
  432 + * None
  433 + */
  434 +static inline void mtip_start_port(struct mtip_port *port)
  435 +{
  436 + /* Enable FIS reception */
  437 + mtip_enable_fis(port, 1);
  438 +
  439 + /* Enable the DMA engine */
  440 + mtip_enable_engine(port, 1);
  441 +}
  442 +
  443 +/*
  444 + * Deinitialize a port by disabling port interrupts, the DMA engine,
  445 + * and FIS reception.
  446 + *
  447 + * @port Pointer to the port structure
  448 + *
  449 + * return value
  450 + * None
  451 + */
  452 +static inline void mtip_deinit_port(struct mtip_port *port)
  453 +{
  454 + /* Disable interrupts on this port */
  455 + writel(0, port->mmio + PORT_IRQ_MASK);
  456 +
  457 + /* Disable the DMA engine */
  458 + mtip_enable_engine(port, 0);
  459 +
  460 + /* Disable FIS reception */
  461 + mtip_enable_fis(port, 0);
  462 +}
  463 +
  464 +/*
  465 + * Initialize a port.
  466 + *
  467 + * This function deinitializes the port by calling mtip_deinit_port() and
  468 + * then initializes it by setting the command header and RX FIS addresses,
  469 + * clearing the SError register and any pending port interrupts before
  470 + * re-enabling the default set of port interrupts.
  471 + *
  472 + * @port Pointer to the port structure.
  473 + *
  474 + * return value
  475 + * None
  476 + */
  477 +static void mtip_init_port(struct mtip_port *port)
  478 +{
  479 + int i;
  480 + mtip_deinit_port(port);
  481 +
  482 + /* Program the command list base and FIS base addresses */
  483 + if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
  484 + writel((port->command_list_dma >> 16) >> 16,
  485 + port->mmio + PORT_LST_ADDR_HI);
  486 + writel((port->rxfis_dma >> 16) >> 16,
  487 + port->mmio + PORT_FIS_ADDR_HI);
  488 + }
  489 +
  490 + writel(port->command_list_dma & 0xffffffff,
  491 + port->mmio + PORT_LST_ADDR);
  492 + writel(port->rxfis_dma & 0xffffffff, port->mmio + PORT_FIS_ADDR);
  493 +
  494 + /* Clear SError */
  495 + writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
  496 +
  497 + /* reset the completed registers.*/
  498 + for (i = 0; i < port->dd->slot_groups; i++)
  499 + writel(0xFFFFFFFF, port->completed[i]);
  500 +
  501 + /* Clear any pending interrupts for this port */
  502 + writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
  503 +
  504 + /* Enable port interrupts */
  505 + writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
  506 +}
  507 +
  508 +/*
  509 + * Reset the HBA (without sleeping)
  510 + *
  511 + * Just like hba_reset, except does not call sleep, so can be
  512 + * run from interrupt/tasklet context.
  513 + *
  514 + * @dd Pointer to the driver data structure.
  515 + *
  516 + * return value
  517 + * 0 The reset was successful.
  518 + * -1 The HBA Reset bit did not clear.
  519 + */
  520 +int hba_reset_nosleep(struct driver_data *dd)
  521 +{
  522 + unsigned long timeout;
  523 +
  524 + /* Chip quirk: quiesce any chip function */
  525 + mdelay(10);
  526 +
  527 + /* Set the reset bit */
  528 + writel(HOST_RESET, dd->mmio + HOST_CTL);
  529 +
  530 + /* Flush */
  531 + readl(dd->mmio + HOST_CTL);
  532 +
  533 + /*
  534 + * Wait 10ms then spin for up to 1 second
  535 + * waiting for reset acknowledgement
  536 + */
  537 + timeout = jiffies + msecs_to_jiffies(1000);
  538 + mdelay(10);
  539 + while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
  540 + && time_before(jiffies, timeout))
  541 + mdelay(1);
  542 +
  543 + if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
  544 + return -1;
  545 +
  546 + return 0;
  547 +}
  548 +
  549 +/*
  550 + * Restart a port
  551 + *
  552 + * @port Pointer to the port data structure.
  553 + *
  554 + * return value
  555 + * None
  556 + */
  557 +void mtip_restart_port(struct mtip_port *port)
  558 +{
  559 + unsigned long timeout;
  560 +
  561 + /* Disable the DMA engine */
  562 + mtip_enable_engine(port, 0);
  563 +
  564 + /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
  565 + timeout = jiffies + msecs_to_jiffies(500);
  566 + while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
  567 + && time_before(jiffies, timeout))
  568 + ;
  569 +
  570 + /*
  571 + * Chip quirk: escalate to hba reset if
  572 + * PxCMD.CR not clear after 500 ms
  573 + */
  574 + if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
  575 + dev_warn(&port->dd->pdev->dev,
  576 + "PxCMD.CR not clear, escalating reset\n");
  577 +
  578 + if (hba_reset_nosleep(port->dd))
  579 + dev_err(&port->dd->pdev->dev,
  580 + "HBA reset escalation failed.\n");
  581 +
  582 + /* 30 ms delay before com reset to quiesce chip */
  583 + mdelay(30);
  584 + }
  585 +
  586 + dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
  587 +
  588 + /* Set PxSCTL.DET */
  589 + writel(readl(port->mmio + PORT_SCR_CTL) |
  590 + 1, port->mmio + PORT_SCR_CTL);
  591 + readl(port->mmio + PORT_SCR_CTL);
  592 +
  593 + /* Wait 1 ms to quiesce chip function */
  594 + timeout = jiffies + msecs_to_jiffies(1);
  595 + while (time_before(jiffies, timeout))
  596 + ;
  597 +
  598 + /* Clear PxSCTL.DET */
  599 + writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
  600 + port->mmio + PORT_SCR_CTL);
  601 + readl(port->mmio + PORT_SCR_CTL);
  602 +
  603 + /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
  604 + timeout = jiffies + msecs_to_jiffies(500);
  605 + while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
  606 + && time_before(jiffies, timeout))
  607 + ;
  608 +
  609 + if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
  610 + dev_warn(&port->dd->pdev->dev,
  611 + "COM reset failed\n");
  612 +
  613 + /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
  614 + writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
  615 +
  616 + /* Enable the DMA engine */
  617 + mtip_enable_engine(port, 1);
  618 +}
  619 +
  620 +/*
  621 + * Helper function for tag logging
  622 + */
  623 +static void print_tags(struct driver_data *dd,
  624 + char *msg,
  625 + unsigned long *tagbits)
  626 +{
  627 + unsigned int tag, count = 0;
  628 +
  629 + for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
  630 + if (test_bit(tag, tagbits))
  631 + count++;
  632 + }
  633 + if (count)
  634 + dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
  635 +}
  636 +
  637 +/*
  638 + * Handle an error.
  639 + *
  640 + * @dd Pointer to the DRIVER_DATA structure.
  641 + *
  642 + * return value
  643 + * None
  644 + */
  645 +static void mtip_handle_tfe(struct driver_data *dd)
  646 +{
  647 + int group, tag, bit, reissue;
  648 + struct mtip_port *port;
  649 + struct mtip_cmd *command;
  650 + u32 completed;
  651 + struct host_to_dev_fis *fis;
  652 + unsigned long tagaccum[SLOTBITS_IN_LONGS];
  653 +
  654 + dev_warn(&dd->pdev->dev, "Taskfile error\n");
  655 +
  656 + port = dd->port;
  657 +
  658 + /* Stop the timer to prevent command timeouts. */
  659 + del_timer(&port->cmd_timer);
  660 +
  661 + /* Set eh_active */
  662 + atomic_inc(&dd->eh_active);
  663 +
  664 + /* Loop through all the groups */
  665 + for (group = 0; group < dd->slot_groups; group++) {
  666 + completed = readl(port->completed[group]);
  667 +
  668 + /* clear completed status register in the hardware.*/
  669 + writel(completed, port->completed[group]);
  670 +
  671 + /* clear the tag accumulator */
  672 + memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
  673 +
  674 + /* Process successfully completed commands */
  675 + for (bit = 0; bit < 32 && completed; bit++) {
  676 + if (!(completed & (1<<bit)))
  677 + continue;
  678 + tag = (group << 5) + bit;
  679 +
  680 + /* Skip the internal command slot */
  681 + if (tag == MTIP_TAG_INTERNAL)
  682 + continue;
  683 +
  684 + command = &port->commands[tag];
  685 + if (likely(command->comp_func)) {
  686 + set_bit(tag, tagaccum);
  687 + atomic_set(&port->commands[tag].active, 0);
  688 + command->comp_func(port,
  689 + tag,
  690 + command->comp_data,
  691 + 0);
  692 + } else {
  693 + dev_err(&port->dd->pdev->dev,
  694 + "Missing completion func for tag %d",
  695 + tag);
  696 + if (mtip_check_surprise_removal(dd->pdev)) {
  697 + mtip_command_cleanup(dd);
  698 + /* don't proceed further */
  699 + return;
  700 + }
  701 + }
  702 + }
  703 + }
  704 + print_tags(dd, "TFE tags completed:", tagaccum);
  705 +
  706 + /* Restart the port */
  707 + mdelay(20);
  708 + mtip_restart_port(port);
  709 +
  710 + /* clear the tag accumulator */
  711 + memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
  712 +
  713 + /* Loop through all the groups */
  714 + for (group = 0; group < dd->slot_groups; group++) {
  715 + for (bit = 0; bit < 32; bit++) {
  716 + reissue = 1;
  717 + tag = (group << 5) + bit;
  718 +
  719 + /* If the active bit is set re-issue the command */
  720 + if (atomic_read(&port->commands[tag].active) == 0)
  721 + continue;
  722 +
  723 + fis = (struct host_to_dev_fis *)
  724 + port->commands[tag].command;
  725 +
  726 + /* Should re-issue? */
  727 + if (tag == MTIP_TAG_INTERNAL ||
  728 + fis->command == ATA_CMD_SET_FEATURES)
  729 + reissue = 0;
  730 +
  731 + /*
  732 + * First check if this command has
  733 + * exceeded its retries.
  734 + */
  735 + if (reissue &&
  736 + (port->commands[tag].retries-- > 0)) {
  737 +
  738 + set_bit(tag, tagaccum);
  739 +
  740 + /* Update the timeout value. */
  741 + port->commands[tag].comp_time =
  742 + jiffies + msecs_to_jiffies(
  743 + MTIP_NCQ_COMMAND_TIMEOUT_MS);
  744 + /* Re-issue the command. */
  745 + mtip_issue_ncq_command(port, tag);
  746 +
  747 + continue;
  748 + }
  749 +
  750 + /* Retire a command that will not be reissued */
  751 + dev_warn(&port->dd->pdev->dev,
  752 + "retiring tag %d\n", tag);
  753 + atomic_set(&port->commands[tag].active, 0);
  754 +
  755 + if (port->commands[tag].comp_func)
  756 + port->commands[tag].comp_func(
  757 + port,
  758 + tag,
  759 + port->commands[tag].comp_data,
  760 + PORT_IRQ_TF_ERR);
  761 + else
  762 + dev_warn(&port->dd->pdev->dev,
  763 + "Bad completion for tag %d\n",
  764 + tag);
  765 + }
  766 + }
  767 + print_tags(dd, "TFE tags reissued:", tagaccum);
  768 +
  769 + /* Decrement eh_active */
  770 + atomic_dec(&dd->eh_active);
  771 +
  772 + mod_timer(&port->cmd_timer,
  773 + jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
  774 +}
  775 +
  776 +/*
  777 + * Handle a set device bits interrupt
  778 + */
  779 +static inline void mtip_process_sdbf(struct driver_data *dd)
  780 +{
  781 + struct mtip_port *port = dd->port;
  782 + int group, tag, bit;
  783 + u32 completed;
  784 + struct mtip_cmd *command;
  785 +
  786 + /* walk all bits in all slot groups */
  787 + for (group = 0; group < dd->slot_groups; group++) {
  788 + completed = readl(port->completed[group]);
  789 +
  790 + /* clear completed status register in the hardware.*/
  791 + writel(completed, port->completed[group]);
  792 +
  793 + /* Process completed commands. */
  794 + for (bit = 0;
  795 + (bit < 32) && completed;
  796 + bit++, completed >>= 1) {
  797 + if (completed & 0x01) {
  798 + tag = (group << 5) | bit;
  799 +
  800 + /* skip internal command slot. */
  801 + if (unlikely(tag == MTIP_TAG_INTERNAL))
  802 + continue;
  803 +
  804 + command = &port->commands[tag];
  805 +
  806 + /* make internal callback */
  807 + if (likely(command->comp_func)) {
  808 + command->comp_func(
  809 + port,
  810 + tag,
  811 + command->comp_data,
  812 + 0);
  813 + } else {
  814 + dev_warn(&dd->pdev->dev,
  815 + "Null completion "
  816 + "for tag %d",
  817 + tag);
  818 +
  819 + if (mtip_check_surprise_removal(
  820 + dd->pdev)) {
  821 + mtip_command_cleanup(dd);
  822 + return;
  823 + }
  824 + }
  825 + }
  826 + }
  827 + }
  828 +}
  829 +
  830 +/*
  831 + * Process legacy pio and d2h interrupts
  832 + */
  833 +static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
  834 +{
  835 + struct mtip_port *port = dd->port;
  836 + struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
  837 +
  838 + if (port->internal_cmd_in_progress &&
  839 + cmd != NULL &&
  840 + !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
  841 + & (1 << MTIP_TAG_INTERNAL))) {
  842 + if (cmd->comp_func) {
  843 + cmd->comp_func(port,
  844 + MTIP_TAG_INTERNAL,
  845 + cmd->comp_data,
  846 + 0);
  847 + return;
  848 + }
  849 + }
  850 +
  851 + dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
  852 +
  853 + return;
  854 +}
  855 +
  856 +/*
  857 + * Demux and handle errors
  858 + */
  859 +static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
  860 +{
  861 + if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
  862 + mtip_handle_tfe(dd);
  863 +
  864 + if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
  865 + dev_warn(&dd->pdev->dev,
  866 + "Clearing PxSERR.DIAG.x\n");
  867 + writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
  868 + }
  869 +
  870 + if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
  871 + dev_warn(&dd->pdev->dev,
  872 + "Clearing PxSERR.DIAG.n\n");
  873 + writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
  874 + }
  875 +
  876 + if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
  877 + dev_warn(&dd->pdev->dev,
  878 + "Port stat errors %x unhandled\n",
  879 + (port_stat & ~PORT_IRQ_HANDLED));
  880 + }
  881 +}
  882 +
  883 +static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
  884 +{
  885 + struct driver_data *dd = (struct driver_data *) data;
  886 + struct mtip_port *port = dd->port;
  887 + u32 hba_stat, port_stat;
  888 + int rv = IRQ_NONE;
  889 +
  890 + hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
  891 + if (hba_stat) {
  892 + rv = IRQ_HANDLED;
  893 +
  894 + /* Acknowledge the interrupt status on the port.*/
  895 + port_stat = readl(port->mmio + PORT_IRQ_STAT);
  896 + writel(port_stat, port->mmio + PORT_IRQ_STAT);
  897 +
  898 + /* Demux port status */
  899 + if (likely(port_stat & PORT_IRQ_SDB_FIS))
  900 + mtip_process_sdbf(dd);
  901 +
  902 + if (unlikely(port_stat & PORT_IRQ_ERR)) {
  903 + if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
  904 + mtip_command_cleanup(dd);
  905 + /* don't proceed further */
  906 + return IRQ_HANDLED;
  907 + }
  908 +
  909 + mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
  910 + }
  911 +
  912 + if (unlikely(port_stat & PORT_IRQ_LEGACY))
  913 + mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
  914 + }
  915 +
  916 + /* acknowledge interrupt */
  917 + writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
  918 +
  919 + return rv;
  920 +}
  921 +
  922 +/*
  923 + * Wrapper for mtip_handle_irq
  924 + * (ignores return code)
  925 + */
  926 +static void mtip_tasklet(unsigned long data)
  927 +{
  928 + mtip_handle_irq((struct driver_data *) data);
  929 +}
  930 +
  931 +/*
  932 + * HBA interrupt subroutine.
  933 + *
  934 + * @irq IRQ number.
  935 + * @instance Pointer to the driver data structure.
  936 + *
  937 + * return value
  938 + * IRQ_HANDLED A HBA interrupt was pending and handled.
  939 + * IRQ_NONE This interrupt was not for the HBA.
  940 + */
  941 +static irqreturn_t mtip_irq_handler(int irq, void *instance)
  942 +{
  943 + struct driver_data *dd = instance;
  944 + tasklet_schedule(&dd->tasklet);
  945 + return IRQ_HANDLED;
  946 +}
  947 +
  948 +static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
  949 +{
  950 + atomic_set(&port->commands[tag].active, 1);
  951 + writel(1 << MTIP_TAG_BIT(tag),
  952 + port->cmd_issue[MTIP_TAG_INDEX(tag)]);
  953 +}
  954 +
  955 +/*
  956 + * Wait for port to quiesce
  957 + *
  958 + * @port Pointer to port data structure
  959 + * @timeout Max duration to wait (ms)
  960 + *
  961 + * return value
  962 + * 0 Success
  963 + * -EBUSY Commands still active
  964 + */
  965 +static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
  966 +{
  967 + unsigned long to;
  968 + unsigned int n, active;
  969 +
  970 + to = jiffies + msecs_to_jiffies(timeout);
  971 + do {
  972 + /*
  973 + * Ignore s_active bit 0 of array element 0.
  974 + * This bit will always be set
  975 + */
  976 + active = readl(port->s_active[0]) & 0xfffffffe;
  977 + for (n = 1; n < port->dd->slot_groups; n++)
  978 + active |= readl(port->s_active[n]);
  979 +
  980 + if (!active)
  981 + break;
  982 +
  983 + msleep(20);
  984 + } while (time_before(jiffies, to));
  985 +
  986 + return active ? -EBUSY : 0;
  987 +}
  988 +
  989 +/*
  990 + * Execute an internal command and wait for the completion.
  991 + *
  992 + * @port Pointer to the port data structure.
  993 + * @fis Pointer to the FIS that describes the command.
  994 + * @fisLen Length in WORDS of the FIS.
  995 + * @buffer DMA accessible for command data.
  996 + * @bufLen Length, in bytes, of the data buffer.
  997 + * @opts Command header options, excluding the FIS length
  998 + * and the number of PRD entries.
  999 + * @timeout Time in ms to wait for the command to complete.
  1000 + *
  1001 + * return value
  1002 + * 0 Command completed successfully.
  1003 + * -EFAULT The buffer address is not correctly aligned.
  1004 + * -EBUSY Internal command or other IO in progress.
  1005 + * -EAGAIN Time out waiting for command to complete.
  1006 + */
static int mtip_exec_internal_command(struct mtip_port *port,
					void *fis,
					int fisLen,
					dma_addr_t buffer,
					int bufLen,
					u32 opts,
					gfp_t atomic,
					unsigned long timeout)
{
	struct mtip_cmd_sg *command_sg;
	DECLARE_COMPLETION_ONSTACK(wait);
	int rv = 0;
	/* Internal commands always use the dedicated reserved tag. */
	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&port->dd->pdev->dev,
			"SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	/* Only one internal command should be running at a time */
	if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
		dev_warn(&port->dd->pdev->dev,
			"Internal command already active\n");
		return -EBUSY;
	}
	port->internal_cmd_in_progress = 1;

	/*
	 * 'atomic' selects the completion strategy: GFP_KERNEL means we
	 * may sleep (quiesce + wait_for_completion); anything else means
	 * we busy-poll the command-issue register instead.
	 */
	if (atomic == GFP_KERNEL) {
		/* wait for io to complete if non atomic */
		if (mtip_quiesce_io(port, 5000) < 0) {
			dev_warn(&port->dd->pdev->dev,
				"Failed to quiesce IO\n");
			release_slot(port, MTIP_TAG_INTERNAL);
			port->internal_cmd_in_progress = 0;
			return -EBUSY;
		}

		/* Set the completion function and data for the command. */
		int_cmd->comp_data = &wait;
		int_cmd->comp_func = mtip_completion;

	} else {
		/* Clear completion - we're going to poll */
		int_cmd->comp_data = NULL;
		int_cmd->comp_func = NULL;
	}

	/* Copy the command to the command table (fisLen is in 32-bit words). */
	memcpy(int_cmd->command, fis, fisLen*4);

	/* Populate the SG list */
	int_cmd->command_header->opts =
		cpu_to_le32(opts | fisLen);
	if (bufLen) {
		/* Single PRD entry directly after the command FIS area. */
		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info = cpu_to_le32((bufLen-1) & 0x3fffff);
		command_sg->dba = cpu_to_le32(buffer & 0xffffffff);
		/* Double 16-bit shift avoids UB when dma_addr_t is 32 bits. */
		command_sg->dba_upper = cpu_to_le32((buffer >> 16) >> 16);

		/* Bits 20:16 of opts hold the PRD entry count: one entry. */
		int_cmd->command_header->opts |= cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	int_cmd->command_header->byte_count = 0;

	/* Issue the command to the hardware */
	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

	/* Poll if atomic, wait_for_completion otherwise */
	if (atomic == GFP_KERNEL) {
		/* Wait for the command to complete or timeout. */
		if (wait_for_completion_timeout(
				&wait,
				msecs_to_jiffies(timeout)) == 0) {
			dev_err(&port->dd->pdev->dev,
				"Internal command did not complete [%d]\n",
				atomic);
			rv = -EAGAIN;
		}

		/*
		 * NOTE(review): on timeout the slot is still released at
		 * the bottom of this function even though CI suggests the
		 * device may still own the tag — confirm callers recover
		 * (e.g. via port restart) in that case.
		 */
		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
			dev_warn(&port->dd->pdev->dev,
				"Retiring internal command but CI is 1.\n");
		}

	} else {
		/* Spin for <timeout> checking if command still outstanding */
		/* Note: 'timeout' is reused here to hold a jiffies deadline. */
		timeout = jiffies + msecs_to_jiffies(timeout);

		while ((readl(
			port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL))
			&& time_before(jiffies, timeout))
			;

		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
			dev_err(&port->dd->pdev->dev,
				"Internal command did not complete [%d]\n",
				atomic);
			rv = -EAGAIN;
		}
	}

	/* Clear the allocated and active bits for the internal command. */
	atomic_set(&int_cmd->active, 0);
	release_slot(port, MTIP_TAG_INTERNAL);
	port->internal_cmd_in_progress = 0;

	return rv;
}
  1122 +
  1123 +/*
  1124 + * Byte-swap ATA ID strings.
  1125 + *
  1126 + * ATA identify data contains strings in byte-swapped 16-bit words.
  1127 + * They must be swapped (on all architectures) to be usable as C strings.
  1128 + * This function swaps bytes in-place.
  1129 + *
  1130 + * @buf The buffer location of the string
  1131 + * @len The number of bytes to swap
  1132 + *
  1133 + * return value
  1134 + * None
  1135 + */
  1136 +static inline void ata_swap_string(u16 *buf, unsigned int len)
  1137 +{
  1138 + int i;
  1139 + for (i = 0; i < (len/2); i++)
  1140 + be16_to_cpus(&buf[i]);
  1141 +}
  1142 +
  1143 +/*
  1144 + * Request the device identity information.
  1145 + *
  1146 + * If a user space buffer is not specified, i.e. is NULL, the
  1147 + * identify information is still read from the drive and placed
  1148 + * into the identify data buffer (@e port->identify) in the
  1149 + * port data structure.
  1150 + * When the identify buffer contains valid identify information @e
  1151 + * port->identify_valid is non-zero.
  1152 + *
  1153 + * @port Pointer to the port structure.
  1154 + * @user_buffer A user space buffer where the identify data should be
  1155 + * copied.
  1156 + *
  1157 + * return value
  1158 + * 0 Command completed successfully.
 * -EFAULT An error occurred while copying data to the user buffer.
  1160 + * -1 Command failed.
  1161 + */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	/* Serialize against all other internal commands. */
	down_write(&port->dd->internal_sem);

	/* Build the FIS: Register Host-to-Device (0x27) with Command bit. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type = 0x27;
	fis.opts = 1 << 7;
	fis.command = ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command; data lands in the pre-mapped identify buffer. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				GFP_KERNEL,
				MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40); /* model string*/
	ata_swap_string(port->identify + 23, 8); /* firmware string*/
	ata_swap_string(port->identify + 10, 20); /* serial# string*/
#else
	{
		/* Big endian: every 16-bit word arrives little-endian. */
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	/* Optionally hand a copy of the identify data back to user space. */
	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	up_write(&port->dd->internal_sem);
	return rv;
}
  1229 +
  1230 +/*
  1231 + * Issue a standby immediate command to the device.
  1232 + *
  1233 + * @port Pointer to the port structure.
  1234 + *
  1235 + * return value
  1236 + * 0 Command was executed successfully.
  1237 + * -1 An error occurred while executing the command.
  1238 + */
  1239 +static int mtip_standby_immediate(struct mtip_port *port)
  1240 +{
  1241 + int rv;
  1242 + struct host_to_dev_fis fis;
  1243 +
  1244 + down_write(&port->dd->internal_sem);
  1245 +
  1246 + /* Build the FIS. */
  1247 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1248 + fis.type = 0x27;
  1249 + fis.opts = 1 << 7;
  1250 + fis.command = ATA_CMD_STANDBYNOW1;
  1251 +
  1252 + /* Execute the command. Use a 15-second timeout for large drives. */
  1253 + rv = mtip_exec_internal_command(port,
  1254 + &fis,
  1255 + 5,
  1256 + 0,
  1257 + 0,
  1258 + 0,
  1259 + GFP_KERNEL,
  1260 + 15000);
  1261 +
  1262 + up_write(&port->dd->internal_sem);
  1263 +
  1264 + return rv;
  1265 +}
  1266 +
  1267 +/*
  1268 + * Get the drive capacity.
  1269 + *
  1270 + * @dd Pointer to the device data structure.
  1271 + * @sectors Pointer to the variable that will receive the sector count.
  1272 + *
  1273 + * return value
  1274 + * 1 Capacity was returned successfully.
  1275 + * 0 The identify information is invalid.
  1276 + */
  1277 +bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
  1278 +{
  1279 + struct mtip_port *port = dd->port;
  1280 + u64 total, raw0, raw1, raw2, raw3;
  1281 + raw0 = port->identify[100];
  1282 + raw1 = port->identify[101];
  1283 + raw2 = port->identify[102];
  1284 + raw3 = port->identify[103];
  1285 + total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
  1286 + *sectors = total;
  1287 + return (bool) !!port->identify_valid;
  1288 +}
  1289 +
  1290 +/*
  1291 + * Reset the HBA.
  1292 + *
  1293 + * Resets the HBA by setting the HBA Reset bit in the Global
  1294 + * HBA Control register. After setting the HBA Reset bit the
  1295 + * function waits for 1 second before reading the HBA Reset
  1296 + * bit to make sure it has cleared. If HBA Reset is not clear
  1297 + * an error is returned. Cannot be used in non-blockable
  1298 + * context.
  1299 + *
  1300 + * @dd Pointer to the driver data structure.
  1301 + *
  1302 + * return value
  1303 + * 0 The reset was successful.
  1304 + * -1 The HBA Reset bit did not clear.
  1305 + */
  1306 +static int mtip_hba_reset(struct driver_data *dd)
  1307 +{
  1308 + mtip_deinit_port(dd->port);
  1309 +
  1310 + /* Set the reset bit */
  1311 + writel(HOST_RESET, dd->mmio + HOST_CTL);
  1312 +
  1313 + /* Flush */
  1314 + readl(dd->mmio + HOST_CTL);
  1315 +
  1316 + /* Wait for reset to clear */
  1317 + ssleep(1);
  1318 +
  1319 + /* Check the bit has cleared */
  1320 + if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
  1321 + dev_err(&dd->pdev->dev,
  1322 + "Reset bit did not clear.\n");
  1323 + return -1;
  1324 + }
  1325 +
  1326 + return 0;
  1327 +}
  1328 +
  1329 +/*
  1330 + * Display the identify command data.
  1331 + *
  1332 + * @port Pointer to the port data structure.
  1333 + *
  1334 + * return value
  1335 + * None
  1336 + */
  1337 +static void mtip_dump_identify(struct mtip_port *port)
  1338 +{
  1339 + sector_t sectors;
  1340 + unsigned short revid;
  1341 + char cbuf[42];
  1342 +
  1343 + if (!port->identify_valid)
  1344 + return;
  1345 +
  1346 + strlcpy(cbuf, (char *)(port->identify+10), 21);
  1347 + dev_info(&port->dd->pdev->dev,
  1348 + "Serial No.: %s\n", cbuf);
  1349 +
  1350 + strlcpy(cbuf, (char *)(port->identify+23), 9);
  1351 + dev_info(&port->dd->pdev->dev,
  1352 + "Firmware Ver.: %s\n", cbuf);
  1353 +
  1354 + strlcpy(cbuf, (char *)(port->identify+27), 41);
  1355 + dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
  1356 +
  1357 + if (mtip_hw_get_capacity(port->dd, &sectors))
  1358 + dev_info(&port->dd->pdev->dev,
  1359 + "Capacity: %llu sectors (%llu MB)\n",
  1360 + (u64)sectors,
  1361 + ((u64)sectors) * ATA_SECT_SIZE >> 20);
  1362 +
  1363 + pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
  1364 + switch (revid & 0xff) {
  1365 + case 0x1:
  1366 + strlcpy(cbuf, "A0", 3);
  1367 + break;
  1368 + case 0x3:
  1369 + strlcpy(cbuf, "A2", 3);
  1370 + break;
  1371 + default:
  1372 + strlcpy(cbuf, "?", 2);
  1373 + break;
  1374 + }
  1375 + dev_info(&port->dd->pdev->dev,
  1376 + "Card Type: %s\n", cbuf);
  1377 +}
  1378 +
  1379 +/*
  1380 + * Map the commands scatter list into the command table.
  1381 + *
  1382 + * @command Pointer to the command.
  1383 + * @nents Number of scatter list entries.
  1384 + *
  1385 + * return value
  1386 + * None
  1387 + */
  1388 +static inline void fill_command_sg(struct driver_data *dd,
  1389 + struct mtip_cmd *command,
  1390 + int nents)
  1391 +{
  1392 + int n;
  1393 + unsigned int dma_len;
  1394 + struct mtip_cmd_sg *command_sg;
  1395 + struct scatterlist *sg = command->sg;
  1396 +
  1397 + command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
  1398 +
  1399 + for (n = 0; n < nents; n++) {
  1400 + dma_len = sg_dma_len(sg);
  1401 + if (dma_len > 0x400000)
  1402 + dev_err(&dd->pdev->dev,
  1403 + "DMA segment length truncated\n");
  1404 + command_sg->info = cpu_to_le32((dma_len-1) & 0x3fffff);
  1405 +#if (BITS_PER_LONG == 64)
  1406 + *((unsigned long *) &command_sg->dba) =
  1407 + cpu_to_le64(sg_dma_address(sg));
  1408 +#else
  1409 + command_sg->dba = cpu_to_le32(sg_dma_address(sg));
  1410 + command_sg->dba_upper =
  1411 + cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
  1412 +#endif
  1413 + command_sg++;
  1414 + sg++;
  1415 + }
  1416 +}
  1417 +
  1418 +/*
  1419 + * @brief Execute a drive command.
  1420 + *
  1421 + * return value 0 The command completed successfully.
  1422 + * return value -1 An error occurred while executing the command.
  1423 + */
  1424 +int exec_drive_task(struct mtip_port *port, u8 *command)
  1425 +{
  1426 + struct host_to_dev_fis fis;
  1427 + struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
  1428 +
  1429 + /* Lock the internal command semaphore. */
  1430 + down_write(&port->dd->internal_sem);
  1431 +
  1432 + /* Build the FIS. */
  1433 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1434 + fis.type = 0x27;
  1435 + fis.opts = 1 << 7;
  1436 + fis.command = command[0];
  1437 + fis.features = command[1];
  1438 + fis.sect_count = command[2];
  1439 + fis.sector = command[3];
  1440 + fis.cyl_low = command[4];
  1441 + fis.cyl_hi = command[5];
  1442 + fis.device = command[6] & ~0x10; /* Clear the dev bit*/
  1443 +
  1444 +
  1445 + dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
  1446 + "nsect %x, sect %x, lcyl %x, "
  1447 + "hcyl %x, sel %x\n",
  1448 + __func__,
  1449 + command[0],
  1450 + command[1],
  1451 + command[2],
  1452 + command[3],
  1453 + command[4],
  1454 + command[5],
  1455 + command[6]);
  1456 +
  1457 + /* Execute the command. */
  1458 + if (mtip_exec_internal_command(port,
  1459 + &fis,
  1460 + 5,
  1461 + 0,
  1462 + 0,
  1463 + 0,
  1464 + GFP_KERNEL,
  1465 + MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
  1466 + up_write(&port->dd->internal_sem);
  1467 + return -1;
  1468 + }
  1469 +
  1470 + command[0] = reply->command; /* Status*/
  1471 + command[1] = reply->features; /* Error*/
  1472 + command[4] = reply->cyl_low;
  1473 + command[5] = reply->cyl_hi;
  1474 +
  1475 + dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
  1476 + "err %x , cyl_lo %x cyl_hi %x\n",
  1477 + __func__,
  1478 + command[0],
  1479 + command[1],
  1480 + command[4],
  1481 + command[5]);
  1482 +
  1483 + up_write(&port->dd->internal_sem);
  1484 + return 0;
  1485 +}
  1486 +
  1487 +/*
  1488 + * @brief Execute a drive command.
  1489 + *
  1490 + * @param port Pointer to the port data structure.
  1491 + * @param command Pointer to the user specified command parameters.
  1492 + * @param user_buffer Pointer to the user space buffer where read sector
  1493 + * data should be copied.
  1494 + *
  1495 + * return value 0 The command completed successfully.
  1496 + * return value -EFAULT An error occurred while copying the completion
  1497 + * data to the user space buffer.
  1498 + * return value -1 An error occurred while executing the command.
  1499 + */
  1500 +int exec_drive_command(struct mtip_port *port, u8 *command,
  1501 + void __user *user_buffer)
  1502 +{
  1503 + struct host_to_dev_fis fis;
  1504 + struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
  1505 +
  1506 + /* Lock the internal command semaphore. */
  1507 + down_write(&port->dd->internal_sem);
  1508 +
  1509 + /* Build the FIS. */
  1510 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1511 + fis.type = 0x27;
  1512 + fis.opts = 1 << 7;
  1513 + fis.command = command[0];
  1514 + fis.features = command[2];
  1515 + fis.sect_count = command[3];
  1516 + if (fis.command == ATA_CMD_SMART) {
  1517 + fis.sector = command[1];
  1518 + fis.cyl_low = 0x4f;
  1519 + fis.cyl_hi = 0xc2;
  1520 + }
  1521 +
  1522 + dbg_printk(MTIP_DRV_NAME
  1523 + "%s: User Command: cmd %x, sect %x, "
  1524 + "feat %x, sectcnt %x\n",
  1525 + __func__,
  1526 + command[0],
  1527 + command[1],
  1528 + command[2],
  1529 + command[3]);
  1530 +
  1531 + memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
  1532 +
  1533 + /* Execute the command. */
  1534 + if (mtip_exec_internal_command(port,
  1535 + &fis,
  1536 + 5,
  1537 + port->sector_buffer_dma,
  1538 + (command[3] != 0) ? ATA_SECT_SIZE : 0,
  1539 + 0,
  1540 + GFP_KERNEL,
  1541 + MTIP_IOCTL_COMMAND_TIMEOUT_MS)
  1542 + < 0) {
  1543 + up_write(&port->dd->internal_sem);
  1544 + return -1;
  1545 + }
  1546 +
  1547 + /* Collect the completion status. */
  1548 + command[0] = reply->command; /* Status*/
  1549 + command[1] = reply->features; /* Error*/
  1550 + command[2] = command[3];
  1551 +
  1552 + dbg_printk(MTIP_DRV_NAME
  1553 + "%s: Completion Status: stat %x, "
  1554 + "err %x, cmd %x\n",
  1555 + __func__,
  1556 + command[0],
  1557 + command[1],
  1558 + command[2]);
  1559 +
  1560 + if (user_buffer && command[3]) {
  1561 + if (copy_to_user(user_buffer,
  1562 + port->sector_buffer,
  1563 + ATA_SECT_SIZE * command[3])) {
  1564 + up_write(&port->dd->internal_sem);
  1565 + return -EFAULT;
  1566 + }
  1567 + }
  1568 +
  1569 + up_write(&port->dd->internal_sem);
  1570 + return 0;
  1571 +}
  1572 +
  1573 +/*
  1574 + * Indicates whether a command has a single sector payload.
  1575 + *
  1576 + * @command passed to the device to perform the certain event.
  1577 + * @features passed to the device to perform the certain event.
  1578 + *
  1579 + * return value
  1580 + * 1 command is one that always has a single sector payload,
  1581 + * regardless of the value in the Sector Count field.
  1582 + * 0 otherwise
  1583 + *
  1584 + */
  1585 +static unsigned int implicit_sector(unsigned char command,
  1586 + unsigned char features)
  1587 +{
  1588 + unsigned int rv = 0;
  1589 +
  1590 + /* list of commands that have an implicit sector count of 1 */
  1591 + switch (command) {
  1592 + case 0xF1:
  1593 + case 0xF2:
  1594 + case 0xF3:
  1595 + case 0xF4:
  1596 + case 0xF5:
  1597 + case 0xF6:
  1598 + case 0xE4:
  1599 + case 0xE8:
  1600 + rv = 1;
  1601 + break;
  1602 + case 0xF9:
  1603 + if (features == 0x03)
  1604 + rv = 1;
  1605 + break;
  1606 + case 0xB0:
  1607 + if ((features == 0xD0) || (features == 0xD1))
  1608 + rv = 1;
  1609 + break;
  1610 + case 0xB1:
  1611 + if ((features == 0xC2) || (features == 0xC3))
  1612 + rv = 1;
  1613 + break;
  1614 + }
  1615 + return rv;
  1616 +}
  1617 +
  1618 +/*
  1619 + * Executes a taskfile
  1620 + * See ide_taskfile_ioctl() for derivation
  1621 + */
  1622 +static int exec_drive_taskfile(struct driver_data *dd,
  1623 + unsigned long arg,
  1624 + unsigned char compat)
  1625 +{
  1626 + struct host_to_dev_fis fis;
  1627 + struct host_to_dev_fis *reply;
  1628 + ide_task_request_t *req_task;
  1629 + u8 *outbuf = NULL;
  1630 + u8 *inbuf = NULL;
  1631 + dma_addr_t outbuf_dma = (dma_addr_t)NULL;
  1632 + dma_addr_t inbuf_dma = (dma_addr_t)NULL;
  1633 + dma_addr_t dma_buffer = (dma_addr_t)NULL;
  1634 + int err = 0;
  1635 + int tasksize = sizeof(struct ide_task_request_s);
  1636 + unsigned int taskin = 0;
  1637 + unsigned int taskout = 0;
  1638 + u8 nsect = 0;
  1639 + char __user *buf = (char __user *)arg;
  1640 + unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
  1641 + unsigned int force_single_sector;
  1642 + unsigned int transfer_size;
  1643 + unsigned long task_file_data;
  1644 + int intotal, outtotal;
  1645 + struct mtip_compat_ide_task_request_s *compat_req_task = NULL;
  1646 + int compat_tasksize = sizeof(struct mtip_compat_ide_task_request_s);
  1647 +
  1648 + req_task = kzalloc(tasksize, GFP_KERNEL);
  1649 + if (req_task == NULL)
  1650 + return -ENOMEM;
  1651 +
  1652 + if (compat == 1) {
  1653 + compat_req_task =
  1654 + (struct mtip_compat_ide_task_request_s __user *) arg;
  1655 +
  1656 + if (copy_from_user(req_task, buf,
  1657 + compat_tasksize -
  1658 + (2 * sizeof(compat_long_t)))) {
  1659 + err = -EFAULT;
  1660 + goto abort;
  1661 + }
  1662 +
  1663 + if (get_user(req_task->out_size, &compat_req_task->out_size)) {
  1664 + err = -EFAULT;
  1665 + goto abort;
  1666 + }
  1667 +
  1668 + if (get_user(req_task->in_size, &compat_req_task->in_size)) {
  1669 + err = -EFAULT;
  1670 + goto abort;
  1671 + }
  1672 +
  1673 + outtotal = compat_tasksize;
  1674 + intotal = compat_tasksize + req_task->out_size;
  1675 + } else {
  1676 + if (copy_from_user(req_task, buf, tasksize)) {
  1677 + kfree(req_task);
  1678 + err = -EFAULT;
  1679 + goto abort;
  1680 + }
  1681 +
  1682 + outtotal = tasksize;
  1683 + intotal = tasksize + req_task->out_size;
  1684 + }
  1685 +
  1686 + taskout = req_task->out_size;
  1687 + taskin = req_task->in_size;
  1688 + /* 130560 = 512 * 0xFF*/
  1689 + if (taskin > 130560 || taskout > 130560) {
  1690 + err = -EINVAL;
  1691 + goto abort;
  1692 + }
  1693 +
  1694 + if (taskout) {
  1695 + outbuf = kzalloc(taskout, GFP_KERNEL);
  1696 + if (outbuf == NULL) {
  1697 + err = -ENOMEM;
  1698 + goto abort;
  1699 + }
  1700 + if (copy_from_user(outbuf, buf + outtotal, taskout)) {
  1701 + err = -EFAULT;
  1702 + goto abort;
  1703 + }
  1704 + outbuf_dma = pci_map_single(dd->pdev,
  1705 + outbuf,
  1706 + taskout,
  1707 + DMA_TO_DEVICE);
  1708 + if (outbuf_dma == (dma_addr_t)NULL) {
  1709 + err = -ENOMEM;
  1710 + goto abort;
  1711 + }
  1712 + dma_buffer = outbuf_dma;
  1713 + }
  1714 +
  1715 + if (taskin) {
  1716 + inbuf = kzalloc(taskin, GFP_KERNEL);
  1717 + if (inbuf == NULL) {
  1718 + err = -ENOMEM;
  1719 + goto abort;
  1720 + }
  1721 +
  1722 + if (copy_from_user(inbuf, buf + intotal, taskin)) {
  1723 + err = -EFAULT;
  1724 + goto abort;
  1725 + }
  1726 + inbuf_dma = pci_map_single(dd->pdev,
  1727 + inbuf,
  1728 + taskin, DMA_FROM_DEVICE);
  1729 + if (inbuf_dma == (dma_addr_t)NULL) {
  1730 + err = -ENOMEM;
  1731 + goto abort;
  1732 + }
  1733 + dma_buffer = inbuf_dma;
  1734 + }
  1735 +
  1736 + /* only supports PIO and non-data commands from this ioctl. */
  1737 + switch (req_task->data_phase) {
  1738 + case TASKFILE_OUT:
  1739 + nsect = taskout / ATA_SECT_SIZE;
  1740 + reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
  1741 + break;
  1742 + case TASKFILE_IN:
  1743 + reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
  1744 + break;
  1745 + case TASKFILE_NO_DATA:
  1746 + reply = (dd->port->rxfis + RX_FIS_D2H_REG);
  1747 + break;
  1748 + default:
  1749 + err = -EINVAL;
  1750 + goto abort;
  1751 + }
  1752 +
  1753 + /* Lock the internal command semaphore. */
  1754 + down_write(&dd->internal_sem);
  1755 +
  1756 + /* Build the FIS. */
  1757 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
  1758 +
  1759 + fis.type = 0x27;
  1760 + fis.opts = 1 << 7;
  1761 + fis.command = req_task->io_ports[7];
  1762 + fis.features = req_task->io_ports[1];
  1763 + fis.sect_count = req_task->io_ports[2];
  1764 + fis.lba_low = req_task->io_ports[3];
  1765 + fis.lba_mid = req_task->io_ports[4];
  1766 + fis.lba_hi = req_task->io_ports[5];
  1767 + /* Clear the dev bit*/
  1768 + fis.device = req_task->io_ports[6] & ~0x10;
  1769 +
  1770 + if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
  1771 + req_task->in_flags.all =
  1772 + IDE_TASKFILE_STD_IN_FLAGS |
  1773 + (IDE_HOB_STD_IN_FLAGS << 8);
  1774 + fis.lba_low_ex = req_task->hob_ports[3];
  1775 + fis.lba_mid_ex = req_task->hob_ports[4];
  1776 + fis.lba_hi_ex = req_task->hob_ports[5];
  1777 + fis.features_ex = req_task->hob_ports[1];
  1778 + fis.sect_cnt_ex = req_task->hob_ports[2];
  1779 +
  1780 + } else {
  1781 + req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
  1782 + }
  1783 +
  1784 + force_single_sector = implicit_sector(fis.command, fis.features);
  1785 +
  1786 + if ((taskin || taskout) && (!fis.sect_count)) {
  1787 + if (nsect)
  1788 + fis.sect_count = nsect;
  1789 + else {
  1790 + if (!force_single_sector) {
  1791 + dev_warn(&dd->pdev->dev,
  1792 + "data movement but "
  1793 + "sect_count is 0\n");
  1794 + up_write(&dd->internal_sem);
  1795 + err = -EINVAL;
  1796 + goto abort;
  1797 + }
  1798 + }
  1799 + }
  1800 +
  1801 + dbg_printk(MTIP_DRV_NAME
  1802 + "taskfile: cmd %x, feat %x, nsect %x,"
  1803 + " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
  1804 + " head/dev %x\n",
  1805 + fis.command,
  1806 + fis.features,
  1807 + fis.sect_count,
  1808 + fis.lba_low,
  1809 + fis.lba_mid,
  1810 + fis.lba_hi,
  1811 + fis.device);
  1812 +
  1813 + switch (fis.command) {
  1814 + case 0x92: /* Change timeout for Download Microcode to 60 seconds.*/
  1815 + timeout = 60000;
  1816 + break;
  1817 + case 0xf4: /* Change timeout for Security Erase Unit to 4 minutes.*/
  1818 + timeout = 240000;
  1819 + break;
  1820 + case 0xe0: /* Change timeout for standby immediate to 10 seconds.*/
  1821 + timeout = 10000;
  1822 + break;
  1823 + case 0xf7: /* Change timeout for vendor unique command to 10 secs */
  1824 + timeout = 10000;
  1825 + break;
  1826 + case 0xfa: /* Change timeout for vendor unique command to 10 secs */
  1827 + timeout = 10000;
  1828 + break;
  1829 + default:
  1830 + timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
  1831 + break;
  1832 + }
  1833 +
  1834 + /* Determine the correct transfer size.*/
  1835 + if (force_single_sector)
  1836 + transfer_size = ATA_SECT_SIZE;
  1837 + else
  1838 + transfer_size = ATA_SECT_SIZE * fis.sect_count;
  1839 +
  1840 + /* Execute the command.*/
  1841 + if (mtip_exec_internal_command(dd->port,
  1842 + &fis,
  1843 + 5,
  1844 + dma_buffer,
  1845 + transfer_size,
  1846 + 0,
  1847 + GFP_KERNEL,
  1848 + timeout) < 0) {
  1849 + up_write(&dd->internal_sem);
  1850 + err = -EIO;
  1851 + goto abort;
  1852 + }
  1853 +
  1854 + task_file_data = readl(dd->port->mmio+PORT_TFDATA);
  1855 +
  1856 + if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
  1857 + reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
  1858 + req_task->io_ports[7] = reply->control;
  1859 + } else {
  1860 + reply = dd->port->rxfis + RX_FIS_D2H_REG;
  1861 + req_task->io_ports[7] = reply->command;
  1862 + }
  1863 +
  1864 + /* reclaim the DMA buffers.*/
  1865 + if (inbuf_dma)
  1866 + pci_unmap_single(dd->pdev, inbuf_dma,
  1867 + taskin, DMA_FROM_DEVICE);
  1868 + if (outbuf_dma)
  1869 + pci_unmap_single(dd->pdev, outbuf_dma,
  1870 + taskout, DMA_TO_DEVICE);
  1871 + inbuf_dma = (dma_addr_t) NULL;
  1872 + outbuf_dma = (dma_addr_t) NULL;
  1873 +
  1874 + /* return the ATA registers to the caller.*/
  1875 + req_task->io_ports[1] = reply->features;
  1876 + req_task->io_ports[2] = reply->sect_count;
  1877 + req_task->io_ports[3] = reply->lba_low;
  1878 + req_task->io_ports[4] = reply->lba_mid;
  1879 + req_task->io_ports[5] = reply->lba_hi;
  1880 + req_task->io_ports[6] = reply->device;
  1881 +
  1882 + if (req_task->out_flags.all & 1) {
  1883 +
  1884 + req_task->hob_ports[3] = reply->lba_low_ex;
  1885 + req_task->hob_ports[4] = reply->lba_mid_ex;
  1886 + req_task->hob_ports[5] = reply->lba_hi_ex;
  1887 + req_task->hob_ports[1] = reply->features_ex;
  1888 + req_task->hob_ports[2] = reply->sect_cnt_ex;
  1889 + }
  1890 +
  1891 + /* Com rest after secure erase or lowlevel format */
  1892 + if (((fis.command == 0xF4) ||
  1893 + ((fis.command == 0xFC) &&
  1894 + (fis.features == 0x27 || fis.features == 0x72 ||
  1895 + fis.features == 0x62 || fis.features == 0x26))) &&
  1896 + !(reply->command & 1)) {
  1897 + mtip_restart_port(dd->port);
  1898 + }
  1899 +
  1900 + dbg_printk(MTIP_DRV_NAME
  1901 + "%s: Completion: stat %x,"
  1902 + "err %x, sect_cnt %x, lbalo %x,"
  1903 + "lbamid %x, lbahi %x, dev %x\n",
  1904 + __func__,
  1905 + req_task->io_ports[7],
  1906 + req_task->io_ports[1],
  1907 + req_task->io_ports[2],
  1908 + req_task->io_ports[3],
  1909 + req_task->io_ports[4],
  1910 + req_task->io_ports[5],
  1911 + req_task->io_ports[6]);
  1912 +
  1913 + up_write(&dd->internal_sem);
  1914 +
  1915 + if (compat == 1) {
  1916 + if (copy_to_user(buf, req_task,
  1917 + compat_tasksize -
  1918 + (2 * sizeof(compat_long_t)))) {
  1919 + err = -EFAULT;
  1920 + goto abort;
  1921 + }
  1922 + if (put_user(req_task->out_size,
  1923 + &compat_req_task->out_size)) {
  1924 + err = -EFAULT;
  1925 + goto abort;
  1926 + }
  1927 + if (put_user(req_task->in_size, &compat_req_task->in_size)) {
  1928 + err = -EFAULT;
  1929 + goto abort;
  1930 + }
  1931 + } else {
  1932 + if (copy_to_user(buf, req_task, tasksize)) {
  1933 + err = -EFAULT;
  1934 + goto abort;
  1935 + }
  1936 + }
  1937 + if (taskout) {
  1938 + if (copy_to_user(buf + outtotal, outbuf, taskout)) {
  1939 + err = -EFAULT;
  1940 + goto abort;
  1941 + }
  1942 + }
  1943 + if (taskin) {
  1944 + if (copy_to_user(buf + intotal, inbuf, taskin)) {
  1945 + err = -EFAULT;
  1946 + goto abort;
  1947 + }
  1948 + }
  1949 +abort:
  1950 + if (inbuf_dma)
  1951 + pci_unmap_single(dd->pdev, inbuf_dma,
  1952 + taskin, DMA_FROM_DEVICE);
  1953 + if (outbuf_dma)
  1954 + pci_unmap_single(dd->pdev, outbuf_dma,
  1955 + taskout, DMA_TO_DEVICE);
  1956 + kfree(req_task);
  1957 + kfree(outbuf);
  1958 + kfree(inbuf);
  1959 +
  1960 + return err;
  1961 +}
  1962 +
  1963 +/*
  1964 + * Handle IOCTL calls from the Block Layer.
  1965 + *
  1966 + * This function is called by the Block Layer when it receives an IOCTL
  1967 + * command that it does not understand. If the IOCTL command is not supported
  1968 + * this function returns -ENOTTY.
  1969 + *
  1970 + * @dd Pointer to the driver data structure.
  1971 + * @cmd IOCTL command passed from the Block Layer.
  1972 + * @arg IOCTL argument passed from the Block Layer.
  1973 + *
  1974 + * return value
  1975 + * 0 The IOCTL completed successfully.
  1976 + * -ENOTTY The specified command is not supported.
  1977 + * -EFAULT An error occurred copying data to a user space buffer.
  1978 + * -EIO An error occurred while executing the command.
  1979 + */
  1980 +int mtip_hw_ioctl(struct driver_data *dd,
  1981 + unsigned int cmd,
  1982 + unsigned long arg,
  1983 + unsigned char compat)
  1984 +{
  1985 + switch (cmd) {
  1986 + case HDIO_GET_IDENTITY:
  1987 + if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
  1988 + dev_warn(&dd->pdev->dev,
  1989 + "Unable to read identity\n");
  1990 + return -EIO;
  1991 + }
  1992 +
  1993 + break;
  1994 + case HDIO_DRIVE_CMD:
  1995 + {
  1996 + u8 drive_command[4];
  1997 +
  1998 + /* Copy the user command info to our buffer. */
  1999 + if (copy_from_user(drive_command,
  2000 + (void __user *) arg,
  2001 + sizeof(drive_command)))
  2002 + return -EFAULT;
  2003 +
  2004 + /* Execute the drive command. */
  2005 + if (exec_drive_command(dd->port,
  2006 + drive_command,
  2007 + (void __user *) (arg+4)))
  2008 + return -EIO;
  2009 +
  2010 + /* Copy the status back to the users buffer. */
  2011 + if (copy_to_user((void __user *) arg,
  2012 + drive_command,
  2013 + sizeof(drive_command)))
  2014 + return -EFAULT;
  2015 +
  2016 + break;
  2017 + }
  2018 + case HDIO_DRIVE_TASK:
  2019 + {
  2020 + u8 drive_command[7];
  2021 +
  2022 + /* Copy the user command info to our buffer. */
  2023 + if (copy_from_user(drive_command,
  2024 + (void __user *) arg,
  2025 + sizeof(drive_command)))
  2026 + return -EFAULT;
  2027 +
  2028 + /* Execute the drive command. */
  2029 + if (exec_drive_task(dd->port, drive_command))
  2030 + return -EIO;
  2031 +
  2032 + /* Copy the status back to the users buffer. */
  2033 + if (copy_to_user((void __user *) arg,
  2034 + drive_command,
  2035 + sizeof(drive_command)))
  2036 + return -EFAULT;
  2037 +
  2038 + break;
  2039 + }
  2040 + case HDIO_DRIVE_TASKFILE:
  2041 + return exec_drive_taskfile(dd, arg, compat);
  2042 +
  2043 + default:
  2044 + return -EINVAL;
  2045 + }
  2046 + return 0;
  2047 +}
  2048 +
  2049 +/*
  2050 + * Submit an IO to the hw
  2051 + *
  2052 + * This function is called by the block layer to issue an io
  2053 + * to the device. Upon completion, the callback function will
  2054 + * be called with the data parameter passed as the callback data.
  2055 + *
  2056 + * @dd Pointer to the driver data structure.
  2057 + * @start First sector to read.
  2058 + * @nsect Number of sectors to read.
  2059 + * @nents Number of entries in scatter list for the read command.
  2060 + * @tag The tag of this read command.
  2061 + * @callback Pointer to the function that should be called
  2062 + * when the read completes.
  2063 + * @data Callback data passed to the callback function
  2064 + * when the read completes.
  2065 + * @barrier If non-zero, this command must be completed before
  2066 + * issuing any other commands.
  2067 + * @dir Direction (read or write)
  2068 + *
  2069 + * return value
  2070 + * None
  2071 + */
  2072 +void mtip_hw_submit_io(struct driver_data *dd,
  2073 + sector_t start,
  2074 + int nsect,
  2075 + int nents,
  2076 + int tag,
  2077 + void *callback,
  2078 + void *data,
  2079 + int barrier,
  2080 + int dir)
  2081 +{
  2082 + struct host_to_dev_fis *fis;
  2083 + struct mtip_port *port = dd->port;
  2084 + struct mtip_cmd *command = &port->commands[tag];
  2085 +
  2086 + /* Map the scatter list for DMA access */
  2087 + if (dir == READ)
  2088 + nents = dma_map_sg(&dd->pdev->dev, command->sg,
  2089 + nents, DMA_FROM_DEVICE);
  2090 + else
  2091 + nents = dma_map_sg(&dd->pdev->dev, command->sg,
  2092 + nents, DMA_TO_DEVICE);
  2093 +
  2094 + command->scatter_ents = nents;
  2095 +
  2096 + /*
  2097 + * The number of retries for this command before it is
  2098 + * reported as a failure to the upper layers.
  2099 + */
  2100 + command->retries = MTIP_MAX_RETRIES;
  2101 +
  2102 + /* Fill out fis */
  2103 + fis = command->command;
  2104 + fis->type = 0x27;
  2105 + fis->opts = 1 << 7;
  2106 + fis->command =
  2107 + (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
  2108 + *((unsigned int *) &fis->lba_low) = (start & 0xffffff);
  2109 + *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xffffff);
  2110 + fis->device = 1 << 6;
  2111 + if (barrier)
  2112 + fis->device |= FUA_BIT;
  2113 + fis->features = nsect & 0xff;
  2114 + fis->features_ex = (nsect >> 8) & 0xff;
  2115 + fis->sect_count = ((tag << 3) | (tag >> 5));
  2116 + fis->sect_cnt_ex = 0;
  2117 + fis->control = 0;
  2118 + fis->res2 = 0;
  2119 + fis->res3 = 0;
  2120 + fill_command_sg(dd, command, nents);
  2121 +
  2122 + /* Populate the command header */
  2123 + command->command_header->opts = cpu_to_le32(
  2124 + (nents << 16) | 5 | AHCI_CMD_PREFETCH);
  2125 + command->command_header->byte_count = 0;
  2126 +
  2127 + /*
  2128 + * Set the completion function and data for the command
  2129 + * within this layer.
  2130 + */
  2131 + command->comp_data = dd;
  2132 + command->comp_func = mtip_async_complete;
  2133 + command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
  2134 +
  2135 + /*
  2136 + * Set the completion function and data for the command passed
  2137 + * from the upper layer.
  2138 + */
  2139 + command->async_data = data;
  2140 + command->async_callback = callback;
  2141 +
  2142 + /*
  2143 + * Lock used to prevent this command from being issued
  2144 + * if an internal command is in progress.
  2145 + */
  2146 + down_read(&port->dd->internal_sem);
  2147 +
  2148 + /* Issue the command to the hardware */
  2149 + mtip_issue_ncq_command(port, tag);
  2150 +
  2151 + /* Set the command's timeout value.*/
  2152 + port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
  2153 + MTIP_NCQ_COMMAND_TIMEOUT_MS);
  2154 +
  2155 + up_read(&port->dd->internal_sem);
  2156 +}
  2157 +
  2158 +/*
  2159 + * Release a command slot.
  2160 + *
  2161 + * @dd Pointer to the driver data structure.
  2162 + * @tag Slot tag
  2163 + *
  2164 + * return value
  2165 + * None
  2166 + */
  2167 +void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
  2168 +{
  2169 + release_slot(dd->port, tag);
  2170 +}
  2171 +
  2172 +/*
  2173 + * Obtain a command slot and return its associated scatter list.
  2174 + *
  2175 + * @dd Pointer to the driver data structure.
  2176 + * @tag Pointer to an int that will receive the allocated command
  2177 + * slot tag.
  2178 + *
  2179 + * return value
  2180 + * Pointer to the scatter list for the allocated command slot
  2181 + * or NULL if no command slots are available.
  2182 + */
  2183 +struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  2184 + int *tag)
  2185 +{
  2186 + /*
  2187 + * It is possible that, even with this semaphore, a thread
  2188 + * may think that no command slots are available. Therefore, we
  2189 + * need to make an attempt to get_slot().
  2190 + */
  2191 + down(&dd->port->cmd_slot);
  2192 + *tag = get_slot(dd->port);
  2193 +
  2194 + if (unlikely(*tag < 0))
  2195 + return NULL;
  2196 +
  2197 + return dd->port->commands[*tag].sg;
  2198 +}
  2199 +
  2200 +/*
  2201 + * Sysfs register/status dump.
  2202 + *
  2203 + * @dev Pointer to the device structure, passed by the kernrel.
  2204 + * @attr Pointer to the device_attribute structure passed by the kernel.
  2205 + * @buf Pointer to the char buffer that will receive the stats info.
  2206 + *
  2207 + * return value
  2208 + * The size, in bytes, of the data copied into buf.
  2209 + */
  2210 +static ssize_t hw_show_registers(struct device *dev,
  2211 + struct device_attribute *attr,
  2212 + char *buf)
  2213 +{
  2214 + u32 group_allocated;
  2215 + struct driver_data *dd = dev_to_disk(dev)->private_data;
  2216 + int size = 0;
  2217 + int n;
  2218 +
  2219 + size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
  2220 +
  2221 + for (n = 0; n < dd->slot_groups; n++)
  2222 + size += sprintf(&buf[size], "0x%08x\n",
  2223 + readl(dd->port->s_active[n]));
  2224 +
  2225 + size += sprintf(&buf[size], "Command Issue:\n");
  2226 +
  2227 + for (n = 0; n < dd->slot_groups; n++)
  2228 + size += sprintf(&buf[size], "0x%08x\n",
  2229 + readl(dd->port->cmd_issue[n]));
  2230 +
  2231 + size += sprintf(&buf[size], "Allocated:\n");
  2232 +
  2233 + for (n = 0; n < dd->slot_groups; n++) {
  2234 + if (sizeof(long) > sizeof(u32))
  2235 + group_allocated =
  2236 + dd->port->allocated[n/2] >> (32*(n&1));
  2237 + else
  2238 + group_allocated = dd->port->allocated[n];
  2239 + size += sprintf(&buf[size], "0x%08x\n",
  2240 + group_allocated);
  2241 + }
  2242 +
  2243 + size += sprintf(&buf[size], "completed:\n");
  2244 +
  2245 + for (n = 0; n < dd->slot_groups; n++)
  2246 + size += sprintf(&buf[size], "0x%08x\n",
  2247 + readl(dd->port->completed[n]));
  2248 +
  2249 + size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
  2250 + readl(dd->port->mmio + PORT_IRQ_STAT));
  2251 + size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
  2252 + readl(dd->mmio + HOST_IRQ_STAT));
  2253 +
  2254 + return size;
  2255 +}
  2256 +static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
  2257 +
  2258 +/*
  2259 + * Create the sysfs related attributes.
  2260 + *
  2261 + * @dd Pointer to the driver data structure.
  2262 + * @kobj Pointer to the kobj for the block device.
  2263 + *
  2264 + * return value
  2265 + * 0 Operation completed successfully.
  2266 + * -EINVAL Invalid parameter.
  2267 + */
  2268 +int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
  2269 +{
  2270 + if (!kobj || !dd)
  2271 + return -EINVAL;
  2272 +
  2273 + if (sysfs_create_file(kobj, &dev_attr_registers.attr))
  2274 + dev_warn(&dd->pdev->dev,
  2275 + "Error creating registers sysfs entry\n");
  2276 + return 0;
  2277 +}
  2278 +
  2279 +/*
  2280 + * Remove the sysfs related attributes.
  2281 + *
  2282 + * @dd Pointer to the driver data structure.
  2283 + * @kobj Pointer to the kobj for the block device.
  2284 + *
  2285 + * return value
  2286 + * 0 Operation completed successfully.
  2287 + * -EINVAL Invalid parameter.
  2288 + */
  2289 +int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
  2290 +{
  2291 + if (!kobj || !dd)
  2292 + return -EINVAL;
  2293 +
  2294 + sysfs_remove_file(kobj, &dev_attr_registers.attr);
  2295 +
  2296 + return 0;
  2297 +}
  2298 +
  2299 +/*
  2300 + * Perform any init/resume time hardware setup
  2301 + *
  2302 + * @dd Pointer to the driver data structure.
  2303 + *
  2304 + * return value
  2305 + * None
  2306 + */
  2307 +static inline void hba_setup(struct driver_data *dd)
  2308 +{
  2309 + u32 hwdata;
  2310 + hwdata = readl(dd->mmio + HOST_HSORG);
  2311 +
  2312 + /* interrupt bug workaround: use only 1 IS bit.*/
  2313 + writel(hwdata |
  2314 + HSORG_DISABLE_SLOTGRP_INTR |
  2315 + HSORG_DISABLE_SLOTGRP_PXIS,
  2316 + dd->mmio + HOST_HSORG);
  2317 +}
  2318 +
  2319 +/*
  2320 + * Detect the details of the product, and store anything needed
  2321 + * into the driver data structure. This includes product type and
  2322 + * version and number of slot groups.
  2323 + *
  2324 + * @dd Pointer to the driver data structure.
  2325 + *
  2326 + * return value
  2327 + * None
  2328 + */
  2329 +static void mtip_detect_product(struct driver_data *dd)
  2330 +{
  2331 + u32 hwdata;
  2332 + unsigned int rev, slotgroups;
  2333 +
  2334 + /*
  2335 + * HBA base + 0xFC [15:0] - vendor-specific hardware interface
  2336 + * info register:
  2337 + * [15:8] hardware/software interface rev#
  2338 + * [ 3] asic-style interface
  2339 + * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
  2340 + */
  2341 + hwdata = readl(dd->mmio + HOST_HSORG);
  2342 +
  2343 + dd->product_type = MTIP_PRODUCT_UNKNOWN;
  2344 + dd->slot_groups = 1;
  2345 +
  2346 + if (hwdata & 0x8) {
  2347 + dd->product_type = MTIP_PRODUCT_ASICFPGA;
  2348 + rev = (hwdata & HSORG_HWREV) >> 8;
  2349 + slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
  2350 + dev_info(&dd->pdev->dev,
  2351 + "ASIC-FPGA design, HS rev 0x%x, "
  2352 + "%i slot groups [%i slots]\n",
  2353 + rev,
  2354 + slotgroups,
  2355 + slotgroups * 32);
  2356 +
  2357 + if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
  2358 + dev_warn(&dd->pdev->dev,
  2359 + "Warning: driver only supports "
  2360 + "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
  2361 + slotgroups = MTIP_MAX_SLOT_GROUPS;
  2362 + }
  2363 + dd->slot_groups = slotgroups;
  2364 + return;
  2365 + }
  2366 +
  2367 + dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
  2368 +}
  2369 +
  2370 +/*
  2371 + * Blocking wait for FTL rebuild to complete
  2372 + *
  2373 + * @dd Pointer to the DRIVER_DATA structure.
  2374 + *
  2375 + * return value
  2376 + * 0 FTL rebuild completed successfully
  2377 + * -EFAULT FTL rebuild error/timeout/interruption
  2378 + */
  2379 +static int mtip_ftl_rebuild_poll(struct driver_data *dd)
  2380 +{
  2381 + unsigned long timeout, cnt = 0, start;
  2382 +
  2383 + dev_warn(&dd->pdev->dev,
  2384 + "FTL rebuild in progress. Polling for completion.\n");
  2385 +
  2386 + start = jiffies;
  2387 + dd->ftlrebuildflag = 1;
  2388 + timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
  2389 +
  2390 + do {
  2391 +#ifdef CONFIG_HOTPLUG
  2392 + if (mtip_check_surprise_removal(dd->pdev))
  2393 + return -EFAULT;
  2394 +#endif
  2395 + if (mtip_get_identify(dd->port, NULL) < 0)
  2396 + return -EFAULT;
  2397 +
  2398 + if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
  2399 + MTIP_FTL_REBUILD_MAGIC) {
  2400 + ssleep(1);
  2401 + /* Print message every 3 minutes */
  2402 + if (cnt++ >= 180) {
  2403 + dev_warn(&dd->pdev->dev,
  2404 + "FTL rebuild in progress (%d secs).\n",
  2405 + jiffies_to_msecs(jiffies - start) / 1000);
  2406 + cnt = 0;
  2407 + }
  2408 + } else {
  2409 + dev_warn(&dd->pdev->dev,
  2410 + "FTL rebuild complete (%d secs).\n",
  2411 + jiffies_to_msecs(jiffies - start) / 1000);
  2412 + dd->ftlrebuildflag = 0;
  2413 + break;
  2414 + }
  2415 + ssleep(10);
  2416 + } while (time_before(jiffies, timeout));
  2417 +
  2418 + /* Check for timeout */
  2419 + if (dd->ftlrebuildflag) {
  2420 + dev_err(&dd->pdev->dev,
  2421 + "Timed out waiting for FTL rebuild to complete (%d secs).\n",
  2422 + jiffies_to_msecs(jiffies - start) / 1000);
  2423 + return -EFAULT;
  2424 + }
  2425 +
  2426 + return 0;
  2427 +}
  2428 +
  2429 +/*
  2430 + * Called once for each card.
  2431 + *
  2432 + * @dd Pointer to the driver data structure.
  2433 + *
  2434 + * return value
  2435 + * 0 on success, else an error code.
  2436 + */
  2437 +int mtip_hw_init(struct driver_data *dd)
  2438 +{
  2439 + int i;
  2440 + int rv;
  2441 + unsigned int num_command_slots;
  2442 +
  2443 + dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
  2444 +
  2445 + mtip_detect_product(dd);
  2446 + if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
  2447 + rv = -EIO;
  2448 + goto out1;
  2449 + }
  2450 + num_command_slots = dd->slot_groups * 32;
  2451 +
  2452 + hba_setup(dd);
  2453 +
  2454 + /*
  2455 + * Initialize the internal semaphore
  2456 + * Use a rw semaphore to enable prioritization of
  2457 + * mgmnt ioctl traffic during heavy IO load
  2458 + */
  2459 + init_rwsem(&dd->internal_sem);
  2460 +
  2461 + tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
  2462 +
  2463 + dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
  2464 + if (!dd->port) {
  2465 + dev_err(&dd->pdev->dev,
  2466 + "Memory allocation: port structure\n");
  2467 + return -ENOMEM;
  2468 + }
  2469 +
  2470 + /* Counting semaphore to track command slot usage */
  2471 + sema_init(&dd->port->cmd_slot, num_command_slots - 1);
  2472 +
  2473 + /* Spinlock to prevent concurrent issue */
  2474 + spin_lock_init(&dd->port->cmd_issue_lock);
  2475 +
  2476 + /* Set the port mmio base address. */
  2477 + dd->port->mmio = dd->mmio + PORT_OFFSET;
  2478 + dd->port->dd = dd;
  2479 +
  2480 + /* Allocate memory for the command list. */
  2481 + dd->port->command_list =
  2482 + dmam_alloc_coherent(&dd->pdev->dev,
  2483 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  2484 + &dd->port->command_list_dma,
  2485 + GFP_KERNEL);
  2486 + if (!dd->port->command_list) {
  2487 + dev_err(&dd->pdev->dev,
  2488 + "Memory allocation: command list\n");
  2489 + rv = -ENOMEM;
  2490 + goto out1;
  2491 + }
  2492 +
  2493 + /* Clear the memory we have allocated. */
  2494 + memset(dd->port->command_list,
  2495 + 0,
  2496 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
  2497 +
  2498 + /* Setup the addresse of the RX FIS. */
  2499 + dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
  2500 + dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
  2501 +
  2502 + /* Setup the address of the command tables. */
  2503 + dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
  2504 + dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
  2505 +
  2506 + /* Setup the address of the identify data. */
  2507 + dd->port->identify = dd->port->command_table +
  2508 + HW_CMD_TBL_AR_SZ;
  2509 + dd->port->identify_dma = dd->port->command_tbl_dma +
  2510 + HW_CMD_TBL_AR_SZ;
  2511 +
  2512 + /* Setup the address of the sector buffer. */
  2513 + dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
  2514 + dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
  2515 +
  2516 + /* Point the command headers at the command tables. */
  2517 + for (i = 0; i < num_command_slots; i++) {
  2518 + dd->port->commands[i].command_header =
  2519 + dd->port->command_list +
  2520 + (sizeof(struct mtip_cmd_hdr) * i);
  2521 + dd->port->commands[i].command_header_dma =
  2522 + dd->port->command_list_dma +
  2523 + (sizeof(struct mtip_cmd_hdr) * i);
  2524 +
  2525 + dd->port->commands[i].command =
  2526 + dd->port->command_table + (HW_CMD_TBL_SZ * i);
  2527 + dd->port->commands[i].command_dma =
  2528 + dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
  2529 +
  2530 + if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
  2531 + dd->port->commands[i].command_header->ctbau =
  2532 + cpu_to_le32(
  2533 + (dd->port->commands[i].command_dma >> 16) >> 16);
  2534 + dd->port->commands[i].command_header->ctba = cpu_to_le32(
  2535 + dd->port->commands[i].command_dma & 0xffffffff);
  2536 +
  2537 + /*
  2538 + * If this is not done, a bug is reported by the stock
  2539 + * FC11 i386. Due to the fact that it has lots of kernel
  2540 + * debugging enabled.
  2541 + */
  2542 + sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
  2543 +
  2544 + /* Mark all commands as currently inactive.*/
  2545 + atomic_set(&dd->port->commands[i].active, 0);
  2546 + }
  2547 +
  2548 + /* Setup the pointers to the extended s_active and CI registers. */
  2549 + for (i = 0; i < dd->slot_groups; i++) {
  2550 + dd->port->s_active[i] =
  2551 + dd->port->mmio + i*0x80 + PORT_SCR_ACT;
  2552 + dd->port->cmd_issue[i] =
  2553 + dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
  2554 + dd->port->completed[i] =
  2555 + dd->port->mmio + i*0x80 + PORT_SDBV;
  2556 + }
  2557 +
  2558 + /* Reset the HBA. */
  2559 + if (mtip_hba_reset(dd) < 0) {
  2560 + dev_err(&dd->pdev->dev,
  2561 + "Card did not reset within timeout\n");
  2562 + rv = -EIO;
  2563 + goto out2;
  2564 + }
  2565 +
  2566 + mtip_init_port(dd->port);
  2567 + mtip_start_port(dd->port);
  2568 +
  2569 + /* Setup the ISR and enable interrupts. */
  2570 + rv = devm_request_irq(&dd->pdev->dev,
  2571 + dd->pdev->irq,
  2572 + mtip_irq_handler,
  2573 + IRQF_SHARED,
  2574 + dev_driver_string(&dd->pdev->dev),
  2575 + dd);
  2576 +
  2577 + if (rv) {
  2578 + dev_err(&dd->pdev->dev,
  2579 + "Unable to allocate IRQ %d\n", dd->pdev->irq);
  2580 + goto out2;
  2581 + }
  2582 +
  2583 + /* Enable interrupts on the HBA. */
  2584 + writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
  2585 + dd->mmio + HOST_CTL);
  2586 +
  2587 + init_timer(&dd->port->cmd_timer);
  2588 + dd->port->cmd_timer.data = (unsigned long int) dd->port;
  2589 + dd->port->cmd_timer.function = mtip_timeout_function;
  2590 + mod_timer(&dd->port->cmd_timer,
  2591 + jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
  2592 +
  2593 + if (mtip_get_identify(dd->port, NULL) < 0) {
  2594 + rv = -EFAULT;
  2595 + goto out3;
  2596 + }
  2597 + mtip_dump_identify(dd->port);
  2598 +
  2599 + if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
  2600 + MTIP_FTL_REBUILD_MAGIC) {
  2601 + return mtip_ftl_rebuild_poll(dd);
  2602 + }
  2603 + return rv;
  2604 +
  2605 +out3:
  2606 + del_timer_sync(&dd->port->cmd_timer);
  2607 +
  2608 + /* Disable interrupts on the HBA. */
  2609 + writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
  2610 + dd->mmio + HOST_CTL);
  2611 +
  2612 + /*Release the IRQ. */
  2613 + devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
  2614 +
  2615 +out2:
  2616 + mtip_deinit_port(dd->port);
  2617 +
  2618 + /* Free the command/command header memory. */
  2619 + dmam_free_coherent(&dd->pdev->dev,
  2620 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  2621 + dd->port->command_list,
  2622 + dd->port->command_list_dma);
  2623 +out1:
  2624 + /* Free the memory allocated for the for structure. */
  2625 + kfree(dd->port);
  2626 +
  2627 + return rv;
  2628 +}
  2629 +
  2630 +/*
  2631 + * Called to deinitialize an interface.
  2632 + *
  2633 + * @dd Pointer to the driver data structure.
  2634 + *
  2635 + * return value
  2636 + * 0
  2637 + */
  2638 +int mtip_hw_exit(struct driver_data *dd)
  2639 +{
  2640 + /*
  2641 + * Send standby immediate (E0h) to the drive so that it
  2642 + * saves its state.
  2643 + */
  2644 + if (atomic_read(&dd->drv_cleanup_done) != true) {
  2645 +
  2646 + mtip_standby_immediate(dd->port);
  2647 +
  2648 + /* de-initialize the port. */
  2649 + mtip_deinit_port(dd->port);
  2650 +
  2651 + /* Disable interrupts on the HBA. */
  2652 + writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
  2653 + dd->mmio + HOST_CTL);
  2654 + }
  2655 +
  2656 + del_timer_sync(&dd->port->cmd_timer);
  2657 +
  2658 + /* Stop the bottom half tasklet. */
  2659 + tasklet_kill(&dd->tasklet);
  2660 +
  2661 + /* Release the IRQ. */
  2662 + devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
  2663 +
  2664 + /* Free the command/command header memory. */
  2665 + dmam_free_coherent(&dd->pdev->dev,
  2666 + HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
  2667 + dd->port->command_list,
  2668 + dd->port->command_list_dma);
  2669 + /* Free the memory allocated for the for structure. */
  2670 + kfree(dd->port);
  2671 +
  2672 + return 0;
  2673 +}
  2674 +
  2675 +/*
  2676 + * Issue a Standby Immediate command to the device.
  2677 + *
  2678 + * This function is called by the Block Layer just before the
  2679 + * system powers off during a shutdown.
  2680 + *
  2681 + * @dd Pointer to the driver data structure.
  2682 + *
  2683 + * return value
  2684 + * 0
  2685 + */
  2686 +int mtip_hw_shutdown(struct driver_data *dd)
  2687 +{
  2688 + /*
  2689 + * Send standby immediate (E0h) to the drive so that it
  2690 + * saves its state.
  2691 + */
  2692 + mtip_standby_immediate(dd->port);
  2693 +
  2694 + return 0;
  2695 +}
  2696 +
  2697 +/*
  2698 + * Suspend function
  2699 + *
  2700 + * This function is called by the Block Layer just before the
  2701 + * system hibernates.
  2702 + *
  2703 + * @dd Pointer to the driver data structure.
  2704 + *
  2705 + * return value
  2706 + * 0 Suspend was successful
  2707 + * -EFAULT Suspend was not successful
  2708 + */
  2709 +int mtip_hw_suspend(struct driver_data *dd)
  2710 +{
  2711 + /*
  2712 + * Send standby immediate (E0h) to the drive
  2713 + * so that it saves its state.
  2714 + */
  2715 + if (mtip_standby_immediate(dd->port) != 0) {
  2716 + dev_err(&dd->pdev->dev,
  2717 + "Failed standby-immediate command\n");
  2718 + return -EFAULT;
  2719 + }
  2720 +
  2721 + /* Disable interrupts on the HBA.*/
  2722 + writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
  2723 + dd->mmio + HOST_CTL);
  2724 + mtip_deinit_port(dd->port);
  2725 +
  2726 + return 0;
  2727 +}
  2728 +
  2729 +/*
  2730 + * Resume function
  2731 + *
  2732 + * This function is called by the Block Layer as the
  2733 + * system resumes.
  2734 + *
  2735 + * @dd Pointer to the driver data structure.
  2736 + *
  2737 + * return value
  2738 + * 0 Resume was successful
  2739 + * -EFAULT Resume was not successful
  2740 + */
  2741 +int mtip_hw_resume(struct driver_data *dd)
  2742 +{
  2743 + /* Perform any needed hardware setup steps */
  2744 + hba_setup(dd);
  2745 +
  2746 + /* Reset the HBA */
  2747 + if (mtip_hba_reset(dd) != 0) {
  2748 + dev_err(&dd->pdev->dev,
  2749 + "Unable to reset the HBA\n");
  2750 + return -EFAULT;
  2751 + }
  2752 +
  2753 + /*
  2754 + * Enable the port, DMA engine, and FIS reception specific
  2755 + * h/w in controller.
  2756 + */
  2757 + mtip_init_port(dd->port);
  2758 + mtip_start_port(dd->port);
  2759 +
  2760 + /* Enable interrupts on the HBA.*/
  2761 + writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
  2762 + dd->mmio + HOST_CTL);
  2763 +
  2764 + return 0;
  2765 +}
  2766 +
  2767 +/*
  2768 + * This function is called for clean the pending command in the
  2769 + * command slot during the surprise removal of device and return
  2770 + * error to the upper layer.
  2771 + *
  2772 + * @dd Pointer to the DRIVER_DATA structure.
  2773 + *
  2774 + * return value
  2775 + * None
  2776 + */
  2777 +void mtip_command_cleanup(struct driver_data *dd)
  2778 +{
  2779 + int group = 0, commandslot = 0, commandindex = 0;
  2780 + struct mtip_cmd *command;
  2781 + struct mtip_port *port = dd->port;
  2782 +
  2783 + for (group = 0; group < 4; group++) {
  2784 + for (commandslot = 0; commandslot < 32; commandslot++) {
  2785 + if (!(port->allocated[group] & (1 << commandslot)))
  2786 + continue;
  2787 +
  2788 + commandindex = group << 5 | commandslot;
  2789 + command = &port->commands[commandindex];
  2790 +
  2791 + if (atomic_read(&command->active)
  2792 + && (command->async_callback)) {
  2793 + command->async_callback(command->async_data,
  2794 + -ENODEV);
  2795 + command->async_callback = NULL;
  2796 + command->async_data = NULL;
  2797 + }
  2798 +
  2799 + dma_unmap_sg(&port->dd->pdev->dev,
  2800 + command->sg,
  2801 + command->scatter_ents,
  2802 + command->direction);
  2803 + }
  2804 + }
  2805 +
  2806 + up(&port->cmd_slot);
  2807 +
  2808 + atomic_set(&dd->drv_cleanup_done, true);
  2809 +}
  2810 +
  2811 +/*
  2812 + * Helper function for reusing disk name
  2813 + * upon hot insertion.
  2814 + */
  2815 +static int rssd_disk_name_format(char *prefix,
  2816 + int index,
  2817 + char *buf,
  2818 + int buflen)
  2819 +{
  2820 + const int base = 'z' - 'a' + 1;
  2821 + char *begin = buf + strlen(prefix);
  2822 + char *end = buf + buflen;
  2823 + char *p;
  2824 + int unit;
  2825 +
  2826 + p = end - 1;
  2827 + *p = '\0';
  2828 + unit = base;
  2829 + do {
  2830 + if (p == begin)
  2831 + return -EINVAL;
  2832 + *--p = 'a' + (index % unit);
  2833 + index = (index / unit) - 1;
  2834 + } while (index >= 0);
  2835 +
  2836 + memmove(begin, p, end - p);
  2837 + memcpy(buf, prefix, strlen(prefix));
  2838 +
  2839 + return 0;
  2840 +}
  2841 +
  2842 +/*
  2843 + * Block layer IOCTL handler.
  2844 + *
  2845 + * @dev Pointer to the block_device structure.
  2846 + * @mode ignored
  2847 + * @cmd IOCTL command passed from the user application.
  2848 + * @arg Argument passed from the user application.
  2849 + *
  2850 + * return value
  2851 + * 0 IOCTL completed successfully.
  2852 + * -ENOTTY IOCTL not supported or invalid driver data
  2853 + * structure pointer.
  2854 + */
  2855 +static int mtip_block_ioctl(struct block_device *dev,
  2856 + fmode_t mode,
  2857 + unsigned cmd,
  2858 + unsigned long arg)
  2859 +{
  2860 + struct driver_data *dd = dev->bd_disk->private_data;
  2861 +
  2862 + if (!capable(CAP_SYS_ADMIN))
  2863 + return -EACCES;
  2864 +
  2865 + if (!dd)
  2866 + return -ENOTTY;
  2867 +
  2868 + switch (cmd) {
  2869 + case BLKFLSBUF:
  2870 + return 0;
  2871 + default:
  2872 + return mtip_hw_ioctl(dd, cmd, arg, 0);
  2873 + }
  2874 +}
  2875 +
  2876 +/*
  2877 + * Block layer compat IOCTL handler.
  2878 + *
  2879 + * @dev Pointer to the block_device structure.
  2880 + * @mode ignored
  2881 + * @cmd IOCTL command passed from the user application.
  2882 + * @arg Argument passed from the user application.
  2883 + *
  2884 + * return value
  2885 + * 0 IOCTL completed successfully.
  2886 + * -ENOTTY IOCTL not supported or invalid driver data
  2887 + * structure pointer.
  2888 + */
  2889 +static int mtip_block_compat_ioctl(struct block_device *dev,
  2890 + fmode_t mode,
  2891 + unsigned cmd,
  2892 + unsigned long arg)
  2893 +{
  2894 + struct driver_data *dd = dev->bd_disk->private_data;
  2895 +
  2896 + if (!capable(CAP_SYS_ADMIN))
  2897 + return -EACCES;
  2898 +
  2899 + if (!dd)
  2900 + return -ENOTTY;
  2901 +
  2902 + switch (cmd) {
  2903 + case BLKFLSBUF:
  2904 + return 0;
  2905 + default:
  2906 + return mtip_hw_ioctl(dd, cmd, arg, 1);
  2907 + }
  2908 +}
  2909 +
  2910 +/*
  2911 + * Obtain the geometry of the device.
  2912 + *
  2913 + * You may think that this function is obsolete, but some applications,
  2914 + * fdisk for example still used CHS values. This function describes the
  2915 + * device as having 224 heads and 56 sectors per cylinder. These values are
  2916 + * chosen so that each cylinder is aligned on a 4KB boundary. Since a
  2917 + * partition is described in terms of a start and end cylinder this means
  2918 + * that each partition is also 4KB aligned. Non-aligned partitions adversely
  2919 + * affects performance.
  2920 + *
  2921 + * @dev Pointer to the block_device strucutre.
  2922 + * @geo Pointer to a hd_geometry structure.
  2923 + *
  2924 + * return value
  2925 + * 0 Operation completed successfully.
  2926 + * -ENOTTY An error occurred while reading the drive capacity.
  2927 + */
  2928 +static int mtip_block_getgeo(struct block_device *dev,
  2929 + struct hd_geometry *geo)
  2930 +{
  2931 + struct driver_data *dd = dev->bd_disk->private_data;
  2932 + sector_t capacity;
  2933 +
  2934 + if (!dd)
  2935 + return -ENOTTY;
  2936 +
  2937 + if (!(mtip_hw_get_capacity(dd, &capacity))) {
  2938 + dev_warn(&dd->pdev->dev,
  2939 + "Could not get drive capacity.\n");
  2940 + return -ENOTTY;
  2941 + }
  2942 +
  2943 + geo->heads = 224;
  2944 + geo->sectors = 56;
  2945 +#if BITS_PER_LONG == 64
  2946 + geo->cylinders = capacity / (geo->heads * geo->sectors);
  2947 +#else
  2948 + do_div(capacity, (geo->heads * geo->sectors));
  2949 + geo->cylinders = capacity;
  2950 +#endif
  2951 + return 0;
  2952 +}
  2953 +
  2954 +/*
  2955 + * Block device operation function.
  2956 + *
  2957 + * This structure contains pointers to the functions required by the block
  2958 + * layer.
  2959 + */
  2960 +static const struct block_device_operations mtip_block_ops = {
  2961 + .ioctl = mtip_block_ioctl,
  2962 + .compat_ioctl = mtip_block_compat_ioctl,
  2963 + .getgeo = mtip_block_getgeo,
  2964 + .owner = THIS_MODULE
  2965 +};
  2966 +
  2967 +/*
  2968 + * Block layer make request function.
  2969 + *
  2970 + * This function is called by the kernel to process a BIO for
  2971 + * the P320 device.
  2972 + *
  2973 + * @queue Pointer to the request queue. Unused other than to obtain
  2974 + * the driver data structure.
  2975 + * @bio Pointer to the BIO.
  2976 + *
  2977 + * return value
  2978 + * 0
  2979 + */
  2980 +static int mtip_make_request(struct request_queue *queue, struct bio *bio)
  2981 +{
  2982 + struct driver_data *dd = queue->queuedata;
  2983 + struct scatterlist *sg;
  2984 + struct bio_vec *bvec;
  2985 + int nents = 0;
  2986 + int tag = 0;
  2987 +
  2988 + if (unlikely(!bio_has_data(bio))) {
  2989 + blk_queue_flush(queue, 0);
  2990 + bio_endio(bio, 0);
  2991 + return 0;
  2992 + }
  2993 +
  2994 + if (unlikely(atomic_read(&dd->eh_active))) {
  2995 + bio_endio(bio, -EBUSY);
  2996 + return 0;
  2997 + }
  2998 +
  2999 + sg = mtip_hw_get_scatterlist(dd, &tag);
  3000 + if (likely(sg != NULL)) {
  3001 + blk_queue_bounce(queue, &bio);
  3002 +
  3003 + if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
  3004 + dev_warn(&dd->pdev->dev,
  3005 + "Maximum number of SGL entries exceeded");
  3006 + bio_io_error(bio);
  3007 + mtip_hw_release_scatterlist(dd, tag);
  3008 + return 0;
  3009 + }
  3010 +
  3011 + /* Create the scatter list for this bio. */
  3012 + bio_for_each_segment(bvec, bio, nents) {
  3013 + sg_set_page(&sg[nents],
  3014 + bvec->bv_page,
  3015 + bvec->bv_len,
  3016 + bvec->bv_offset);
  3017 + }
  3018 +
  3019 + /* Issue the read/write. */
  3020 + mtip_hw_submit_io(dd,
  3021 + bio->bi_sector,
  3022 + bio_sectors(bio),
  3023 + nents,
  3024 + tag,
  3025 + bio_endio,
  3026 + bio,
  3027 + bio->bi_rw & REQ_FLUSH,
  3028 + bio_data_dir(bio));
  3029 + } else {
  3030 + bio_io_error(bio);
  3031 + }
  3032 +
  3033 + return 0;
  3034 +}
  3035 +
  3036 +/*
  3037 + * Block layer initialization function.
  3038 + *
  3039 + * This function is called once by the PCI layer for each P320
  3040 + * device that is connected to the system.
  3041 + *
  3042 + * @dd Pointer to the driver data structure.
  3043 + *
  3044 + * return value
  3045 + * 0 on success else an error code.
  3046 + */
  3047 +int mtip_block_initialize(struct driver_data *dd)
  3048 +{
  3049 + int rv = 0;
  3050 + sector_t capacity;
  3051 + unsigned int index = 0;
  3052 + struct kobject *kobj;
  3053 +
  3054 + /* Initialize the protocol layer. */
  3055 + rv = mtip_hw_init(dd);
  3056 + if (rv < 0) {
  3057 + dev_err(&dd->pdev->dev,
  3058 + "Protocol layer initialization failed\n");
  3059 + rv = -EINVAL;
  3060 + goto protocol_init_error;
  3061 + }
  3062 +
  3063 + /* Allocate the request queue. */
  3064 + dd->queue = blk_alloc_queue(GFP_KERNEL);
  3065 + if (dd->queue == NULL) {
  3066 + dev_err(&dd->pdev->dev,
  3067 + "Unable to allocate request queue\n");
  3068 + rv = -ENOMEM;
  3069 + goto block_queue_alloc_init_error;
  3070 + }
  3071 +
  3072 + /* Attach our request function to the request queue. */
  3073 + blk_queue_make_request(dd->queue, mtip_make_request);
  3074 +
  3075 + /* Set device limits. */
  3076 + set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
  3077 + blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
  3078 + blk_queue_physical_block_size(dd->queue, 4096);
  3079 + blk_queue_io_min(dd->queue, 4096);
  3080 +
  3081 + dd->disk = alloc_disk(MTIP_MAX_MINORS);
  3082 + if (dd->disk == NULL) {
  3083 + dev_err(&dd->pdev->dev,
  3084 + "Unable to allocate gendisk structure\n");
  3085 + rv = -EINVAL;
  3086 + goto alloc_disk_error;
  3087 + }
  3088 +
  3089 + /* Generate the disk name, implemented same as in sd.c */
  3090 + do {
  3091 + if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
  3092 + goto ida_get_error;
  3093 +
  3094 + spin_lock(&rssd_index_lock);
  3095 + rv = ida_get_new(&rssd_index_ida, &index);
  3096 + spin_unlock(&rssd_index_lock);
  3097 + } while (rv == -EAGAIN);
  3098 +
  3099 + if (rv)
  3100 + goto ida_get_error;
  3101 +
  3102 + rv = rssd_disk_name_format("rssd",
  3103 + index,
  3104 + dd->disk->disk_name,
  3105 + DISK_NAME_LEN);
  3106 + if (rv)
  3107 + goto disk_index_error;
  3108 +
  3109 + dd->disk->driverfs_dev = &dd->pdev->dev;
  3110 + dd->disk->major = dd->major;
  3111 + dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
  3112 + dd->disk->fops = &mtip_block_ops;
  3113 + dd->disk->queue = dd->queue;
  3114 + dd->disk->private_data = dd;
  3115 + dd->queue->queuedata = dd;
  3116 + dd->index = index;
  3117 +
  3118 + /* Set the capacity of the device in 512 byte sectors. */
  3119 + if (!(mtip_hw_get_capacity(dd, &capacity))) {
  3120 + dev_warn(&dd->pdev->dev,
  3121 + "Could not read drive capacity\n");
  3122 + rv = -EIO;
  3123 + goto read_capacity_error;
  3124 + }
  3125 + set_capacity(dd->disk, capacity);
  3126 +
  3127 + /* Enable the block device and add it to /dev */
  3128 + add_disk(dd->disk);
  3129 +
  3130 + /*
  3131 + * Now that the disk is active, initialize any sysfs attributes
  3132 + * managed by the protocol layer.
  3133 + */
  3134 + kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
  3135 + if (kobj) {
  3136 + mtip_hw_sysfs_init(dd, kobj);
  3137 + kobject_put(kobj);
  3138 + }
  3139 +
  3140 + return rv;
  3141 +
  3142 +read_capacity_error:
  3143 + /*
  3144 + * Delete our gendisk structure. This also removes the device
  3145 + * from /dev
  3146 + */
  3147 + del_gendisk(dd->disk);
  3148 +
  3149 +disk_index_error:
  3150 + spin_lock(&rssd_index_lock);
  3151 + ida_remove(&rssd_index_ida, index);
  3152 + spin_unlock(&rssd_index_lock);
  3153 +
  3154 +ida_get_error:
  3155 + put_disk(dd->disk);
  3156 +
  3157 +alloc_disk_error:
  3158 + blk_cleanup_queue(dd->queue);
  3159 +
  3160 +block_queue_alloc_init_error:
  3161 + /* De-initialize the protocol layer. */
  3162 + mtip_hw_exit(dd);
  3163 +
  3164 +protocol_init_error:
  3165 + return rv;
  3166 +}
  3167 +
  3168 +/*
  3169 + * Block layer deinitialization function.
  3170 + *
  3171 + * Called by the PCI layer as each P320 device is removed.
  3172 + *
  3173 + * @dd Pointer to the driver data structure.
  3174 + *
  3175 + * return value
  3176 + * 0
  3177 + */
  3178 +int mtip_block_remove(struct driver_data *dd)
  3179 +{
  3180 + struct kobject *kobj;
  3181 + /* Clean up the sysfs attributes managed by the protocol layer. */
  3182 + kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
  3183 + if (kobj) {
  3184 + mtip_hw_sysfs_exit(dd, kobj);
  3185 + kobject_put(kobj);
  3186 + }
  3187 +
  3188 + /*
  3189 + * Delete our gendisk structure. This also removes the device
  3190 + * from /dev
  3191 + */
  3192 + del_gendisk(dd->disk);
  3193 + blk_cleanup_queue(dd->queue);
  3194 + dd->disk = NULL;
  3195 + dd->queue = NULL;
  3196 +
  3197 + /* De-initialize the protocol layer. */
  3198 + mtip_hw_exit(dd);
  3199 +
  3200 + return 0;
  3201 +}
  3202 +
  3203 +/*
  3204 + * Function called by the PCI layer when just before the
  3205 + * machine shuts down.
  3206 + *
  3207 + * If a protocol layer shutdown function is present it will be called
  3208 + * by this function.
  3209 + *
  3210 + * @dd Pointer to the driver data structure.
  3211 + *
  3212 + * return value
  3213 + * 0
  3214 + */
  3215 +int mtip_block_shutdown(struct driver_data *dd)
  3216 +{
  3217 + dev_info(&dd->pdev->dev,
  3218 + "Shutting down %s ...\n", dd->disk->disk_name);
  3219 +
  3220 + /* Delete our gendisk structure, and cleanup the blk queue. */
  3221 + del_gendisk(dd->disk);
  3222 + blk_cleanup_queue(dd->queue);
  3223 + dd->disk = NULL;
  3224 + dd->queue = NULL;
  3225 +
  3226 + mtip_hw_shutdown(dd);
  3227 + return 0;
  3228 +}
  3229 +
  3230 +int mtip_block_suspend(struct driver_data *dd)
  3231 +{
  3232 + dev_info(&dd->pdev->dev,
  3233 + "Suspending %s ...\n", dd->disk->disk_name);
  3234 + mtip_hw_suspend(dd);
  3235 + return 0;
  3236 +}
  3237 +
  3238 +int mtip_block_resume(struct driver_data *dd)
  3239 +{
  3240 + dev_info(&dd->pdev->dev, "Resuming %s ...\n",
  3241 + dd->disk->disk_name);
  3242 + mtip_hw_resume(dd);
  3243 + return 0;
  3244 +}
  3245 +
  3246 +/*
  3247 + * Called for each supported PCI device detected.
  3248 + *
  3249 + * This function allocates the private data structure, enables the
  3250 + * PCI device and then calls the block layer initialization function.
  3251 + *
  3252 + * return value
  3253 + * 0 on success else an error code.
  3254 + */
  3255 +static int mtip_pci_probe(struct pci_dev *pdev,
  3256 + const struct pci_device_id *ent)
  3257 +{
  3258 + int rv = 0;
  3259 + struct driver_data *dd = NULL;
  3260 +
  3261 + /* Allocate memory for this devices private data. */
  3262 + dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
  3263 + if (dd == NULL) {
  3264 + dev_err(&pdev->dev,
  3265 + "Unable to allocate memory for driver data\n");
  3266 + return -ENOMEM;
  3267 + }
  3268 +
  3269 + /* Set the atomic variable as 1 in case of SRSI */
  3270 + atomic_set(&dd->drv_cleanup_done, true);
  3271 +
  3272 + atomic_set(&dd->resumeflag, false);
  3273 + atomic_set(&dd->eh_active, 0);
  3274 +
  3275 + /* Attach the private data to this PCI device. */
  3276 + pci_set_drvdata(pdev, dd);
  3277 +
  3278 + rv = pcim_enable_device(pdev);
  3279 + if (rv < 0) {
  3280 + dev_err(&pdev->dev, "Unable to enable device\n");
  3281 + goto iomap_err;
  3282 + }
  3283 +
  3284 + /* Map BAR5 to memory. */
  3285 + rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
  3286 + if (rv < 0) {
  3287 + dev_err(&pdev->dev, "Unable to map regions\n");
  3288 + goto iomap_err;
  3289 + }
  3290 +
  3291 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
  3292 + rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  3293 +
  3294 + if (rv) {
  3295 + rv = pci_set_consistent_dma_mask(pdev,
  3296 + DMA_BIT_MASK(32));
  3297 + if (rv) {
  3298 + dev_warn(&pdev->dev,
  3299 + "64-bit DMA enable failed\n");
  3300 + goto setmask_err;
  3301 + }
  3302 + }
  3303 + }
  3304 +
  3305 + pci_set_master(pdev);
  3306 +
  3307 + if (pci_enable_msi(pdev)) {
  3308 + dev_warn(&pdev->dev,
  3309 + "Unable to enable MSI interrupt.\n");
  3310 + goto block_initialize_err;
  3311 + }
  3312 +
  3313 + /* Copy the info we may need later into the private data structure. */
  3314 + dd->major = mtip_major;
  3315 + dd->protocol = ent->driver_data;
  3316 + dd->instance = instance;
  3317 + dd->pdev = pdev;
  3318 +
  3319 + /* Initialize the block layer. */
  3320 + rv = mtip_block_initialize(dd);
  3321 + if (rv < 0) {
  3322 + dev_err(&pdev->dev,
  3323 + "Unable to initialize block layer\n");
  3324 + goto block_initialize_err;
  3325 + }
  3326 +
  3327 + /*
  3328 + * Increment the instance count so that each device has a unique
  3329 + * instance number.
  3330 + */
  3331 + instance++;
  3332 +
  3333 + goto done;
  3334 +
  3335 +block_initialize_err:
  3336 + pci_disable_msi(pdev);
  3337 +
  3338 +setmask_err:
  3339 + pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
  3340 +
  3341 +iomap_err:
  3342 + kfree(dd);
  3343 + pci_set_drvdata(pdev, NULL);
  3344 + return rv;
  3345 +done:
  3346 + /* Set the atomic variable as 0 in case of SRSI */
  3347 + atomic_set(&dd->drv_cleanup_done, true);
  3348 +
  3349 + return rv;
  3350 +}
  3351 +
  3352 +/*
  3353 + * Called for each probed device when the device is removed or the
  3354 + * driver is unloaded.
  3355 + *
  3356 + * return value
  3357 + * None
  3358 + */
  3359 +static void mtip_pci_remove(struct pci_dev *pdev)
  3360 +{
  3361 + struct driver_data *dd = pci_get_drvdata(pdev);
  3362 + int counter = 0;
  3363 +
  3364 + if (mtip_check_surprise_removal(pdev)) {
  3365 + while (atomic_read(&dd->drv_cleanup_done) == false) {
  3366 + counter++;
  3367 + msleep(20);
  3368 + if (counter == 10) {
  3369 + /* Cleanup the outstanding commands */
  3370 + mtip_command_cleanup(dd);
  3371 + break;
  3372 + }
  3373 + }
  3374 + }
  3375 + /* Set the atomic variable as 1 in case of SRSI */
  3376 + atomic_set(&dd->drv_cleanup_done, true);
  3377 +
  3378 + /* Clean up the block layer. */
  3379 + mtip_block_remove(dd);
  3380 +
  3381 + pci_disable_msi(pdev);
  3382 +
  3383 + kfree(dd);
  3384 + pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
  3385 +}
  3386 +
  3387 +/*
  3388 + * Called for each probed device when the device is suspended.
  3389 + *
  3390 + * return value
  3391 + * 0 Success
  3392 + * <0 Error
  3393 + */
  3394 +static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
  3395 +{
  3396 + int rv = 0;
  3397 + struct driver_data *dd = pci_get_drvdata(pdev);
  3398 +
  3399 + if (!dd) {
  3400 + dev_err(&pdev->dev,
  3401 + "Driver private datastructure is NULL\n");
  3402 + return -EFAULT;
  3403 + }
  3404 +
  3405 + atomic_set(&dd->resumeflag, true);
  3406 +
  3407 + /* Disable ports & interrupts then send standby immediate */
  3408 + rv = mtip_block_suspend(dd);
  3409 + if (rv < 0) {
  3410 + dev_err(&pdev->dev,
  3411 + "Failed to suspend controller\n");
  3412 + return rv;
  3413 + }
  3414 +
  3415 + /*
  3416 + * Save the pci config space to pdev structure &
  3417 + * disable the device
  3418 + */
  3419 + pci_save_state(pdev);
  3420 + pci_disable_device(pdev);
  3421 +
  3422 + /* Move to Low power state*/
  3423 + pci_set_power_state(pdev, PCI_D3hot);
  3424 +
  3425 + return rv;
  3426 +}
  3427 +
  3428 +/*
  3429 + * Called for each probed device when the device is resumed.
  3430 + *
  3431 + * return value
  3432 + * 0 Success
  3433 + * <0 Error
  3434 + */
  3435 +static int mtip_pci_resume(struct pci_dev *pdev)
  3436 +{
  3437 + int rv = 0;
  3438 + struct driver_data *dd;
  3439 +
  3440 + dd = pci_get_drvdata(pdev);
  3441 + if (!dd) {
  3442 + dev_err(&pdev->dev,
  3443 + "Driver private datastructure is NULL\n");
  3444 + return -EFAULT;
  3445 + }
  3446 +
  3447 + /* Move the device to active State */
  3448 + pci_set_power_state(pdev, PCI_D0);
  3449 +
  3450 + /* Restore PCI configuration space */
  3451 + pci_restore_state(pdev);
  3452 +
  3453 + /* Enable the PCI device*/
  3454 + rv = pcim_enable_device(pdev);
  3455 + if (rv < 0) {
  3456 + dev_err(&pdev->dev,
  3457 + "Failed to enable card during resume\n");
  3458 + goto err;
  3459 + }
  3460 + pci_set_master(pdev);
  3461 +
  3462 + /*
  3463 + * Calls hbaReset, initPort, & startPort function
  3464 + * then enables interrupts
  3465 + */
  3466 + rv = mtip_block_resume(dd);
  3467 + if (rv < 0)
  3468 + dev_err(&pdev->dev, "Unable to resume\n");
  3469 +
  3470 +err:
  3471 + atomic_set(&dd->resumeflag, false);
  3472 +
  3473 + return rv;
  3474 +}
  3475 +
  3476 +/*
  3477 + * Shutdown routine
  3478 + *
  3479 + * return value
  3480 + * None
  3481 + */
  3482 +static void mtip_pci_shutdown(struct pci_dev *pdev)
  3483 +{
  3484 + struct driver_data *dd = pci_get_drvdata(pdev);
  3485 + if (dd)
  3486 + mtip_block_shutdown(dd);
  3487 +}
  3488 +
  3489 +/*
  3490 + * This function check_for_surprise_removal is called
  3491 + * while card is removed from the system and it will
  3492 + * read the vendor id from the configration space
  3493 + *
  3494 + * @pdev Pointer to the pci_dev structure.
  3495 + *
  3496 + * return value
  3497 + * true if device removed, else false
  3498 + */
  3499 +bool mtip_check_surprise_removal(struct pci_dev *pdev)
  3500 +{
  3501 + u16 vendor_id = 0;
  3502 +
  3503 + /* Read the vendorID from the configuration space */
  3504 + pci_read_config_word(pdev, 0x00, &vendor_id);
  3505 + if (vendor_id == 0xFFFF)
  3506 + return true; /* device removed */
  3507 +
  3508 + return false; /* device present */
  3509 +}
  3510 +
/* Table of device ids supported by this driver. */
static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },	/* P320 SSD */
	{ 0 }	/* terminating entry */
};
  3516 +
/* Structure that describes the PCI driver functions. */
struct pci_driver mtip_pci_driver = {
	.name		= MTIP_DRV_NAME,	/* driver name shown in sysfs */
	.id_table	= mtip_pci_tbl,		/* devices this driver binds to */
	.probe		= mtip_pci_probe,
	.remove		= mtip_pci_remove,
	.suspend	= mtip_pci_suspend,
	.resume		= mtip_pci_resume,
	.shutdown	= mtip_pci_shutdown,
};

/* Export the ID table so userspace tooling can autoload the module. */
MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
  3529 +
  3530 +/*
  3531 + * Module initialization function.
  3532 + *
  3533 + * Called once when the module is loaded. This function allocates a major
  3534 + * block device number to the Cyclone devices and registers the PCI layer
  3535 + * of the driver.
  3536 + *
  3537 + * Return value
  3538 + * 0 on success else error code.
  3539 + */
  3540 +static int __init mtip_init(void)
  3541 +{
  3542 + printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
  3543 +
  3544 + /* Allocate a major block device number to use with this driver. */
  3545 + mtip_major = register_blkdev(0, MTIP_DRV_NAME);
  3546 + if (mtip_major < 0) {
  3547 + printk(KERN_ERR "Unable to register block device (%d)\n",
  3548 + mtip_major);
  3549 + return -EBUSY;
  3550 + }
  3551 +
  3552 + /* Register our PCI operations. */
  3553 + return pci_register_driver(&mtip_pci_driver);
  3554 +}
  3555 +
  3556 +/*
  3557 + * Module de-initialization function.
  3558 + *
  3559 + * Called once when the module is unloaded. This function deallocates
  3560 + * the major block device number allocated by mtip_init() and
  3561 + * unregisters the PCI layer of the driver.
  3562 + *
  3563 + * Return value
  3564 + * none
  3565 + */
  3566 +static void __exit mtip_exit(void)
  3567 +{
  3568 + /* Release the allocated major block device number. */
  3569 + unregister_blkdev(mtip_major, MTIP_DRV_NAME);
  3570 +
  3571 + /* Unregister the PCI driver. */
  3572 + pci_unregister_driver(&mtip_pci_driver);
  3573 +}
  3574 +
  3575 +MODULE_AUTHOR("Micron Technology, Inc");
  3576 +MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
  3577 +MODULE_LICENSE("GPL");
  3578 +MODULE_VERSION(MTIP_DRV_VERSION);
  3579 +
  3580 +module_init(mtip_init);
  3581 +module_exit(mtip_exit);
drivers/block/mtip32xx/mtip32xx.h
  1 +/*
  2 + * mtip32xx.h - Header file for the P320 SSD Block Driver
  3 + * Copyright (C) 2011 Micron Technology, Inc.
  4 + *
  5 + * Portions of this code were derived from works subjected to the
  6 + * following copyright:
  7 + * Copyright (C) 2009 Integrated Device Technology, Inc.
  8 + *
  9 + * This program is free software; you can redistribute it and/or modify
  10 + * it under the terms of the GNU General Public License as published by
  11 + * the Free Software Foundation; either version 2 of the License, or
  12 + * (at your option) any later version.
  13 + *
  14 + * This program is distributed in the hope that it will be useful,
  15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17 + * GNU General Public License for more details.
  18 + *
  19 + */
  20 +
  21 +#ifndef __MTIP32XX_H__
  22 +#define __MTIP32XX_H__
  23 +
  24 +#include <linux/spinlock.h>
  25 +#include <linux/rwsem.h>
  26 +#include <linux/ata.h>
  27 +#include <linux/interrupt.h>
  28 +#include <linux/genhd.h>
  29 +#include <linux/version.h>
  30 +
/* Offset of Subsystem Device ID in PCI configuration space */
  32 +#define PCI_SUBSYSTEM_DEVICEID 0x2E
  33 +
/* Offset of Device Control register in PCIe extended capabilities space */
  35 +#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
  36 +
  37 +/* # of times to retry timed out IOs */
  38 +#define MTIP_MAX_RETRIES 5
  39 +
  40 +/* Various timeout values in ms */
  41 +#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
  42 +#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000
  43 +#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000
  44 +
  45 +/* check for timeouts every 500ms */
  46 +#define MTIP_TIMEOUT_CHECK_PERIOD 500
  47 +
  48 +/* ftl rebuild */
  49 +#define MTIP_FTL_REBUILD_OFFSET 142
  50 +#define MTIP_FTL_REBUILD_MAGIC 0xed51
  51 +#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
  52 +
/*
 * Macro to extract the tag bit number from a tag value.
 * The argument is parenthesized so expressions with lower-precedence
 * operators (e.g. MTIP_TAG_BIT(a ? b : c)) expand correctly.
 */
#define MTIP_TAG_BIT(tag)	((tag) & 0x1f)

/*
 * Macro to extract the tag index from a tag value. The index
 * is used to access the correct s_active/Command Issue register based
 * on the tag value.
 */
#define MTIP_TAG_INDEX(tag)	((tag) >> 5)
  62 +
  63 +/*
  64 + * Maximum number of scatter gather entries
  65 + * a single command may have.
  66 + */
  67 +#define MTIP_MAX_SG 128
  68 +
  69 +/*
  70 + * Maximum number of slot groups (Command Issue & s_active registers)
  71 + * NOTE: This is the driver maximum; check dd->slot_groups for actual value.
  72 + */
  73 +#define MTIP_MAX_SLOT_GROUPS 8
  74 +
  75 +/* Internal command tag. */
  76 +#define MTIP_TAG_INTERNAL 0
  77 +
  78 +/* Micron Vendor ID & P320x SSD Device ID */
  79 +#define PCI_VENDOR_ID_MICRON 0x1344
  80 +#define P320_DEVICE_ID 0x5150
  81 +
  82 +/* Driver name and version strings */
  83 +#define MTIP_DRV_NAME "mtip32xx"
  84 +#define MTIP_DRV_VERSION "1.2.6os2"
  85 +
  86 +/* Maximum number of minor device numbers per device. */
  87 +#define MTIP_MAX_MINORS 16
  88 +
  89 +/* Maximum number of supported command slots. */
  90 +#define MTIP_MAX_COMMAND_SLOTS (MTIP_MAX_SLOT_GROUPS * 32)
  91 +
  92 +/*
  93 + * Per-tag bitfield size in longs.
  94 + * Linux bit manipulation functions
  95 + * (i.e. test_and_set_bit, find_next_zero_bit)
  96 + * manipulate memory in longs, so we try to make the math work.
  97 + * take the slot groups and find the number of longs, rounding up.
  98 + * Careful! i386 and x86_64 use different size longs!
  99 + */
  100 +#define U32_PER_LONG (sizeof(long) / sizeof(u32))
  101 +#define SLOTBITS_IN_LONGS ((MTIP_MAX_SLOT_GROUPS + \
  102 + (U32_PER_LONG-1))/U32_PER_LONG)
  103 +
  104 +/* BAR number used to access the HBA registers. */
  105 +#define MTIP_ABAR 5
  106 +
  107 +/* Forced Unit Access Bit */
  108 +#define FUA_BIT 0x80
  109 +
#ifdef DEBUG
/* Debug build: forward to printk() with the subsystem prefix applied. */
#define dbg_printk(format, arg...)			\
	do { printk(pr_fmt(format), ##arg); } while (0)
#else
/*
 * Non-debug build: expand to a statement-shaped no-op.  Both branches
 * use do/while(0) so that "if (x) dbg_printk(...); else ..." parses;
 * the original DEBUG expansion carried a stray trailing semicolon and
 * the non-DEBUG expansion was empty, each of which breaks such usage.
 */
#define dbg_printk(format, arg...)	do { } while (0)
#endif
  116 +
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
	/*
	 * FIS type.
	 * - 27h Register FIS, host to device.
	 * - 34h Register FIS, device to host.
	 * - 39h DMA Activate FIS, device to host.
	 * - 41h DMA Setup FIS, bi-directional.
	 * - 46h Data FIS, bi-directional.
	 * - 58h BIST Activate FIS, bi-directional.
	 * - 5Fh PIO Setup FIS, device to host.
	 * - A1h Set Device Bits FIS, device to host.
	 */
	unsigned char type;
	unsigned char opts;	/* FIS option bits */
	unsigned char command;	/* command register */
	unsigned char features;	/* features register, low byte */

	/* Each union below aliases an LBA byte with its legacy CHS name. */
	union {
		unsigned char lba_low;
		unsigned char sector;
	};
	union {
		unsigned char lba_mid;
		unsigned char cyl_low;
	};
	union {
		unsigned char lba_hi;
		unsigned char cyl_hi;
	};
	union {
		unsigned char device;
		unsigned char head;
	};

	/* Extended (48-bit LBA) counterparts of the address bytes above. */
	union {
		unsigned char lba_low_ex;
		unsigned char sector_ex;
	};
	union {
		unsigned char lba_mid_ex;
		unsigned char cyl_low_ex;
	};
	union {
		unsigned char lba_hi_ex;
		unsigned char cyl_hi_ex;
	};
	unsigned char features_ex;	/* features register, high byte */

	unsigned char sect_count;	/* sector count, low byte */
	unsigned char sect_cnt_ex;	/* sector count, high byte */
	unsigned char res2;		/* reserved */
	unsigned char control;		/* device control register */

	unsigned int res3;		/* reserved */
};
  173 +
/* Command header structure (one entry in the HBA's command list). */
struct mtip_cmd_hdr {
	/*
	 * Command options.
	 * - Bits 31:16 Number of PRD entries.
	 * - Bits 15:8 Unused in this implementation.
	 * - Bit 7 Prefetch bit, informs the drive to prefetch PRD entries.
	 * - Bit 6 Write bit, should be set when writing data to the device.
	 * - Bit 5 Unused in this implementation.
	 * - Bits 4:0 Length of the command FIS in DWords (DWord = 4 bytes).
	 */
	unsigned int opts;
	/* This field is unused when using NCQ. */
	union {
		unsigned int byte_count;
		unsigned int status;
	};
	/*
	 * Lower 32 bits of the command table address associated with this
	 * header. The command table addresses must be 128 byte aligned.
	 */
	unsigned int ctba;
	/*
	 * If 64 bit addressing is used this field is the upper 32 bits
	 * of the command table address associated with this command.
	 */
	unsigned int ctbau;
	/* Reserved and unused. */
	unsigned int res[4];
};
  204 +
/* Command scatter gather structure (PRD). */
struct mtip_cmd_sg {
	/*
	 * Low 32 bits of the data buffer address. For P320 this
	 * address must be 8 byte aligned signified by bits 2:0 being
	 * set to 0.
	 */
	unsigned int dba;
	/*
	 * When 64 bit addressing is used this field is the upper
	 * 32 bits of the data buffer address.
	 */
	unsigned int dba_upper;
	/* Unused. */
	unsigned int reserved;
	/*
	 * Bit 31: interrupt when this data block has been transferred.
	 * Bits 30..22: reserved
	 * Bits 21..0: byte count (minus 1). For P320 the byte count must be
	 * 8 byte aligned signified by bits 2:0 being set to 1.
	 * NOTE(review): "set to 1" presumably means all three low bits set
	 * (0x7), consistent with the count being stored minus one --
	 * confirm against the P320 hardware documentation.
	 */
	unsigned int info;
};
/* Forward declaration for the completion callback in struct mtip_cmd. */
struct mtip_port;
  229 +
/*
 * Structure used to describe a command.
 * One instance exists per command slot; it ties together the DMA-visible
 * command header/table and the driver-side completion bookkeeping.
 */
struct mtip_cmd {

	struct mtip_cmd_hdr *command_header; /* ptr to command header entry */

	dma_addr_t command_header_dma; /* corresponding physical address */

	void *command; /* ptr to command table entry */

	dma_addr_t command_dma; /* corresponding physical address */

	void *comp_data; /* data passed to completion function comp_func() */
	/*
	 * Completion function called by the ISR upon completion of
	 * a command.
	 */
	void (*comp_func)(struct mtip_port *port,
				int tag,
				void *data,
				int status);
	/* Additional callback function that may be called by comp_func() */
	void (*async_callback)(void *data, int status);

	void *async_data; /* Addl. data passed to async_callback() */

	int scatter_ents; /* Number of scatter list entries used */

	struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */

	int retries; /* The number of retries left for this command. */

	int direction; /* Data transfer direction */

	unsigned long comp_time; /* command completion time, in jiffies */

	atomic_t active; /* declares if this command sent to the drive. */
};
  267 +
/* Structure used to describe a port (one HBA port and its resources). */
struct mtip_port {
	/* Pointer back to the driver data for this port. */
	struct driver_data *dd;
	/*
	 * Used to determine if the data pointed to by the
	 * identify field is valid.
	 */
	unsigned long identify_valid;
	/* Base address of the memory mapped IO for the port. */
	void __iomem *mmio;
	/* Array of pointers to the memory mapped s_active registers. */
	void __iomem *s_active[MTIP_MAX_SLOT_GROUPS];
	/* Array of pointers to the memory mapped completed registers. */
	void __iomem *completed[MTIP_MAX_SLOT_GROUPS];
	/* Array of pointers to the memory mapped Command Issue registers. */
	void __iomem *cmd_issue[MTIP_MAX_SLOT_GROUPS];
	/*
	 * Pointer to the beginning of the command header memory as used
	 * by the driver.
	 */
	void *command_list;
	/*
	 * Pointer to the beginning of the command header memory as used
	 * by the DMA.
	 */
	dma_addr_t command_list_dma;
	/*
	 * Pointer to the beginning of the RX FIS memory as used
	 * by the driver.
	 */
	void *rxfis;
	/*
	 * Pointer to the beginning of the RX FIS memory as used
	 * by the DMA.
	 */
	dma_addr_t rxfis_dma;
	/*
	 * Pointer to the beginning of the command table memory as used
	 * by the driver.
	 */
	void *command_table;
	/*
	 * Pointer to the beginning of the command table memory as used
	 * by the DMA.
	 */
	dma_addr_t command_tbl_dma;
	/*
	 * Pointer to the beginning of the identify data memory as used
	 * by the driver.
	 */
	u16 *identify;
	/*
	 * Pointer to the beginning of the identify data memory as used
	 * by the DMA.
	 */
	dma_addr_t identify_dma;
	/*
	 * Pointer to the beginning of a sector buffer that is used
	 * by the driver when issuing internal commands.
	 */
	u16 *sector_buffer;
	/*
	 * Pointer to the beginning of a sector buffer that is used
	 * by the DMA when the driver issues internal commands.
	 */
	dma_addr_t sector_buffer_dma;
	/*
	 * Bit significant, used to determine if a command slot has
	 * been allocated. i.e. the slot is in use.  Bits are cleared
	 * when the command slot and all associated data structures
	 * are no longer needed.
	 */
	unsigned long allocated[SLOTBITS_IN_LONGS];
	/*
	 * Array of command slots. Structure includes pointers to the
	 * command header and command table, and completion function and data
	 * pointers.
	 */
	struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS];
	/* Non-zero if an internal command is in progress. */
	int internal_cmd_in_progress;
	/*
	 * Timer used to complete commands that have been active for too long.
	 */
	struct timer_list cmd_timer;
	/*
	 * Semaphore used to block threads if there are no
	 * command slots available.
	 */
	struct semaphore cmd_slot;
	/* Spinlock for working around command-issue bug. */
	spinlock_t cmd_issue_lock;
};
  362 +
  363 +/*
  364 + * Driver private data structure.
  365 + *
  366 + * One structure is allocated per probed device.
  367 + */
  368 +struct driver_data {
  369 + void __iomem *mmio; /* Base address of the HBA registers. */
  370 +
  371 + int major; /* Major device number. */
  372 +
  373 + int instance; /* Instance number. First device probed is 0, ... */
  374 +
  375 + int protocol; /* FIXME: Protocol ops array index. */
  376 +
  377 + struct gendisk *disk; /* Pointer to our gendisk structure. */
  378 +
  379 + struct pci_dev *pdev; /* Pointer to the PCI device structure. */
  380 +
  381 + struct request_queue *queue; /* Our request queue. */
  382 + /*
  383 + * Semaphore used to lock out read/write commands during the
  384 + * execution of an internal command.
  385 + */
  386 + struct rw_semaphore internal_sem;
  387 +
  388 + struct mtip_port *port; /* Pointer to the port data structure. */
  389 +
  390 + /* Tasklet used to process the bottom half of the ISR. */
  391 + struct tasklet_struct tasklet;
  392 +
  393 + unsigned product_type; /* magic value declaring the product type */
  394 +
  395 + unsigned slot_groups; /* number of slot groups the product supports */
  396 +
  397 + atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
  398 +
  399 + unsigned long index; /* Index to determine the disk name */
  400 +
  401 + unsigned int ftlrebuildflag; /* FTL rebuild flag */
  402 +
  403 + atomic_t resumeflag; /* Atomic variable to track suspend/resume */
  404 +
  405 + atomic_t eh_active; /* Flag for error handling tracking */
  406 +};
  407 +
  408 +/* Function declarations */
  409 +extern int mtip_block_initialize(struct driver_data *dd);
  410 +extern int mtip_block_remove(struct driver_data *dd);
  411 +extern int mtip_block_shutdown(struct driver_data *dd);
  412 +extern int mtip_block_suspend(struct driver_data *dd);
  413 +extern int mtip_block_resume(struct driver_data *dd);
  414 +extern int mtip_hw_init(struct driver_data *dd);
  415 +extern int mtip_hw_exit(struct driver_data *dd);
  416 +extern int mtip_hw_shutdown(struct driver_data *dd);
  417 +extern bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors);
  418 +extern void mtip_hw_release_scatterlist(
  419 + struct driver_data *dd,
  420 + int tag);
  421 +extern struct scatterlist *mtip_hw_get_scatterlist(
  422 + struct driver_data *dd,
  423 + int *tag);
  424 +extern void mtip_hw_submit_io(struct driver_data *dd,
  425 + sector_t start,
  426 + int nsect,
  427 + int nents,
  428 + int tag,
  429 + void *callback,
  430 + void *data,
  431 + int barrier,
  432 + int dir);
  433 +extern int mtip_hw_ioctl(struct driver_data *dd,
  434 + unsigned int cmd,
  435 + unsigned long arg,
  436 + unsigned char compat);
  437 +extern int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj);
  438 +extern int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj);
  439 +extern int mtip_hw_resume(struct driver_data *dd);
  440 +extern int mtip_hw_suspend(struct driver_data *dd);
  441 +void mtip_command_cleanup(struct driver_data *dd);
  442 +bool mtip_check_surprise_removal(struct pci_dev *pdev);
  443 +void mtip_restart_port(struct mtip_port *port);
  444 +
  445 +#endif