Commit 9965c2f19be470c452357ae4f6304467cdeada55

Authored by Benjamin Herrenschmidt
Committed by James Bottomley
1 parent 56163c233d

[SCSI] ibmvscsi: Remove backend abstraction

Now that the iSeries code is gone, the backend abstraction
in this driver is no longer necessary, which allows us to
consolidate the driver into a single file.

A side effect is that the module is now named ibmvscsi.ko,
which matches the driver's hotplug name and fixes
auto-load issues.

[jejb: fix up checkpatch.pl errors]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

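The ops-table indirection being removed is easiest to see next to the direct call that replaces it. Below is a minimal, self-contained userspace sketch of that pattern in plain C; demo_host, demo_ops and demo_send_crq are illustrative stand-ins for the kernel's ibmvscsi_host_data, ibmvscsi_ops and ibmvscsi_send_crq, not the real driver code. With the iSeries backend gone, the table always points at the RPA implementation, so the direct call used in this commit is equivalent.

#include <stdio.h>

struct demo_host;                 /* opaque stand-in for ibmvscsi_host_data */

/* Before this commit: each backend filled in a table like this one. */
struct demo_ops {
	int (*send_crq)(struct demo_host *host,
			unsigned long long w1, unsigned long long w2);
};

static int rpa_send_crq(struct demo_host *host,
			unsigned long long w1, unsigned long long w2)
{
	(void)host;                      /* unused in this sketch */
	printf("rpa backend: send_crq %#llx %#llx\n", w1, w2);
	return 0;
}

static struct demo_ops rpa_ops = { .send_crq = rpa_send_crq };
static struct demo_ops *demo_ops;        /* chosen once at module init */

/* After this commit: only one backend is left, so call it directly. */
static int demo_send_crq(struct demo_host *host,
			 unsigned long long w1, unsigned long long w2)
{
	return rpa_send_crq(host, w1, w2);
}

int main(void)
{
	demo_ops = &rpa_ops;                           /* old: pick a backend */
	demo_ops->send_crq(NULL, 0xC001000000000000ULL, 0);

	demo_send_crq(NULL, 0xC001000000000000ULL, 0); /* new: direct call */
	return 0;
}

The same collapse happens in the real diff for init_crq_queue, release_crq_queue, reset_crq_queue, reenable_crq_queue and resume, which is why struct ibmvscsi_ops and rpa_vscsi.c can be deleted outright.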
Showing 4 changed files with 331 additions and 414 deletions

drivers/scsi/ibmvscsi/Makefile
1   -obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
2   -
3   -ibmvscsic-y += ibmvscsi.o
4   -ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
5   -
  1 +obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
6 2 obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
7 3 obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
drivers/scsi/ibmvscsi/ibmvscsi.c
... ... @@ -93,13 +93,13 @@
93 93 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
94 94 static int fast_fail = 1;
95 95 static int client_reserve = 1;
  96 +static char partition_name[97] = "UNKNOWN";
  97 +static unsigned int partition_number = -1;
96 98  
97 99 static struct scsi_transport_template *ibmvscsi_transport_template;
98 100  
99 101 #define IBMVSCSI_VERSION "1.5.9"
100 102  
101   -static struct ibmvscsi_ops *ibmvscsi_ops;
102   -
103 103 MODULE_DESCRIPTION("IBM Virtual SCSI");
104 104 MODULE_AUTHOR("Dave Boutcher");
105 105 MODULE_LICENSE("GPL");
106 106  
... ... @@ -118,7 +118,317 @@
118 118 module_param_named(client_reserve, client_reserve, int, S_IRUGO );
119 119 MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
120 120  
  121 +static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
  122 + struct ibmvscsi_host_data *hostdata);
  123 +
121 124 /* ------------------------------------------------------------
  125 + * Routines for managing the command/response queue
  126 + */
  127 +/**
  128 + * ibmvscsi_handle_event: - Interrupt handler for crq events
  129 + * @irq: number of irq to handle, not used
  130 + * @dev_instance: ibmvscsi_host_data of host that received interrupt
  131 + *
  132 + * Disables interrupts and schedules srp_task
  133 + * Always returns IRQ_HANDLED
  134 + */
  135 +static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
  136 +{
  137 + struct ibmvscsi_host_data *hostdata =
  138 + (struct ibmvscsi_host_data *)dev_instance;
  139 + vio_disable_interrupts(to_vio_dev(hostdata->dev));
  140 + tasklet_schedule(&hostdata->srp_task);
  141 + return IRQ_HANDLED;
  142 +}
  143 +
  144 +/**
  145 + * release_crq_queue: - Deallocates data and unregisters CRQ
  146 + * @queue: crq_queue to initialize and register
  147 + * @host_data: ibmvscsi_host_data of host
  148 + *
  149 + * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
  150 + * the crq with the hypervisor.
  151 + */
  152 +static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
  153 + struct ibmvscsi_host_data *hostdata,
  154 + int max_requests)
  155 +{
  156 + long rc = 0;
  157 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  158 + free_irq(vdev->irq, (void *)hostdata);
  159 + tasklet_kill(&hostdata->srp_task);
  160 + do {
  161 + if (rc)
  162 + msleep(100);
  163 + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
  164 + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  165 + dma_unmap_single(hostdata->dev,
  166 + queue->msg_token,
  167 + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  168 + free_page((unsigned long)queue->msgs);
  169 +}
  170 +
  171 +/**
  172 + * crq_queue_next_crq: - Returns the next entry in message queue
  173 + * @queue: crq_queue to use
  174 + *
  175 + * Returns pointer to next entry in queue, or NULL if there are no new
  176 + * entried in the CRQ.
  177 + */
  178 +static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
  179 +{
  180 + struct viosrp_crq *crq;
  181 + unsigned long flags;
  182 +
  183 + spin_lock_irqsave(&queue->lock, flags);
  184 + crq = &queue->msgs[queue->cur];
  185 + if (crq->valid & 0x80) {
  186 + if (++queue->cur == queue->size)
  187 + queue->cur = 0;
  188 + } else
  189 + crq = NULL;
  190 + spin_unlock_irqrestore(&queue->lock, flags);
  191 +
  192 + return crq;
  193 +}
  194 +
  195 +/**
  196 + * ibmvscsi_send_crq: - Send a CRQ
  197 + * @hostdata: the adapter
  198 + * @word1: the first 64 bits of the data
  199 + * @word2: the second 64 bits of the data
  200 + */
  201 +static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
  202 + u64 word1, u64 word2)
  203 +{
  204 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  205 +
  206 + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
  207 +}
  208 +
  209 +/**
  210 + * ibmvscsi_task: - Process srps asynchronously
  211 + * @data: ibmvscsi_host_data of host
  212 + */
  213 +static void ibmvscsi_task(void *data)
  214 +{
  215 + struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
  216 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  217 + struct viosrp_crq *crq;
  218 + int done = 0;
  219 +
  220 + while (!done) {
  221 + /* Pull all the valid messages off the CRQ */
  222 + while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
  223 + ibmvscsi_handle_crq(crq, hostdata);
  224 + crq->valid = 0x00;
  225 + }
  226 +
  227 + vio_enable_interrupts(vdev);
  228 + crq = crq_queue_next_crq(&hostdata->queue);
  229 + if (crq != NULL) {
  230 + vio_disable_interrupts(vdev);
  231 + ibmvscsi_handle_crq(crq, hostdata);
  232 + crq->valid = 0x00;
  233 + } else {
  234 + done = 1;
  235 + }
  236 + }
  237 +}
  238 +
  239 +static void gather_partition_info(void)
  240 +{
  241 + struct device_node *rootdn;
  242 +
  243 + const char *ppartition_name;
  244 + const unsigned int *p_number_ptr;
  245 +
  246 + /* Retrieve information about this partition */
  247 + rootdn = of_find_node_by_path("/");
  248 + if (!rootdn) {
  249 + return;
  250 + }
  251 +
  252 + ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
  253 + if (ppartition_name)
  254 + strncpy(partition_name, ppartition_name,
  255 + sizeof(partition_name));
  256 + p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
  257 + if (p_number_ptr)
  258 + partition_number = *p_number_ptr;
  259 + of_node_put(rootdn);
  260 +}
  261 +
  262 +static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
  263 +{
  264 + memset(&hostdata->madapter_info, 0x00,
  265 + sizeof(hostdata->madapter_info));
  266 +
  267 + dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
  268 + strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
  269 +
  270 + strncpy(hostdata->madapter_info.partition_name, partition_name,
  271 + sizeof(hostdata->madapter_info.partition_name));
  272 +
  273 + hostdata->madapter_info.partition_number = partition_number;
  274 +
  275 + hostdata->madapter_info.mad_version = 1;
  276 + hostdata->madapter_info.os_type = 2;
  277 +}
  278 +
  279 +/**
  280 + * reset_crq_queue: - resets a crq after a failure
  281 + * @queue: crq_queue to initialize and register
  282 + * @hostdata: ibmvscsi_host_data of host
  283 + *
  284 + */
  285 +static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
  286 + struct ibmvscsi_host_data *hostdata)
  287 +{
  288 + int rc = 0;
  289 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  290 +
  291 + /* Close the CRQ */
  292 + do {
  293 + if (rc)
  294 + msleep(100);
  295 + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
  296 + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  297 +
  298 + /* Clean out the queue */
  299 + memset(queue->msgs, 0x00, PAGE_SIZE);
  300 + queue->cur = 0;
  301 +
  302 + set_adapter_info(hostdata);
  303 +
  304 + /* And re-open it again */
  305 + rc = plpar_hcall_norets(H_REG_CRQ,
  306 + vdev->unit_address,
  307 + queue->msg_token, PAGE_SIZE);
  308 + if (rc == 2) {
  309 + /* Adapter is good, but other end is not ready */
  310 + dev_warn(hostdata->dev, "Partner adapter not ready\n");
  311 + } else if (rc != 0) {
  312 + dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
  313 + }
  314 + return rc;
  315 +}
  316 +
  317 +/**
  318 + * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
  319 + * @queue: crq_queue to initialize and register
  320 + * @hostdata: ibmvscsi_host_data of host
  321 + *
  322 + * Allocates a page for messages, maps it for dma, and registers
  323 + * the crq with the hypervisor.
  324 + * Returns zero on success.
  325 + */
  326 +static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
  327 + struct ibmvscsi_host_data *hostdata,
  328 + int max_requests)
  329 +{
  330 + int rc;
  331 + int retrc;
  332 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  333 +
  334 + queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
  335 +
  336 + if (!queue->msgs)
  337 + goto malloc_failed;
  338 + queue->size = PAGE_SIZE / sizeof(*queue->msgs);
  339 +
  340 + queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
  341 + queue->size * sizeof(*queue->msgs),
  342 + DMA_BIDIRECTIONAL);
  343 +
  344 + if (dma_mapping_error(hostdata->dev, queue->msg_token))
  345 + goto map_failed;
  346 +
  347 + gather_partition_info();
  348 + set_adapter_info(hostdata);
  349 +
  350 + retrc = rc = plpar_hcall_norets(H_REG_CRQ,
  351 + vdev->unit_address,
  352 + queue->msg_token, PAGE_SIZE);
  353 + if (rc == H_RESOURCE)
  354 + /* maybe kexecing and resource is busy. try a reset */
  355 + rc = ibmvscsi_reset_crq_queue(queue,
  356 + hostdata);
  357 +
  358 + if (rc == 2) {
  359 + /* Adapter is good, but other end is not ready */
  360 + dev_warn(hostdata->dev, "Partner adapter not ready\n");
  361 + retrc = 0;
  362 + } else if (rc != 0) {
  363 + dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
  364 + goto reg_crq_failed;
  365 + }
  366 +
  367 + queue->cur = 0;
  368 + spin_lock_init(&queue->lock);
  369 +
  370 + tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
  371 + (unsigned long)hostdata);
  372 +
  373 + if (request_irq(vdev->irq,
  374 + ibmvscsi_handle_event,
  375 + 0, "ibmvscsi", (void *)hostdata) != 0) {
  376 + dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
  377 + vdev->irq);
  378 + goto req_irq_failed;
  379 + }
  380 +
  381 + rc = vio_enable_interrupts(vdev);
  382 + if (rc != 0) {
  383 + dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
  384 + goto req_irq_failed;
  385 + }
  386 +
  387 + return retrc;
  388 +
  389 + req_irq_failed:
  390 + tasklet_kill(&hostdata->srp_task);
  391 + rc = 0;
  392 + do {
  393 + if (rc)
  394 + msleep(100);
  395 + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
  396 + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  397 + reg_crq_failed:
  398 + dma_unmap_single(hostdata->dev,
  399 + queue->msg_token,
  400 + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  401 + map_failed:
  402 + free_page((unsigned long)queue->msgs);
  403 + malloc_failed:
  404 + return -1;
  405 +}
  406 +
  407 +/**
  408 + * reenable_crq_queue: - reenables a crq after
  409 + * @queue: crq_queue to initialize and register
  410 + * @hostdata: ibmvscsi_host_data of host
  411 + *
  412 + */
  413 +static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
  414 + struct ibmvscsi_host_data *hostdata)
  415 +{
  416 + int rc = 0;
  417 + struct vio_dev *vdev = to_vio_dev(hostdata->dev);
  418 +
  419 + /* Re-enable the CRQ */
  420 + do {
  421 + if (rc)
  422 + msleep(100);
  423 + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
  424 + } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  425 +
  426 + if (rc)
  427 + dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
  428 + return rc;
  429 +}
  430 +
  431 +/* ------------------------------------------------------------
122 432 * Routines for the event pool and event structs
123 433 */
124 434 /**
... ... @@ -611,7 +921,7 @@
611 921 }
612 922  
613 923 if ((rc =
614   - ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
  924 + ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
615 925 list_del(&evt_struct->list);
616 926 del_timer(&evt_struct->timer);
617 927  
... ... @@ -1420,8 +1730,8 @@
1420 1730 * @hostdata: ibmvscsi_host_data of host
1421 1731 *
1422 1732 */
1423   -void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1424   - struct ibmvscsi_host_data *hostdata)
  1733 +static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
  1734 + struct ibmvscsi_host_data *hostdata)
1425 1735 {
1426 1736 long rc;
1427 1737 unsigned long flags;
... ... @@ -1433,8 +1743,8 @@
1433 1743 case 0x01: /* Initialization message */
1434 1744 dev_info(hostdata->dev, "partner initialized\n");
1435 1745 /* Send back a response */
1436   - if ((rc = ibmvscsi_ops->send_crq(hostdata,
1437   - 0xC002000000000000LL, 0)) == 0) {
  1746 + rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
  1747 + if (rc == 0) {
1438 1748 /* Now login */
1439 1749 init_adapter(hostdata);
1440 1750 } else {
1441 1751  
1442 1752  
1443 1753  
... ... @@ -1840,17 +2150,17 @@
1840 2150 smp_rmb();
1841 2151 hostdata->reset_crq = 0;
1842 2152  
1843   - rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
  2153 + rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
1844 2154 if (!rc)
1845   - rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
  2155 + rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
1846 2156 vio_enable_interrupts(to_vio_dev(hostdata->dev));
1847 2157 } else if (hostdata->reenable_crq) {
1848 2158 smp_rmb();
1849 2159 action = "enable";
1850   - rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
  2160 + rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
1851 2161 hostdata->reenable_crq = 0;
1852 2162 if (!rc)
1853   - rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
  2163 + rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
1854 2164 } else
1855 2165 return;
1856 2166  
... ... @@ -1944,7 +2254,7 @@
1944 2254 goto init_crq_failed;
1945 2255 }
1946 2256  
1947   - rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
  2257 + rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
1948 2258 if (rc != 0 && rc != H_RESOURCE) {
1949 2259 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1950 2260 goto kill_kthread;
... ... @@ -1974,7 +2284,7 @@
1974 2284 * to fail if the other end is not acive. In that case we don't
1975 2285 * want to scan
1976 2286 */
1977   - if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
  2287 + if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
1978 2288 || rc == H_RESOURCE) {
1979 2289 /*
1980 2290 * Wait around max init_timeout secs for the adapter to finish
... ... @@ -2002,7 +2312,7 @@
2002 2312 add_host_failed:
2003 2313 release_event_pool(&hostdata->pool, hostdata);
2004 2314 init_pool_failed:
2005   - ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
  2315 + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
2006 2316 kill_kthread:
2007 2317 kthread_stop(hostdata->work_thread);
2008 2318 init_crq_failed:
... ... @@ -2018,7 +2328,7 @@
2018 2328 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2019 2329 unmap_persist_bufs(hostdata);
2020 2330 release_event_pool(&hostdata->pool, hostdata);
2021   - ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
  2331 + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2022 2332 max_events);
2023 2333  
2024 2334 kthread_stop(hostdata->work_thread);
... ... @@ -2039,7 +2349,10 @@
2039 2349 static int ibmvscsi_resume(struct device *dev)
2040 2350 {
2041 2351 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
2042   - return ibmvscsi_ops->resume(hostdata);
  2352 + vio_disable_interrupts(to_vio_dev(hostdata->dev));
  2353 + tasklet_schedule(&hostdata->srp_task);
  2354 +
  2355 + return 0;
2043 2356 }
2044 2357  
2045 2358 /**
... ... @@ -2076,9 +2389,7 @@
2076 2389 driver_template.can_queue = max_requests;
2077 2390 max_events = max_requests + 2;
2078 2391  
2079   - if (firmware_has_feature(FW_FEATURE_VIO))
2080   - ibmvscsi_ops = &rpavscsi_ops;
2081   - else
  2392 + if (!firmware_has_feature(FW_FEATURE_VIO))
2082 2393 return -ENODEV;
2083 2394  
2084 2395 ibmvscsi_transport_template =
drivers/scsi/ibmvscsi/ibmvscsi.h
... ... @@ -107,27 +107,5 @@
107 107 dma_addr_t adapter_info_addr;
108 108 };
109 109  
110   -/* routines for managing a command/response queue */
111   -void ibmvscsi_handle_crq(struct viosrp_crq *crq,
112   - struct ibmvscsi_host_data *hostdata);
113   -
114   -struct ibmvscsi_ops {
115   - int (*init_crq_queue)(struct crq_queue *queue,
116   - struct ibmvscsi_host_data *hostdata,
117   - int max_requests);
118   - void (*release_crq_queue)(struct crq_queue *queue,
119   - struct ibmvscsi_host_data *hostdata,
120   - int max_requests);
121   - int (*reset_crq_queue)(struct crq_queue *queue,
122   - struct ibmvscsi_host_data *hostdata);
123   - int (*reenable_crq_queue)(struct crq_queue *queue,
124   - struct ibmvscsi_host_data *hostdata);
125   - int (*send_crq)(struct ibmvscsi_host_data *hostdata,
126   - u64 word1, u64 word2);
127   - int (*resume) (struct ibmvscsi_host_data *hostdata);
128   -};
129   -
130   -extern struct ibmvscsi_ops rpavscsi_ops;
131   -
132 110 #endif /* IBMVSCSI_H */
drivers/scsi/ibmvscsi/rpa_vscsi.c
1   -/* ------------------------------------------------------------
2   - * rpa_vscsi.c
3   - * (C) Copyright IBM Corporation 1994, 2003
4   - * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
5   - * Santiago Leon (santil@us.ibm.com)
6   - *
7   - * This program is free software; you can redistribute it and/or modify
8   - * it under the terms of the GNU General Public License as published by
9   - * the Free Software Foundation; either version 2 of the License, or
10   - * (at your option) any later version.
11   - *
12   - * This program is distributed in the hope that it will be useful,
13   - * but WITHOUT ANY WARRANTY; without even the implied warranty of
14   - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15   - * GNU General Public License for more details.
16   - *
17   - * You should have received a copy of the GNU General Public License
18   - * along with this program; if not, write to the Free Software
19   - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   - * USA
21   - *
22   - * ------------------------------------------------------------
23   - * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
24   - *
25   - * This driver allows the Linux SCSI peripheral drivers to directly
26   - * access devices in the hosting partition, either on an iSeries
27   - * hypervisor system or a converged hypervisor system.
28   - */
29   -
30   -#include <asm/vio.h>
31   -#include <asm/prom.h>
32   -#include <asm/iommu.h>
33   -#include <asm/hvcall.h>
34   -#include <linux/delay.h>
35   -#include <linux/dma-mapping.h>
36   -#include <linux/gfp.h>
37   -#include <linux/interrupt.h>
38   -#include "ibmvscsi.h"
39   -
40   -static char partition_name[97] = "UNKNOWN";
41   -static unsigned int partition_number = -1;
42   -
43   -/* ------------------------------------------------------------
44   - * Routines for managing the command/response queue
45   - */
46   -/**
47   - * rpavscsi_handle_event: - Interrupt handler for crq events
48   - * @irq: number of irq to handle, not used
49   - * @dev_instance: ibmvscsi_host_data of host that received interrupt
50   - *
51   - * Disables interrupts and schedules srp_task
52   - * Always returns IRQ_HANDLED
53   - */
54   -static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
55   -{
56   - struct ibmvscsi_host_data *hostdata =
57   - (struct ibmvscsi_host_data *)dev_instance;
58   - vio_disable_interrupts(to_vio_dev(hostdata->dev));
59   - tasklet_schedule(&hostdata->srp_task);
60   - return IRQ_HANDLED;
61   -}
62   -
63   -/**
64   - * release_crq_queue: - Deallocates data and unregisters CRQ
65   - * @queue: crq_queue to initialize and register
66   - * @host_data: ibmvscsi_host_data of host
67   - *
68   - * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
69   - * the crq with the hypervisor.
70   - */
71   -static void rpavscsi_release_crq_queue(struct crq_queue *queue,
72   - struct ibmvscsi_host_data *hostdata,
73   - int max_requests)
74   -{
75   - long rc = 0;
76   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
77   - free_irq(vdev->irq, (void *)hostdata);
78   - tasklet_kill(&hostdata->srp_task);
79   - do {
80   - if (rc)
81   - msleep(100);
82   - rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
83   - } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
84   - dma_unmap_single(hostdata->dev,
85   - queue->msg_token,
86   - queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
87   - free_page((unsigned long)queue->msgs);
88   -}
89   -
90   -/**
91   - * crq_queue_next_crq: - Returns the next entry in message queue
92   - * @queue: crq_queue to use
93   - *
94   - * Returns pointer to next entry in queue, or NULL if there are no new
95   - * entried in the CRQ.
96   - */
97   -static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
98   -{
99   - struct viosrp_crq *crq;
100   - unsigned long flags;
101   -
102   - spin_lock_irqsave(&queue->lock, flags);
103   - crq = &queue->msgs[queue->cur];
104   - if (crq->valid & 0x80) {
105   - if (++queue->cur == queue->size)
106   - queue->cur = 0;
107   - } else
108   - crq = NULL;
109   - spin_unlock_irqrestore(&queue->lock, flags);
110   -
111   - return crq;
112   -}
113   -
114   -/**
115   - * rpavscsi_send_crq: - Send a CRQ
116   - * @hostdata: the adapter
117   - * @word1: the first 64 bits of the data
118   - * @word2: the second 64 bits of the data
119   - */
120   -static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
121   - u64 word1, u64 word2)
122   -{
123   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
124   -
125   - return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
126   -}
127   -
128   -/**
129   - * rpavscsi_task: - Process srps asynchronously
130   - * @data: ibmvscsi_host_data of host
131   - */
132   -static void rpavscsi_task(void *data)
133   -{
134   - struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
135   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
136   - struct viosrp_crq *crq;
137   - int done = 0;
138   -
139   - while (!done) {
140   - /* Pull all the valid messages off the CRQ */
141   - while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
142   - ibmvscsi_handle_crq(crq, hostdata);
143   - crq->valid = 0x00;
144   - }
145   -
146   - vio_enable_interrupts(vdev);
147   - if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
148   - vio_disable_interrupts(vdev);
149   - ibmvscsi_handle_crq(crq, hostdata);
150   - crq->valid = 0x00;
151   - } else {
152   - done = 1;
153   - }
154   - }
155   -}
156   -
157   -static void gather_partition_info(void)
158   -{
159   - struct device_node *rootdn;
160   -
161   - const char *ppartition_name;
162   - const unsigned int *p_number_ptr;
163   -
164   - /* Retrieve information about this partition */
165   - rootdn = of_find_node_by_path("/");
166   - if (!rootdn) {
167   - return;
168   - }
169   -
170   - ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
171   - if (ppartition_name)
172   - strncpy(partition_name, ppartition_name,
173   - sizeof(partition_name));
174   - p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
175   - if (p_number_ptr)
176   - partition_number = *p_number_ptr;
177   - of_node_put(rootdn);
178   -}
179   -
180   -static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
181   -{
182   - memset(&hostdata->madapter_info, 0x00,
183   - sizeof(hostdata->madapter_info));
184   -
185   - dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
186   - strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
187   -
188   - strncpy(hostdata->madapter_info.partition_name, partition_name,
189   - sizeof(hostdata->madapter_info.partition_name));
190   -
191   - hostdata->madapter_info.partition_number = partition_number;
192   -
193   - hostdata->madapter_info.mad_version = 1;
194   - hostdata->madapter_info.os_type = 2;
195   -}
196   -
197   -/**
198   - * reset_crq_queue: - resets a crq after a failure
199   - * @queue: crq_queue to initialize and register
200   - * @hostdata: ibmvscsi_host_data of host
201   - *
202   - */
203   -static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
204   - struct ibmvscsi_host_data *hostdata)
205   -{
206   - int rc = 0;
207   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
208   -
209   - /* Close the CRQ */
210   - do {
211   - if (rc)
212   - msleep(100);
213   - rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
214   - } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
215   -
216   - /* Clean out the queue */
217   - memset(queue->msgs, 0x00, PAGE_SIZE);
218   - queue->cur = 0;
219   -
220   - set_adapter_info(hostdata);
221   -
222   - /* And re-open it again */
223   - rc = plpar_hcall_norets(H_REG_CRQ,
224   - vdev->unit_address,
225   - queue->msg_token, PAGE_SIZE);
226   - if (rc == 2) {
227   - /* Adapter is good, but other end is not ready */
228   - dev_warn(hostdata->dev, "Partner adapter not ready\n");
229   - } else if (rc != 0) {
230   - dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
231   - }
232   - return rc;
233   -}
234   -
235   -/**
236   - * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
237   - * @queue: crq_queue to initialize and register
238   - * @hostdata: ibmvscsi_host_data of host
239   - *
240   - * Allocates a page for messages, maps it for dma, and registers
241   - * the crq with the hypervisor.
242   - * Returns zero on success.
243   - */
244   -static int rpavscsi_init_crq_queue(struct crq_queue *queue,
245   - struct ibmvscsi_host_data *hostdata,
246   - int max_requests)
247   -{
248   - int rc;
249   - int retrc;
250   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
251   -
252   - queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
253   -
254   - if (!queue->msgs)
255   - goto malloc_failed;
256   - queue->size = PAGE_SIZE / sizeof(*queue->msgs);
257   -
258   - queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
259   - queue->size * sizeof(*queue->msgs),
260   - DMA_BIDIRECTIONAL);
261   -
262   - if (dma_mapping_error(hostdata->dev, queue->msg_token))
263   - goto map_failed;
264   -
265   - gather_partition_info();
266   - set_adapter_info(hostdata);
267   -
268   - retrc = rc = plpar_hcall_norets(H_REG_CRQ,
269   - vdev->unit_address,
270   - queue->msg_token, PAGE_SIZE);
271   - if (rc == H_RESOURCE)
272   - /* maybe kexecing and resource is busy. try a reset */
273   - rc = rpavscsi_reset_crq_queue(queue,
274   - hostdata);
275   -
276   - if (rc == 2) {
277   - /* Adapter is good, but other end is not ready */
278   - dev_warn(hostdata->dev, "Partner adapter not ready\n");
279   - retrc = 0;
280   - } else if (rc != 0) {
281   - dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
282   - goto reg_crq_failed;
283   - }
284   -
285   - queue->cur = 0;
286   - spin_lock_init(&queue->lock);
287   -
288   - tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
289   - (unsigned long)hostdata);
290   -
291   - if (request_irq(vdev->irq,
292   - rpavscsi_handle_event,
293   - 0, "ibmvscsi", (void *)hostdata) != 0) {
294   - dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
295   - vdev->irq);
296   - goto req_irq_failed;
297   - }
298   -
299   - rc = vio_enable_interrupts(vdev);
300   - if (rc != 0) {
301   - dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
302   - goto req_irq_failed;
303   - }
304   -
305   - return retrc;
306   -
307   - req_irq_failed:
308   - tasklet_kill(&hostdata->srp_task);
309   - rc = 0;
310   - do {
311   - if (rc)
312   - msleep(100);
313   - rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
314   - } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
315   - reg_crq_failed:
316   - dma_unmap_single(hostdata->dev,
317   - queue->msg_token,
318   - queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
319   - map_failed:
320   - free_page((unsigned long)queue->msgs);
321   - malloc_failed:
322   - return -1;
323   -}
324   -
325   -/**
326   - * reenable_crq_queue: - reenables a crq after
327   - * @queue: crq_queue to initialize and register
328   - * @hostdata: ibmvscsi_host_data of host
329   - *
330   - */
331   -static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
332   - struct ibmvscsi_host_data *hostdata)
333   -{
334   - int rc = 0;
335   - struct vio_dev *vdev = to_vio_dev(hostdata->dev);
336   -
337   - /* Re-enable the CRQ */
338   - do {
339   - if (rc)
340   - msleep(100);
341   - rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
342   - } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
343   -
344   - if (rc)
345   - dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
346   - return rc;
347   -}
348   -
349   -/**
350   - * rpavscsi_resume: - resume after suspend
351   - * @hostdata: ibmvscsi_host_data of host
352   - *
353   - */
354   -static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
355   -{
356   - vio_disable_interrupts(to_vio_dev(hostdata->dev));
357   - tasklet_schedule(&hostdata->srp_task);
358   - return 0;
359   -}
360   -
361   -struct ibmvscsi_ops rpavscsi_ops = {
362   - .init_crq_queue = rpavscsi_init_crq_queue,
363   - .release_crq_queue = rpavscsi_release_crq_queue,
364   - .reset_crq_queue = rpavscsi_reset_crq_queue,
365   - .reenable_crq_queue = rpavscsi_reenable_crq_queue,
366   - .send_crq = rpavscsi_send_crq,
367   - .resume = rpavscsi_resume,
368   -};