Commit 23ab274ddf83931ecc21968d11c773a2a972e462

Authored by Benjamin Poirier
Committed by Greg Kroah-Hartman
1 parent 5f70407192

scsi: bfa: Increase requested firmware version to 3.2.5.1

[ Upstream commit 2d1148f0f45079d25a0fa0d67e4fdb2a656d12fb ]

bna & bfa firmware version 3.2.5.1 was submitted to linux-firmware on
Feb 17 19:10:20 2015 -0500 in 0ab54ff1dc ("linux-firmware: Add QLogic BR
Series Adapter Firmware").

bna was updated to use the newer firmware on Feb 19 16:02:32 2015 -0500 in
3f307c3d70 ("bna: Update the Driver and Firmware Version").

bfa was not updated. I presume this was an oversight, but it broke support
for bfa+bna cards such as the following:
	04:00.0 Fibre Channel [0c04]: Brocade Communications Systems, Inc.
		1010/1020/1007/1741 10Gbps CNA [1657:0014] (rev 01)
	04:00.1 Fibre Channel [0c04]: Brocade Communications Systems, Inc.
		1010/1020/1007/1741 10Gbps CNA [1657:0014] (rev 01)
	04:00.2 Ethernet controller [0200]: Brocade Communications Systems,
		Inc. 1010/1020/1007/1741 10Gbps CNA [1657:0014] (rev 01)
	04:00.3 Ethernet controller [0200]: Brocade Communications Systems,
		Inc. 1010/1020/1007/1741 10Gbps CNA [1657:0014] (rev 01)

Currently, if the bfa module is loaded first, bna fails to probe the
respective devices with
[  215.026787] bna: QLogic BR-series 10G Ethernet driver - version: 3.2.25.1
[  215.043707] bna 0000:04:00.2: bar0 mapped to ffffc90001fc0000, len 262144
[  215.060656] bna 0000:04:00.2: initialization failed err=1
[  215.073893] bna 0000:04:00.3: bar0 mapped to ffffc90002040000, len 262144
[  215.090644] bna 0000:04:00.3: initialization failed err=1

Whereas if bna is loaded first, bfa fails with
[  249.592109] QLogic BR-series BFA FC/FCOE SCSI driver - version: 3.2.25.0
[  249.610738] bfa 0000:04:00.0: Running firmware version is incompatible with the driver version
[  249.833513] bfa 0000:04:00.0: bfa init failed
[  249.833919] scsi host6: QLogic BR-series FC/FCOE Adapter, hwpath: 0000:04:00.0 driver: 3.2.25.0
[  249.841446] bfa 0000:04:00.1: Running firmware version is incompatible with the driver version
[  250.045449] bfa 0000:04:00.1: bfa init failed
[  250.045962] scsi host7: QLogic BR-series FC/FCOE Adapter, hwpath: 0000:04:00.1 driver: 3.2.25.0

Increase bfa's requested firmware version. Also increase the driver
version.  I only tested that all of the devices probe without error.

Reported-by: Tim Ehlers <tehlers@gwdg.de>
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Acked-by: Rasesh Mody <rasesh.mody@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 2 changed files with 4 additions and 4 deletions

drivers/scsi/bfa/bfad.c
@@ -64,11 +64,11 @@
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB		"cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT		"ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2	"ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB		"cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.2.5.1.bin"
 
 static u32	*bfad_load_fwimg(struct pci_dev *pdev);
 static void	bfad_free_fwimg(void);
 static void	bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 			u32 *bfi_image_size, char *fw_name);
971 bfad_start_ops(struct bfad_s *bfad) { 971 bfad_start_ops(struct bfad_s *bfad) {
972 972
973 int retval; 973 int retval;
974 unsigned long flags; 974 unsigned long flags;
975 struct bfad_vport_s *vport, *vport_new; 975 struct bfad_vport_s *vport, *vport_new;
976 struct bfa_fcs_driver_info_s driver_info; 976 struct bfa_fcs_driver_info_s driver_info;
977 977
978 /* Limit min/max. xfer size to [64k-32MB] */ 978 /* Limit min/max. xfer size to [64k-32MB] */
979 if (max_xfer_size < BFAD_MIN_SECTORS >> 1) 979 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
980 max_xfer_size = BFAD_MIN_SECTORS >> 1; 980 max_xfer_size = BFAD_MIN_SECTORS >> 1;
981 if (max_xfer_size > BFAD_MAX_SECTORS >> 1) 981 if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
982 max_xfer_size = BFAD_MAX_SECTORS >> 1; 982 max_xfer_size = BFAD_MAX_SECTORS >> 1;
983 983
984 /* Fill the driver_info to pass to the FCS */ 984 /* Fill the driver_info to pass to the FCS */
985 memset(&driver_info, 0, sizeof(driver_info)); 985 memset(&driver_info, 0, sizeof(driver_info));
986 strncpy(driver_info.version, BFAD_DRIVER_VERSION, 986 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
987 sizeof(driver_info.version) - 1); 987 sizeof(driver_info.version) - 1);
988 if (host_name) 988 if (host_name)
989 strncpy(driver_info.host_machine_name, host_name, 989 strncpy(driver_info.host_machine_name, host_name,
990 sizeof(driver_info.host_machine_name) - 1); 990 sizeof(driver_info.host_machine_name) - 1);
991 if (os_name) 991 if (os_name)
992 strncpy(driver_info.host_os_name, os_name, 992 strncpy(driver_info.host_os_name, os_name,
993 sizeof(driver_info.host_os_name) - 1); 993 sizeof(driver_info.host_os_name) - 1);
994 if (os_patch) 994 if (os_patch)
995 strncpy(driver_info.host_os_patch, os_patch, 995 strncpy(driver_info.host_os_patch, os_patch,
996 sizeof(driver_info.host_os_patch) - 1); 996 sizeof(driver_info.host_os_patch) - 1);
997 997
998 strncpy(driver_info.os_device_name, bfad->pci_name, 998 strncpy(driver_info.os_device_name, bfad->pci_name,
999 sizeof(driver_info.os_device_name) - 1); 999 sizeof(driver_info.os_device_name) - 1);
1000 1000
1001 /* FCS driver info init */ 1001 /* FCS driver info init */
1002 spin_lock_irqsave(&bfad->bfad_lock, flags); 1002 spin_lock_irqsave(&bfad->bfad_lock, flags);
1003 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 1003 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1004 1004
1005 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) 1005 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
1006 bfa_fcs_update_cfg(&bfad->bfa_fcs); 1006 bfa_fcs_update_cfg(&bfad->bfa_fcs);
1007 else 1007 else
1008 bfa_fcs_init(&bfad->bfa_fcs); 1008 bfa_fcs_init(&bfad->bfa_fcs);
1009 1009
1010 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1010 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1011 1011
1012 if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) { 1012 if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
1013 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); 1013 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
1014 if (retval != BFA_STATUS_OK) 1014 if (retval != BFA_STATUS_OK)
1015 return BFA_STATUS_FAILED; 1015 return BFA_STATUS_FAILED;
1016 } 1016 }
1017 1017
1018 /* Set up the fc_host fixed attributes if the Linux kernel supports them */ 1018 /* Set up the fc_host fixed attributes if the Linux kernel supports them */
1019 bfad_fc_host_init(bfad->pport.im_port); 1019 bfad_fc_host_init(bfad->pport.im_port);
1020 1020
1021 /* BFAD level FC4 IM specific resource allocation */ 1021 /* BFAD level FC4 IM specific resource allocation */
1022 retval = bfad_im_probe(bfad); 1022 retval = bfad_im_probe(bfad);
1023 if (retval != BFA_STATUS_OK) { 1023 if (retval != BFA_STATUS_OK) {
1024 printk(KERN_WARNING "bfad_im_probe failed\n"); 1024 printk(KERN_WARNING "bfad_im_probe failed\n");
1025 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) 1025 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1026 bfa_sm_set_state(bfad, bfad_sm_failed); 1026 bfa_sm_set_state(bfad, bfad_sm_failed);
1027 return BFA_STATUS_FAILED; 1027 return BFA_STATUS_FAILED;
1028 } else 1028 } else
1029 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 1029 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1030 1030
1031 bfad_drv_start(bfad); 1031 bfad_drv_start(bfad);
1032 1032
1033 /* Complete pbc vport create */ 1033 /* Complete pbc vport create */
1034 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, 1034 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1035 list_entry) { 1035 list_entry) {
1036 struct fc_vport_identifiers vid; 1036 struct fc_vport_identifiers vid;
1037 struct fc_vport *fc_vport; 1037 struct fc_vport *fc_vport;
1038 char pwwn_buf[BFA_STRING_32]; 1038 char pwwn_buf[BFA_STRING_32];
1039 1039
1040 memset(&vid, 0, sizeof(vid)); 1040 memset(&vid, 0, sizeof(vid));
1041 vid.roles = FC_PORT_ROLE_FCP_INITIATOR; 1041 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1042 vid.vport_type = FC_PORTTYPE_NPIV; 1042 vid.vport_type = FC_PORTTYPE_NPIV;
1043 vid.disable = false; 1043 vid.disable = false;
1044 vid.node_name = wwn_to_u64((u8 *) 1044 vid.node_name = wwn_to_u64((u8 *)
1045 (&((vport->fcs_vport).lport.port_cfg.nwwn))); 1045 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1046 vid.port_name = wwn_to_u64((u8 *) 1046 vid.port_name = wwn_to_u64((u8 *)
1047 (&((vport->fcs_vport).lport.port_cfg.pwwn))); 1047 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
1048 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); 1048 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1049 if (!fc_vport) { 1049 if (!fc_vport) {
1050 wwn2str(pwwn_buf, vid.port_name); 1050 wwn2str(pwwn_buf, vid.port_name);
1051 printk(KERN_WARNING "bfad%d: failed to create pbc vport" 1051 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1052 " %s\n", bfad->inst_no, pwwn_buf); 1052 " %s\n", bfad->inst_no, pwwn_buf);
1053 } 1053 }
1054 list_del(&vport->list_entry); 1054 list_del(&vport->list_entry);
1055 kfree(vport); 1055 kfree(vport);
1056 } 1056 }
1057 1057
1058 /* 1058 /*
1059 * If bfa_linkup_delay is set to the default of -1, try to retrieve the 1059 * If bfa_linkup_delay is set to the default of -1, try to retrieve the
1060 * value using bfad_get_linkup_delay(); else use the 1060 * value using bfad_get_linkup_delay(); else use the
1061 * passed-in module param value as the bfa_linkup_delay. 1061 * passed-in module param value as the bfa_linkup_delay.
1062 */ 1062 */
1063 if (bfa_linkup_delay < 0) { 1063 if (bfa_linkup_delay < 0) {
1064 bfa_linkup_delay = bfad_get_linkup_delay(bfad); 1064 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1065 bfad_rport_online_wait(bfad); 1065 bfad_rport_online_wait(bfad);
1066 bfa_linkup_delay = -1; 1066 bfa_linkup_delay = -1;
1067 } else 1067 } else
1068 bfad_rport_online_wait(bfad); 1068 bfad_rport_online_wait(bfad);
1069 1069
1070 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); 1070 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1071 1071
1072 return BFA_STATUS_OK; 1072 return BFA_STATUS_OK;
1073 } 1073 }
1074 1074
1075 int 1075 int
1076 bfad_worker(void *ptr) 1076 bfad_worker(void *ptr)
1077 { 1077 {
1078 struct bfad_s *bfad = ptr; 1078 struct bfad_s *bfad = ptr;
1079 unsigned long flags; 1079 unsigned long flags;
1080 1080
1081 if (kthread_should_stop()) 1081 if (kthread_should_stop())
1082 return 0; 1082 return 0;
1083 1083
1084 /* Send event BFAD_E_INIT_SUCCESS */ 1084 /* Send event BFAD_E_INIT_SUCCESS */
1085 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); 1085 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1086 1086
1087 spin_lock_irqsave(&bfad->bfad_lock, flags); 1087 spin_lock_irqsave(&bfad->bfad_lock, flags);
1088 bfad->bfad_tsk = NULL; 1088 bfad->bfad_tsk = NULL;
1089 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1089 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1090 1090
1091 return 0; 1091 return 0;
1092 } 1092 }
1093 1093
1094 /* 1094 /*
1095 * BFA driver interrupt functions 1095 * BFA driver interrupt functions
1096 */ 1096 */
1097 irqreturn_t 1097 irqreturn_t
1098 bfad_intx(int irq, void *dev_id) 1098 bfad_intx(int irq, void *dev_id)
1099 { 1099 {
1100 struct bfad_s *bfad = dev_id; 1100 struct bfad_s *bfad = dev_id;
1101 struct list_head doneq; 1101 struct list_head doneq;
1102 unsigned long flags; 1102 unsigned long flags;
1103 bfa_boolean_t rc; 1103 bfa_boolean_t rc;
1104 1104
1105 spin_lock_irqsave(&bfad->bfad_lock, flags); 1105 spin_lock_irqsave(&bfad->bfad_lock, flags);
1106 rc = bfa_intx(&bfad->bfa); 1106 rc = bfa_intx(&bfad->bfa);
1107 if (!rc) { 1107 if (!rc) {
1108 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1108 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1109 return IRQ_NONE; 1109 return IRQ_NONE;
1110 } 1110 }
1111 1111
1112 bfa_comp_deq(&bfad->bfa, &doneq); 1112 bfa_comp_deq(&bfad->bfa, &doneq);
1113 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1113 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1114 1114
1115 if (!list_empty(&doneq)) { 1115 if (!list_empty(&doneq)) {
1116 bfa_comp_process(&bfad->bfa, &doneq); 1116 bfa_comp_process(&bfad->bfa, &doneq);
1117 1117
1118 spin_lock_irqsave(&bfad->bfad_lock, flags); 1118 spin_lock_irqsave(&bfad->bfad_lock, flags);
1119 bfa_comp_free(&bfad->bfa, &doneq); 1119 bfa_comp_free(&bfad->bfa, &doneq);
1120 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1120 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1121 } 1121 }
1122 1122
1123 return IRQ_HANDLED; 1123 return IRQ_HANDLED;
1124 1124
1125 } 1125 }
1126 1126
1127 static irqreturn_t 1127 static irqreturn_t
1128 bfad_msix(int irq, void *dev_id) 1128 bfad_msix(int irq, void *dev_id)
1129 { 1129 {
1130 struct bfad_msix_s *vec = dev_id; 1130 struct bfad_msix_s *vec = dev_id;
1131 struct bfad_s *bfad = vec->bfad; 1131 struct bfad_s *bfad = vec->bfad;
1132 struct list_head doneq; 1132 struct list_head doneq;
1133 unsigned long flags; 1133 unsigned long flags;
1134 1134
1135 spin_lock_irqsave(&bfad->bfad_lock, flags); 1135 spin_lock_irqsave(&bfad->bfad_lock, flags);
1136 1136
1137 bfa_msix(&bfad->bfa, vec->msix.entry); 1137 bfa_msix(&bfad->bfa, vec->msix.entry);
1138 bfa_comp_deq(&bfad->bfa, &doneq); 1138 bfa_comp_deq(&bfad->bfa, &doneq);
1139 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1139 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1140 1140
1141 if (!list_empty(&doneq)) { 1141 if (!list_empty(&doneq)) {
1142 bfa_comp_process(&bfad->bfa, &doneq); 1142 bfa_comp_process(&bfad->bfa, &doneq);
1143 1143
1144 spin_lock_irqsave(&bfad->bfad_lock, flags); 1144 spin_lock_irqsave(&bfad->bfad_lock, flags);
1145 bfa_comp_free(&bfad->bfa, &doneq); 1145 bfa_comp_free(&bfad->bfa, &doneq);
1146 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1146 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1147 } 1147 }
1148 1148
1149 return IRQ_HANDLED; 1149 return IRQ_HANDLED;
1150 } 1150 }
1151 1151
1152 /* 1152 /*
1153 * Initialize the MSIX entry table. 1153 * Initialize the MSIX entry table.
1154 */ 1154 */
1155 static void 1155 static void
1156 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, 1156 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1157 int mask, int max_bit) 1157 int mask, int max_bit)
1158 { 1158 {
1159 int i; 1159 int i;
1160 int match = 0x00000001; 1160 int match = 0x00000001;
1161 1161
1162 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { 1162 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1163 if (mask & match) { 1163 if (mask & match) {
1164 bfad->msix_tab[bfad->nvec].msix.entry = i; 1164 bfad->msix_tab[bfad->nvec].msix.entry = i;
1165 bfad->msix_tab[bfad->nvec].bfad = bfad; 1165 bfad->msix_tab[bfad->nvec].bfad = bfad;
1166 msix_entries[bfad->nvec].entry = i; 1166 msix_entries[bfad->nvec].entry = i;
1167 bfad->nvec++; 1167 bfad->nvec++;
1168 } 1168 }
1169 1169
1170 match <<= 1; 1170 match <<= 1;
1171 } 1171 }
1172 1172
1173 } 1173 }
1174 1174
1175 int 1175 int
1176 bfad_install_msix_handler(struct bfad_s *bfad) 1176 bfad_install_msix_handler(struct bfad_s *bfad)
1177 { 1177 {
1178 int i, error = 0; 1178 int i, error = 0;
1179 1179
1180 for (i = 0; i < bfad->nvec; i++) { 1180 for (i = 0; i < bfad->nvec; i++) {
1181 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", 1181 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1182 bfad->pci_name, 1182 bfad->pci_name,
1183 ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ? 1183 ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
1184 msix_name_cb[i] : msix_name_ct[i])); 1184 msix_name_cb[i] : msix_name_ct[i]));
1185 1185
1186 error = request_irq(bfad->msix_tab[i].msix.vector, 1186 error = request_irq(bfad->msix_tab[i].msix.vector,
1187 (irq_handler_t) bfad_msix, 0, 1187 (irq_handler_t) bfad_msix, 0,
1188 bfad->msix_tab[i].name, &bfad->msix_tab[i]); 1188 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1189 bfa_trc(bfad, i); 1189 bfa_trc(bfad, i);
1190 bfa_trc(bfad, bfad->msix_tab[i].msix.vector); 1190 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1191 if (error) { 1191 if (error) {
1192 int j; 1192 int j;
1193 1193
1194 for (j = 0; j < i; j++) 1194 for (j = 0; j < i; j++)
1195 free_irq(bfad->msix_tab[j].msix.vector, 1195 free_irq(bfad->msix_tab[j].msix.vector,
1196 &bfad->msix_tab[j]); 1196 &bfad->msix_tab[j]);
1197 1197
1198 bfad->bfad_flags &= ~BFAD_MSIX_ON; 1198 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1199 pci_disable_msix(bfad->pcidev); 1199 pci_disable_msix(bfad->pcidev);
1200 1200
1201 return 1; 1201 return 1;
1202 } 1202 }
1203 } 1203 }
1204 1204
1205 return 0; 1205 return 0;
1206 } 1206 }
1207 1207
1208 /* 1208 /*
1209 * Setup MSIX based interrupt. 1209 * Setup MSIX based interrupt.
1210 */ 1210 */
1211 int 1211 int
1212 bfad_setup_intr(struct bfad_s *bfad) 1212 bfad_setup_intr(struct bfad_s *bfad)
1213 { 1213 {
1214 int error; 1214 int error;
1215 u32 mask = 0, i, num_bit = 0, max_bit = 0; 1215 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1216 struct msix_entry msix_entries[MAX_MSIX_ENTRY]; 1216 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1217 struct pci_dev *pdev = bfad->pcidev; 1217 struct pci_dev *pdev = bfad->pcidev;
1218 u16 reg; 1218 u16 reg;
1219 1219
1220 /* Call BFA to get the msix map for this PCI function. */ 1220 /* Call BFA to get the msix map for this PCI function. */
1221 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); 1221 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1222 1222
1223 /* Set up the msix entry table */ 1223 /* Set up the msix entry table */
1224 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 1224 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1225 1225
1226 if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || 1226 if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
1227 (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { 1227 (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
1228 1228
1229 error = pci_enable_msix_exact(bfad->pcidev, 1229 error = pci_enable_msix_exact(bfad->pcidev,
1230 msix_entries, bfad->nvec); 1230 msix_entries, bfad->nvec);
1231 /* In CT1 & CT2, try to allocate just one vector */ 1231 /* In CT1 & CT2, try to allocate just one vector */
1232 if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) { 1232 if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) {
1233 printk(KERN_WARNING "bfa %s: trying one msix " 1233 printk(KERN_WARNING "bfa %s: trying one msix "
1234 "vector failed to allocate %d[%d]\n", 1234 "vector failed to allocate %d[%d]\n",
1235 bfad->pci_name, bfad->nvec, error); 1235 bfad->pci_name, bfad->nvec, error);
1236 bfad->nvec = 1; 1236 bfad->nvec = 1;
1237 error = pci_enable_msix_exact(bfad->pcidev, 1237 error = pci_enable_msix_exact(bfad->pcidev,
1238 msix_entries, 1); 1238 msix_entries, 1);
1239 } 1239 }
1240 1240
1241 if (error) { 1241 if (error) {
1242 printk(KERN_WARNING "bfad%d: " 1242 printk(KERN_WARNING "bfad%d: "
1243 "pci_enable_msix_exact failed (%d), " 1243 "pci_enable_msix_exact failed (%d), "
1244 "use line based.\n", 1244 "use line based.\n",
1245 bfad->inst_no, error); 1245 bfad->inst_no, error);
1246 goto line_based; 1246 goto line_based;
1247 } 1247 }
1248 1248
1249 /* Disable INTX in MSI-X mode */ 1249 /* Disable INTX in MSI-X mode */
1250 pci_read_config_word(pdev, PCI_COMMAND, &reg); 1250 pci_read_config_word(pdev, PCI_COMMAND, &reg);
1251 1251
1252 if (!(reg & PCI_COMMAND_INTX_DISABLE)) 1252 if (!(reg & PCI_COMMAND_INTX_DISABLE))
1253 pci_write_config_word(pdev, PCI_COMMAND, 1253 pci_write_config_word(pdev, PCI_COMMAND,
1254 reg | PCI_COMMAND_INTX_DISABLE); 1254 reg | PCI_COMMAND_INTX_DISABLE);
1255 1255
1256 /* Save the vectors */ 1256 /* Save the vectors */
1257 for (i = 0; i < bfad->nvec; i++) { 1257 for (i = 0; i < bfad->nvec; i++) {
1258 bfa_trc(bfad, msix_entries[i].vector); 1258 bfa_trc(bfad, msix_entries[i].vector);
1259 bfad->msix_tab[i].msix.vector = msix_entries[i].vector; 1259 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1260 } 1260 }
1261 1261
1262 bfa_msix_init(&bfad->bfa, bfad->nvec); 1262 bfa_msix_init(&bfad->bfa, bfad->nvec);
1263 1263
1264 bfad->bfad_flags |= BFAD_MSIX_ON; 1264 bfad->bfad_flags |= BFAD_MSIX_ON;
1265 1265
1266 return 0; 1266 return 0;
1267 } 1267 }
1268 1268
1269 line_based: 1269 line_based:
1270 error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx, 1270 error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
1271 BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad); 1271 BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad);
1272 if (error) 1272 if (error)
1273 return error; 1273 return error;
1274 1274
1275 bfad->bfad_flags |= BFAD_INTX_ON; 1275 bfad->bfad_flags |= BFAD_INTX_ON;
1276 1276
1277 return 0; 1277 return 0;
1278 } 1278 }
1279 1279
1280 void 1280 void
1281 bfad_remove_intr(struct bfad_s *bfad) 1281 bfad_remove_intr(struct bfad_s *bfad)
1282 { 1282 {
1283 int i; 1283 int i;
1284 1284
1285 if (bfad->bfad_flags & BFAD_MSIX_ON) { 1285 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1286 for (i = 0; i < bfad->nvec; i++) 1286 for (i = 0; i < bfad->nvec; i++)
1287 free_irq(bfad->msix_tab[i].msix.vector, 1287 free_irq(bfad->msix_tab[i].msix.vector,
1288 &bfad->msix_tab[i]); 1288 &bfad->msix_tab[i]);
1289 1289
1290 pci_disable_msix(bfad->pcidev); 1290 pci_disable_msix(bfad->pcidev);
1291 bfad->bfad_flags &= ~BFAD_MSIX_ON; 1291 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1292 } else if (bfad->bfad_flags & BFAD_INTX_ON) { 1292 } else if (bfad->bfad_flags & BFAD_INTX_ON) {
1293 free_irq(bfad->pcidev->irq, bfad); 1293 free_irq(bfad->pcidev->irq, bfad);
1294 } 1294 }
1295 } 1295 }
1296 1296
1297 /* 1297 /*
1298 * PCI probe entry. 1298 * PCI probe entry.
1299 */ 1299 */
1300 int 1300 int
1301 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1301 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1302 { 1302 {
1303 struct bfad_s *bfad; 1303 struct bfad_s *bfad;
1304 int error = -ENODEV, retval, i; 1304 int error = -ENODEV, retval, i;
1305 1305
1306 /* For single port cards - only claim function 0 */ 1306 /* For single port cards - only claim function 0 */
1307 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && 1307 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1308 (PCI_FUNC(pdev->devfn) != 0)) 1308 (PCI_FUNC(pdev->devfn) != 0))
1309 return -ENODEV; 1309 return -ENODEV;
1310 1310
1311 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); 1311 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1312 if (!bfad) { 1312 if (!bfad) {
1313 error = -ENOMEM; 1313 error = -ENOMEM;
1314 goto out; 1314 goto out;
1315 } 1315 }
1316 1316
1317 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); 1317 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1318 if (!bfad->trcmod) { 1318 if (!bfad->trcmod) {
1319 printk(KERN_WARNING "Error alloc trace buffer!\n"); 1319 printk(KERN_WARNING "Error alloc trace buffer!\n");
1320 error = -ENOMEM; 1320 error = -ENOMEM;
1321 goto out_alloc_trace_failure; 1321 goto out_alloc_trace_failure;
1322 } 1322 }
1323 1323
1324 /* TRACE INIT */ 1324 /* TRACE INIT */
1325 bfa_trc_init(bfad->trcmod); 1325 bfa_trc_init(bfad->trcmod);
1326 bfa_trc(bfad, bfad_inst); 1326 bfa_trc(bfad, bfad_inst);
1327 1327
1328 /* AEN INIT */ 1328 /* AEN INIT */
1329 INIT_LIST_HEAD(&bfad->free_aen_q); 1329 INIT_LIST_HEAD(&bfad->free_aen_q);
1330 INIT_LIST_HEAD(&bfad->active_aen_q); 1330 INIT_LIST_HEAD(&bfad->active_aen_q);
1331 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++) 1331 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
1332 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q); 1332 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
1333 1333
1334 if (!(bfad_load_fwimg(pdev))) { 1334 if (!(bfad_load_fwimg(pdev))) {
1335 kfree(bfad->trcmod); 1335 kfree(bfad->trcmod);
1336 goto out_alloc_trace_failure; 1336 goto out_alloc_trace_failure;
1337 } 1337 }
1338 1338
1339 retval = bfad_pci_init(pdev, bfad); 1339 retval = bfad_pci_init(pdev, bfad);
1340 if (retval) { 1340 if (retval) {
1341 printk(KERN_WARNING "bfad_pci_init failure!\n"); 1341 printk(KERN_WARNING "bfad_pci_init failure!\n");
1342 error = retval; 1342 error = retval;
1343 goto out_pci_init_failure; 1343 goto out_pci_init_failure;
1344 } 1344 }
1345 1345
1346 mutex_lock(&bfad_mutex); 1346 mutex_lock(&bfad_mutex);
1347 bfad->inst_no = bfad_inst++; 1347 bfad->inst_no = bfad_inst++;
1348 list_add_tail(&bfad->list_entry, &bfad_list); 1348 list_add_tail(&bfad->list_entry, &bfad_list);
1349 mutex_unlock(&bfad_mutex); 1349 mutex_unlock(&bfad_mutex);
1350 1350
1351 /* Initializing the state machine: State set to uninit */ 1351 /* Initializing the state machine: State set to uninit */
1352 bfa_sm_set_state(bfad, bfad_sm_uninit); 1352 bfa_sm_set_state(bfad, bfad_sm_uninit);
1353 1353
1354 spin_lock_init(&bfad->bfad_lock); 1354 spin_lock_init(&bfad->bfad_lock);
1355 spin_lock_init(&bfad->bfad_aen_spinlock); 1355 spin_lock_init(&bfad->bfad_aen_spinlock);
1356 1356
1357 pci_set_drvdata(pdev, bfad); 1357 pci_set_drvdata(pdev, bfad);
1358 1358
1359 bfad->ref_count = 0; 1359 bfad->ref_count = 0;
1360 bfad->pport.bfad = bfad; 1360 bfad->pport.bfad = bfad;
1361 INIT_LIST_HEAD(&bfad->pbc_vport_list); 1361 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1362 INIT_LIST_HEAD(&bfad->vport_list); 1362 INIT_LIST_HEAD(&bfad->vport_list);
1363 1363
1364 /* Setup the debugfs node for this bfad */ 1364 /* Setup the debugfs node for this bfad */
1365 if (bfa_debugfs_enable) 1365 if (bfa_debugfs_enable)
1366 bfad_debugfs_init(&bfad->pport); 1366 bfad_debugfs_init(&bfad->pport);
1367 1367
1368 retval = bfad_drv_init(bfad); 1368 retval = bfad_drv_init(bfad);
1369 if (retval != BFA_STATUS_OK) 1369 if (retval != BFA_STATUS_OK)
1370 goto out_drv_init_failure; 1370 goto out_drv_init_failure;
1371 1371
1372 bfa_sm_send_event(bfad, BFAD_E_CREATE); 1372 bfa_sm_send_event(bfad, BFAD_E_CREATE);
1373 1373
1374 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) 1374 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1375 goto out_bfad_sm_failure; 1375 goto out_bfad_sm_failure;
1376 1376
1377 return 0; 1377 return 0;
1378 1378
1379 out_bfad_sm_failure: 1379 out_bfad_sm_failure:
1380 bfad_hal_mem_release(bfad); 1380 bfad_hal_mem_release(bfad);
1381 out_drv_init_failure: 1381 out_drv_init_failure:
1382 /* Remove the debugfs node for this bfad */ 1382 /* Remove the debugfs node for this bfad */
1383 kfree(bfad->regdata); 1383 kfree(bfad->regdata);
1384 bfad_debugfs_exit(&bfad->pport); 1384 bfad_debugfs_exit(&bfad->pport);
1385 mutex_lock(&bfad_mutex); 1385 mutex_lock(&bfad_mutex);
1386 bfad_inst--; 1386 bfad_inst--;
1387 list_del(&bfad->list_entry); 1387 list_del(&bfad->list_entry);
1388 mutex_unlock(&bfad_mutex); 1388 mutex_unlock(&bfad_mutex);
1389 bfad_pci_uninit(pdev, bfad); 1389 bfad_pci_uninit(pdev, bfad);
1390 out_pci_init_failure: 1390 out_pci_init_failure:
1391 kfree(bfad->trcmod); 1391 kfree(bfad->trcmod);
1392 out_alloc_trace_failure: 1392 out_alloc_trace_failure:
1393 kfree(bfad); 1393 kfree(bfad);
1394 out: 1394 out:
1395 return error; 1395 return error;
1396 } 1396 }
1397 1397
1398 /* 1398 /*
1399 * PCI remove entry. 1399 * PCI remove entry.
1400 */ 1400 */
1401 void 1401 void
1402 bfad_pci_remove(struct pci_dev *pdev) 1402 bfad_pci_remove(struct pci_dev *pdev)
1403 { 1403 {
1404 struct bfad_s *bfad = pci_get_drvdata(pdev); 1404 struct bfad_s *bfad = pci_get_drvdata(pdev);
1405 unsigned long flags; 1405 unsigned long flags;
1406 1406
1407 bfa_trc(bfad, bfad->inst_no); 1407 bfa_trc(bfad, bfad->inst_no);
1408 1408
1409 spin_lock_irqsave(&bfad->bfad_lock, flags); 1409 spin_lock_irqsave(&bfad->bfad_lock, flags);
1410 if (bfad->bfad_tsk != NULL) { 1410 if (bfad->bfad_tsk != NULL) {
1411 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1411 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1412 kthread_stop(bfad->bfad_tsk); 1412 kthread_stop(bfad->bfad_tsk);
1413 } else { 1413 } else {
1414 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1414 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1415 } 1415 }
1416 1416
1417 /* Send Event BFAD_E_STOP */ 1417 /* Send Event BFAD_E_STOP */
1418 bfa_sm_send_event(bfad, BFAD_E_STOP); 1418 bfa_sm_send_event(bfad, BFAD_E_STOP);
1419 1419
1420 /* Driver detach and dealloc mem */ 1420 /* Driver detach and dealloc mem */
1421 spin_lock_irqsave(&bfad->bfad_lock, flags); 1421 spin_lock_irqsave(&bfad->bfad_lock, flags);
1422 bfa_detach(&bfad->bfa); 1422 bfa_detach(&bfad->bfa);
1423 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1423 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1424 bfad_hal_mem_release(bfad); 1424 bfad_hal_mem_release(bfad);
1425 1425
1426 /* Remove the debugfs node for this bfad */ 1426 /* Remove the debugfs node for this bfad */
1427 kfree(bfad->regdata); 1427 kfree(bfad->regdata);
1428 bfad_debugfs_exit(&bfad->pport); 1428 bfad_debugfs_exit(&bfad->pport);
1429 1429
1430 /* Cleaning the BFAD instance */ 1430 /* Cleaning the BFAD instance */
1431 mutex_lock(&bfad_mutex); 1431 mutex_lock(&bfad_mutex);
1432 bfad_inst--; 1432 bfad_inst--;
1433 list_del(&bfad->list_entry); 1433 list_del(&bfad->list_entry);
1434 mutex_unlock(&bfad_mutex); 1434 mutex_unlock(&bfad_mutex);
1435 bfad_pci_uninit(pdev, bfad); 1435 bfad_pci_uninit(pdev, bfad);
1436 1436
1437 kfree(bfad->trcmod); 1437 kfree(bfad->trcmod);
1438 kfree(bfad); 1438 kfree(bfad);
1439 } 1439 }
1440 1440
1441 /* 1441 /*
1442 * PCI Error Recovery entry, error detected. 1442 * PCI Error Recovery entry, error detected.
1443 */ 1443 */
1444 static pci_ers_result_t 1444 static pci_ers_result_t
1445 bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 1445 bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
1446 { 1446 {
1447 struct bfad_s *bfad = pci_get_drvdata(pdev); 1447 struct bfad_s *bfad = pci_get_drvdata(pdev);
1448 unsigned long flags; 1448 unsigned long flags;
1449 pci_ers_result_t ret = PCI_ERS_RESULT_NONE; 1449 pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
1450 1450
1451 dev_printk(KERN_ERR, &pdev->dev, 1451 dev_printk(KERN_ERR, &pdev->dev,
1452 "error detected state: %d - flags: 0x%x\n", 1452 "error detected state: %d - flags: 0x%x\n",
1453 state, bfad->bfad_flags); 1453 state, bfad->bfad_flags);
1454 1454
1455 switch (state) { 1455 switch (state) {
1456 case pci_channel_io_normal: /* non-fatal error */ 1456 case pci_channel_io_normal: /* non-fatal error */
1457 spin_lock_irqsave(&bfad->bfad_lock, flags); 1457 spin_lock_irqsave(&bfad->bfad_lock, flags);
1458 bfad->bfad_flags &= ~BFAD_EEH_BUSY; 1458 bfad->bfad_flags &= ~BFAD_EEH_BUSY;
1459 /* Suspend/fail all bfa operations */ 1459 /* Suspend/fail all bfa operations */
1460 bfa_ioc_suspend(&bfad->bfa.ioc); 1460 bfa_ioc_suspend(&bfad->bfa.ioc);
1461 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1461 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1462 del_timer_sync(&bfad->hal_tmo); 1462 del_timer_sync(&bfad->hal_tmo);
1463 ret = PCI_ERS_RESULT_CAN_RECOVER; 1463 ret = PCI_ERS_RESULT_CAN_RECOVER;
1464 break; 1464 break;
1465 case pci_channel_io_frozen: /* fatal error */ 1465 case pci_channel_io_frozen: /* fatal error */
1466 init_completion(&bfad->comp); 1466 init_completion(&bfad->comp);
1467 spin_lock_irqsave(&bfad->bfad_lock, flags); 1467 spin_lock_irqsave(&bfad->bfad_lock, flags);
1468 bfad->bfad_flags |= BFAD_EEH_BUSY; 1468 bfad->bfad_flags |= BFAD_EEH_BUSY;
1469 /* Suspend/fail all bfa operations */ 1469 /* Suspend/fail all bfa operations */
1470 bfa_ioc_suspend(&bfad->bfa.ioc); 1470 bfa_ioc_suspend(&bfad->bfa.ioc);
1471 bfa_fcs_stop(&bfad->bfa_fcs); 1471 bfa_fcs_stop(&bfad->bfa_fcs);
1472 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1472 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1473 wait_for_completion(&bfad->comp); 1473 wait_for_completion(&bfad->comp);
1474 1474
1475 bfad_remove_intr(bfad); 1475 bfad_remove_intr(bfad);
1476 del_timer_sync(&bfad->hal_tmo); 1476 del_timer_sync(&bfad->hal_tmo);
1477 pci_disable_device(pdev); 1477 pci_disable_device(pdev);
1478 ret = PCI_ERS_RESULT_NEED_RESET; 1478 ret = PCI_ERS_RESULT_NEED_RESET;
1479 break; 1479 break;
1480 case pci_channel_io_perm_failure: /* PCI Card is DEAD */ 1480 case pci_channel_io_perm_failure: /* PCI Card is DEAD */
1481 spin_lock_irqsave(&bfad->bfad_lock, flags); 1481 spin_lock_irqsave(&bfad->bfad_lock, flags);
1482 bfad->bfad_flags |= BFAD_EEH_BUSY | 1482 bfad->bfad_flags |= BFAD_EEH_BUSY |
1483 BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE; 1483 BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
1484 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1484 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1485 1485
1486 /* If the error_detected handler is called with the reason 1486 /* If the error_detected handler is called with the reason
1487 * pci_channel_io_perm_failure, it will subsequently call the 1487 * pci_channel_io_perm_failure, it will subsequently call the
1488 * pci_remove() entry point to remove the pci device from the 1488 * pci_remove() entry point to remove the pci device from the
1489 * system, so defer the cleanup to pci_remove(); cleaning up 1489 * system, so defer the cleanup to pci_remove(); cleaning up
1490 * here causes inconsistent state during pci_remove(). 1490 * here causes inconsistent state during pci_remove().
1491 */ 1491 */
1492 ret = PCI_ERS_RESULT_DISCONNECT; 1492 ret = PCI_ERS_RESULT_DISCONNECT;
1493 break; 1493 break;
1494 default: 1494 default:
1495 WARN_ON(1); 1495 WARN_ON(1);
1496 } 1496 }
1497 1497
1498 return ret; 1498 return ret;
1499 } 1499 }
1500 1500
1501 int 1501 int
1502 restart_bfa(struct bfad_s *bfad) 1502 restart_bfa(struct bfad_s *bfad)
1503 { 1503 {
1504 unsigned long flags; 1504 unsigned long flags;
1505 struct pci_dev *pdev = bfad->pcidev; 1505 struct pci_dev *pdev = bfad->pcidev;
1506 1506
1507 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, 1507 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
1508 &bfad->meminfo, &bfad->hal_pcidev); 1508 &bfad->meminfo, &bfad->hal_pcidev);
1509 1509
1510 /* Enable interrupts and wait for bfa_init completion */ 1510 /* Enable interrupts and wait for bfa_init completion */
1511 if (bfad_setup_intr(bfad)) { 1511 if (bfad_setup_intr(bfad)) {
1512 dev_printk(KERN_WARNING, &pdev->dev, 1512 dev_printk(KERN_WARNING, &pdev->dev,
1513 "%s: bfad_setup_intr failed\n", bfad->pci_name); 1513 "%s: bfad_setup_intr failed\n", bfad->pci_name);
1514 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); 1514 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
1515 return -1; 1515 return -1;
1516 } 1516 }
1517 1517
1518 init_completion(&bfad->comp); 1518 init_completion(&bfad->comp);
1519 spin_lock_irqsave(&bfad->bfad_lock, flags); 1519 spin_lock_irqsave(&bfad->bfad_lock, flags);
1520 bfa_iocfc_init(&bfad->bfa); 1520 bfa_iocfc_init(&bfad->bfa);
1521 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1521 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1522 1522
1523 /* Set up an interrupt handler for each vector */ 1523 /* Set up an interrupt handler for each vector */
1524 if ((bfad->bfad_flags & BFAD_MSIX_ON) && 1524 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
1525 bfad_install_msix_handler(bfad)) 1525 bfad_install_msix_handler(bfad))
1526 dev_printk(KERN_WARNING, &pdev->dev, 1526 dev_printk(KERN_WARNING, &pdev->dev,
1527 "%s: install_msix failed.\n", bfad->pci_name); 1527 "%s: install_msix failed.\n", bfad->pci_name);
1528 1528
1529 bfad_init_timer(bfad); 1529 bfad_init_timer(bfad);
1530 wait_for_completion(&bfad->comp); 1530 wait_for_completion(&bfad->comp);
1531 bfad_drv_start(bfad); 1531 bfad_drv_start(bfad);
1532 1532
1533 return 0; 1533 return 0;
1534 } 1534 }
1535 1535
1536 /* 1536 /*
1537 * PCI Error Recovery entry, re-initialize the chip. 1537 * PCI Error Recovery entry, re-initialize the chip.
1538 */ 1538 */
1539 static pci_ers_result_t 1539 static pci_ers_result_t
1540 bfad_pci_slot_reset(struct pci_dev *pdev) 1540 bfad_pci_slot_reset(struct pci_dev *pdev)
1541 { 1541 {
1542 struct bfad_s *bfad = pci_get_drvdata(pdev); 1542 struct bfad_s *bfad = pci_get_drvdata(pdev);
1543 u8 byte; 1543 u8 byte;
1544 1544
1545 dev_printk(KERN_ERR, &pdev->dev, 1545 dev_printk(KERN_ERR, &pdev->dev,
1546 "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); 1546 "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
1547 1547
1548 if (pci_enable_device(pdev)) { 1548 if (pci_enable_device(pdev)) {
1549 dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable " 1549 dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
1550 "PCI device after reset.\n"); 1550 "PCI device after reset.\n");
1551 return PCI_ERS_RESULT_DISCONNECT; 1551 return PCI_ERS_RESULT_DISCONNECT;
1552 } 1552 }
1553 1553
1554 pci_restore_state(pdev); 1554 pci_restore_state(pdev);
1555 1555
1556 /* 1556 /*
1557 * Read a config byte (e.g. the DMA max. payload size, which can never 1557 * Read a config byte (e.g. the DMA max. payload size, which can never
1558 * be 0xff) to make sure we did not hit another PCI error 1558 * be 0xff) to make sure we did not hit another PCI error
1559 * in the middle of recovery. If we did, declare a permanent failure. 1559 * in the middle of recovery. If we did, declare a permanent failure.
1560 */ 1560 */
1561 pci_read_config_byte(pdev, 0x68, &byte); 1561 pci_read_config_byte(pdev, 0x68, &byte);
1562 if (byte == 0xff) { 1562 if (byte == 0xff) {
1563 dev_printk(KERN_ERR, &pdev->dev, 1563 dev_printk(KERN_ERR, &pdev->dev,
1564 "slot_reset failed ... got another PCI error !\n"); 1564 "slot_reset failed ... got another PCI error !\n");
1565 goto out_disable_device; 1565 goto out_disable_device;
1566 } 1566 }
1567 1567
1568 pci_save_state(pdev); 1568 pci_save_state(pdev);
1569 pci_set_master(pdev); 1569 pci_set_master(pdev);
1570 1570
1571 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0) 1571 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
1572 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0) 1572 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
1573 goto out_disable_device; 1573 goto out_disable_device;
1574 1574
1575 pci_cleanup_aer_uncorrect_error_status(pdev); 1575 pci_cleanup_aer_uncorrect_error_status(pdev);
1576 1576
1577 if (restart_bfa(bfad) == -1) 1577 if (restart_bfa(bfad) == -1)
1578 goto out_disable_device; 1578 goto out_disable_device;
1579 1579
1580 pci_enable_pcie_error_reporting(pdev); 1580 pci_enable_pcie_error_reporting(pdev);
1581 dev_printk(KERN_WARNING, &pdev->dev, 1581 dev_printk(KERN_WARNING, &pdev->dev,
1582 "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags); 1582 "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
1583 1583
1584 return PCI_ERS_RESULT_RECOVERED; 1584 return PCI_ERS_RESULT_RECOVERED;
1585 1585
1586 out_disable_device: 1586 out_disable_device:
1587 pci_disable_device(pdev); 1587 pci_disable_device(pdev);
1588 return PCI_ERS_RESULT_DISCONNECT; 1588 return PCI_ERS_RESULT_DISCONNECT;
1589 } 1589 }
1590 1590
1591 static pci_ers_result_t 1591 static pci_ers_result_t
1592 bfad_pci_mmio_enabled(struct pci_dev *pdev) 1592 bfad_pci_mmio_enabled(struct pci_dev *pdev)
1593 { 1593 {
1594 unsigned long flags; 1594 unsigned long flags;
1595 struct bfad_s *bfad = pci_get_drvdata(pdev); 1595 struct bfad_s *bfad = pci_get_drvdata(pdev);
1596 1596
1597 dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n"); 1597 dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
1598 1598
1599 /* Fetch FW diagnostic information */ 1599 /* Fetch FW diagnostic information */
1600 bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc); 1600 bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
1601 1601
1602 /* Cancel all pending IOs */ 1602 /* Cancel all pending IOs */
1603 spin_lock_irqsave(&bfad->bfad_lock, flags); 1603 spin_lock_irqsave(&bfad->bfad_lock, flags);
1604 init_completion(&bfad->comp); 1604 init_completion(&bfad->comp);
1605 bfa_fcs_stop(&bfad->bfa_fcs); 1605 bfa_fcs_stop(&bfad->bfa_fcs);
1606 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1606 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1607 wait_for_completion(&bfad->comp); 1607 wait_for_completion(&bfad->comp);
1608 1608
1609 bfad_remove_intr(bfad); 1609 bfad_remove_intr(bfad);
1610 del_timer_sync(&bfad->hal_tmo); 1610 del_timer_sync(&bfad->hal_tmo);
1611 pci_disable_device(pdev); 1611 pci_disable_device(pdev);
1612 1612
1613 return PCI_ERS_RESULT_NEED_RESET; 1613 return PCI_ERS_RESULT_NEED_RESET;
1614 } 1614 }
1615 1615
1616 static void 1616 static void
1617 bfad_pci_resume(struct pci_dev *pdev) 1617 bfad_pci_resume(struct pci_dev *pdev)
1618 { 1618 {
1619 unsigned long flags; 1619 unsigned long flags;
1620 struct bfad_s *bfad = pci_get_drvdata(pdev); 1620 struct bfad_s *bfad = pci_get_drvdata(pdev);
1621 1621
1622 dev_printk(KERN_WARNING, &pdev->dev, "resume\n"); 1622 dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
1623 1623
1624 /* wait until the link is online */ 1624 /* wait until the link is online */
1625 bfad_rport_online_wait(bfad); 1625 bfad_rport_online_wait(bfad);
1626 1626
1627 spin_lock_irqsave(&bfad->bfad_lock, flags); 1627 spin_lock_irqsave(&bfad->bfad_lock, flags);
1628 bfad->bfad_flags &= ~BFAD_EEH_BUSY; 1628 bfad->bfad_flags &= ~BFAD_EEH_BUSY;
1629 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1629 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1630 } 1630 }
1631 1631
1632 struct pci_device_id bfad_id_table[] = { 1632 struct pci_device_id bfad_id_table[] = {
1633 { 1633 {
1634 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1634 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1635 .device = BFA_PCI_DEVICE_ID_FC_8G2P, 1635 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1636 .subvendor = PCI_ANY_ID, 1636 .subvendor = PCI_ANY_ID,
1637 .subdevice = PCI_ANY_ID, 1637 .subdevice = PCI_ANY_ID,
1638 }, 1638 },
1639 { 1639 {
1640 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1640 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1641 .device = BFA_PCI_DEVICE_ID_FC_8G1P, 1641 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1642 .subvendor = PCI_ANY_ID, 1642 .subvendor = PCI_ANY_ID,
1643 .subdevice = PCI_ANY_ID, 1643 .subdevice = PCI_ANY_ID,
1644 }, 1644 },
1645 { 1645 {
1646 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1646 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1647 .device = BFA_PCI_DEVICE_ID_CT, 1647 .device = BFA_PCI_DEVICE_ID_CT,
1648 .subvendor = PCI_ANY_ID, 1648 .subvendor = PCI_ANY_ID,
1649 .subdevice = PCI_ANY_ID, 1649 .subdevice = PCI_ANY_ID,
1650 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1650 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1651 .class_mask = ~0, 1651 .class_mask = ~0,
1652 }, 1652 },
1653 { 1653 {
1654 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1654 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1655 .device = BFA_PCI_DEVICE_ID_CT_FC, 1655 .device = BFA_PCI_DEVICE_ID_CT_FC,
1656 .subvendor = PCI_ANY_ID, 1656 .subvendor = PCI_ANY_ID,
1657 .subdevice = PCI_ANY_ID, 1657 .subdevice = PCI_ANY_ID,
1658 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1658 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1659 .class_mask = ~0, 1659 .class_mask = ~0,
1660 }, 1660 },
1661 { 1661 {
1662 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1662 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1663 .device = BFA_PCI_DEVICE_ID_CT2, 1663 .device = BFA_PCI_DEVICE_ID_CT2,
1664 .subvendor = PCI_ANY_ID, 1664 .subvendor = PCI_ANY_ID,
1665 .subdevice = PCI_ANY_ID, 1665 .subdevice = PCI_ANY_ID,
1666 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1666 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1667 .class_mask = ~0, 1667 .class_mask = ~0,
1668 }, 1668 },
1669 1669
1670 { 1670 {
1671 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1671 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1672 .device = BFA_PCI_DEVICE_ID_CT2_QUAD, 1672 .device = BFA_PCI_DEVICE_ID_CT2_QUAD,
1673 .subvendor = PCI_ANY_ID, 1673 .subvendor = PCI_ANY_ID,
1674 .subdevice = PCI_ANY_ID, 1674 .subdevice = PCI_ANY_ID,
1675 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1675 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1676 .class_mask = ~0, 1676 .class_mask = ~0,
1677 }, 1677 },
1678 {0, 0}, 1678 {0, 0},
1679 }; 1679 };
1680 1680
1681 MODULE_DEVICE_TABLE(pci, bfad_id_table); 1681 MODULE_DEVICE_TABLE(pci, bfad_id_table);
1682 1682
1683 /* 1683 /*
1684 * PCI error recovery handlers. 1684 * PCI error recovery handlers.
1685 */ 1685 */
1686 static struct pci_error_handlers bfad_err_handler = { 1686 static struct pci_error_handlers bfad_err_handler = {
1687 .error_detected = bfad_pci_error_detected, 1687 .error_detected = bfad_pci_error_detected,
1688 .slot_reset = bfad_pci_slot_reset, 1688 .slot_reset = bfad_pci_slot_reset,
1689 .mmio_enabled = bfad_pci_mmio_enabled, 1689 .mmio_enabled = bfad_pci_mmio_enabled,
1690 .resume = bfad_pci_resume, 1690 .resume = bfad_pci_resume,
1691 }; 1691 };
1692 1692
1693 static struct pci_driver bfad_pci_driver = { 1693 static struct pci_driver bfad_pci_driver = {
1694 .name = BFAD_DRIVER_NAME, 1694 .name = BFAD_DRIVER_NAME,
1695 .id_table = bfad_id_table, 1695 .id_table = bfad_id_table,
1696 .probe = bfad_pci_probe, 1696 .probe = bfad_pci_probe,
1697 .remove = bfad_pci_remove, 1697 .remove = bfad_pci_remove,
1698 .err_handler = &bfad_err_handler, 1698 .err_handler = &bfad_err_handler,
1699 }; 1699 };
1700 1700
1701 /* 1701 /*
1702 * Driver module init. 1702 * Driver module init.
1703 */ 1703 */
1704 static int __init 1704 static int __init
1705 bfad_init(void) 1705 bfad_init(void)
1706 { 1706 {
1707 int error = 0; 1707 int error = 0;
1708 1708
1709 pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n", 1709 pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n",
1710 BFAD_DRIVER_VERSION); 1710 BFAD_DRIVER_VERSION);
1711 1711
1712 if (num_sgpgs > 0) 1712 if (num_sgpgs > 0)
1713 num_sgpgs_parm = num_sgpgs; 1713 num_sgpgs_parm = num_sgpgs;
1714 1714
1715 error = bfad_im_module_init(); 1715 error = bfad_im_module_init();
1716 if (error) { 1716 if (error) {
1717 error = -ENOMEM; 1717 error = -ENOMEM;
1718 printk(KERN_WARNING "bfad_im_module_init failure\n"); 1718 printk(KERN_WARNING "bfad_im_module_init failure\n");
1719 goto ext; 1719 goto ext;
1720 } 1720 }
1721 1721
1722 if (strcmp(FCPI_NAME, " fcpim") == 0) 1722 if (strcmp(FCPI_NAME, " fcpim") == 0)
1723 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; 1723 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1724 1724
1725 bfa_auto_recover = ioc_auto_recover; 1725 bfa_auto_recover = ioc_auto_recover;
1726 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1726 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1727 bfa_fcs_rport_set_max_logins(max_rport_logins); 1727 bfa_fcs_rport_set_max_logins(max_rport_logins);
1728 1728
1729 error = pci_register_driver(&bfad_pci_driver); 1729 error = pci_register_driver(&bfad_pci_driver);
1730 if (error) { 1730 if (error) {
1731 printk(KERN_WARNING "pci_register_driver failure\n"); 1731 printk(KERN_WARNING "pci_register_driver failure\n");
1732 goto ext; 1732 goto ext;
1733 } 1733 }
1734 1734
1735 return 0; 1735 return 0;
1736 1736
1737 ext: 1737 ext:
1738 bfad_im_module_exit(); 1738 bfad_im_module_exit();
1739 return error; 1739 return error;
1740 } 1740 }
1741 1741
1742 /* 1742 /*
1743 * Driver module exit. 1743 * Driver module exit.
1744 */ 1744 */
1745 static void __exit 1745 static void __exit
1746 bfad_exit(void) 1746 bfad_exit(void)
1747 { 1747 {
1748 pci_unregister_driver(&bfad_pci_driver); 1748 pci_unregister_driver(&bfad_pci_driver);
1749 bfad_im_module_exit(); 1749 bfad_im_module_exit();
1750 bfad_free_fwimg(); 1750 bfad_free_fwimg();
1751 } 1751 }
1752 1752
1753 /* Firmware handling */ 1753 /* Firmware handling */
1754 static void 1754 static void
1755 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 1755 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1756 u32 *bfi_image_size, char *fw_name) 1756 u32 *bfi_image_size, char *fw_name)
1757 { 1757 {
1758 const struct firmware *fw; 1758 const struct firmware *fw;
1759 1759
1760 if (request_firmware(&fw, fw_name, &pdev->dev)) { 1760 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1761 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); 1761 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1762 *bfi_image = NULL; 1762 *bfi_image = NULL;
1763 goto out; 1763 goto out;
1764 } 1764 }
1765 1765
1766 *bfi_image = vmalloc(fw->size); 1766 *bfi_image = vmalloc(fw->size);
1767 if (NULL == *bfi_image) { 1767 if (NULL == *bfi_image) {
1768 printk(KERN_ALERT "Fail to allocate buffer for fw image " 1768 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1769 "size=%x!\n", (u32) fw->size); 1769 "size=%x!\n", (u32) fw->size);
1770 goto out; 1770 goto out;
1771 } 1771 }
1772 1772
1773 memcpy(*bfi_image, fw->data, fw->size); 1773 memcpy(*bfi_image, fw->data, fw->size);
1774 *bfi_image_size = fw->size/sizeof(u32); 1774 *bfi_image_size = fw->size/sizeof(u32);
1775 out: 1775 out:
1776 release_firmware(fw); 1776 release_firmware(fw);
1777 } 1777 }
1778 1778
1779 static u32 * 1779 static u32 *
1780 bfad_load_fwimg(struct pci_dev *pdev) 1780 bfad_load_fwimg(struct pci_dev *pdev)
1781 { 1781 {
1782 if (bfa_asic_id_ct2(pdev->device)) { 1782 if (bfa_asic_id_ct2(pdev->device)) {
1783 if (bfi_image_ct2_size == 0) 1783 if (bfi_image_ct2_size == 0)
1784 bfad_read_firmware(pdev, &bfi_image_ct2, 1784 bfad_read_firmware(pdev, &bfi_image_ct2,
1785 &bfi_image_ct2_size, BFAD_FW_FILE_CT2); 1785 &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1786 return bfi_image_ct2; 1786 return bfi_image_ct2;
1787 } else if (bfa_asic_id_ct(pdev->device)) { 1787 } else if (bfa_asic_id_ct(pdev->device)) {
1788 if (bfi_image_ct_size == 0) 1788 if (bfi_image_ct_size == 0)
1789 bfad_read_firmware(pdev, &bfi_image_ct, 1789 bfad_read_firmware(pdev, &bfi_image_ct,
1790 &bfi_image_ct_size, BFAD_FW_FILE_CT); 1790 &bfi_image_ct_size, BFAD_FW_FILE_CT);
1791 return bfi_image_ct; 1791 return bfi_image_ct;
1792 } else if (bfa_asic_id_cb(pdev->device)) { 1792 } else if (bfa_asic_id_cb(pdev->device)) {
1793 if (bfi_image_cb_size == 0) 1793 if (bfi_image_cb_size == 0)
1794 bfad_read_firmware(pdev, &bfi_image_cb, 1794 bfad_read_firmware(pdev, &bfi_image_cb,
1795 &bfi_image_cb_size, BFAD_FW_FILE_CB); 1795 &bfi_image_cb_size, BFAD_FW_FILE_CB);
1796 return bfi_image_cb; 1796 return bfi_image_cb;
1797 } 1797 }
1798 1798
1799 return NULL; 1799 return NULL;
1800 } 1800 }
1801 1801
1802 static void 1802 static void
1803 bfad_free_fwimg(void) 1803 bfad_free_fwimg(void)
1804 { 1804 {
1805 if (bfi_image_ct2_size && bfi_image_ct2) 1805 if (bfi_image_ct2_size && bfi_image_ct2)
1806 vfree(bfi_image_ct2); 1806 vfree(bfi_image_ct2);
1807 if (bfi_image_ct_size && bfi_image_ct) 1807 if (bfi_image_ct_size && bfi_image_ct)
1808 vfree(bfi_image_ct); 1808 vfree(bfi_image_ct);
1809 if (bfi_image_cb_size && bfi_image_cb) 1809 if (bfi_image_cb_size && bfi_image_cb)
1810 vfree(bfi_image_cb); 1810 vfree(bfi_image_cb);
1811 } 1811 }
1812 1812
1813 module_init(bfad_init); 1813 module_init(bfad_init);
1814 module_exit(bfad_exit); 1814 module_exit(bfad_exit);
1815 MODULE_LICENSE("GPL"); 1815 MODULE_LICENSE("GPL");
1816 MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME); 1816 MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1817 MODULE_AUTHOR("QLogic Corporation"); 1817 MODULE_AUTHOR("QLogic Corporation");
1818 MODULE_VERSION(BFAD_DRIVER_VERSION); 1818 MODULE_VERSION(BFAD_DRIVER_VERSION);
1819 1819
drivers/scsi/bfa/bfad_drv.h
1 /* 1 /*
2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3 * Copyright (c) 2014- QLogic Corporation. 3 * Copyright (c) 2014- QLogic Corporation.
4 * All rights reserved 4 * All rights reserved
5 * www.qlogic.com 5 * www.qlogic.com
6 * 6 *
7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License (GPL) Version 2 as 10 * under the terms of the GNU General Public License (GPL) Version 2 as
11 * published by the Free Software Foundation 11 * published by the Free Software Foundation
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 */ 17 */
18 18
19 /* 19 /*
20 * Contains base driver definitions. 20 * Contains base driver definitions.
21 */ 21 */
22 22
23 /* 23 /*
24 * bfa_drv.h Linux driver data structures. 24 * bfa_drv.h Linux driver data structures.
25 */ 25 */
26 26
27 #ifndef __BFAD_DRV_H__ 27 #ifndef __BFAD_DRV_H__
28 #define __BFAD_DRV_H__ 28 #define __BFAD_DRV_H__
29 29
30 #include <linux/types.h> 30 #include <linux/types.h>
31 #include <linux/pci.h> 31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h> 32 #include <linux/dma-mapping.h>
33 #include <linux/idr.h> 33 #include <linux/idr.h>
34 #include <linux/interrupt.h> 34 #include <linux/interrupt.h>
35 #include <linux/cdev.h> 35 #include <linux/cdev.h>
36 #include <linux/fs.h> 36 #include <linux/fs.h>
37 #include <linux/delay.h> 37 #include <linux/delay.h>
38 #include <linux/vmalloc.h> 38 #include <linux/vmalloc.h>
39 #include <linux/workqueue.h> 39 #include <linux/workqueue.h>
40 #include <linux/bitops.h> 40 #include <linux/bitops.h>
41 #include <linux/aer.h> 41 #include <linux/aer.h>
42 #include <scsi/scsi.h> 42 #include <scsi/scsi.h>
43 #include <scsi/scsi_host.h> 43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_tcq.h> 44 #include <scsi/scsi_tcq.h>
45 #include <scsi/scsi_transport_fc.h> 45 #include <scsi/scsi_transport_fc.h>
46 #include <scsi/scsi_transport.h> 46 #include <scsi/scsi_transport.h>
47 #include <scsi/scsi_bsg_fc.h> 47 #include <scsi/scsi_bsg_fc.h>
48 #include <scsi/scsi_devinfo.h> 48 #include <scsi/scsi_devinfo.h>
49 49
50 #include "bfa_modules.h" 50 #include "bfa_modules.h"
51 #include "bfa_fcs.h" 51 #include "bfa_fcs.h"
52 #include "bfa_defs_fcs.h" 52 #include "bfa_defs_fcs.h"
53 53
54 #include "bfa_plog.h" 54 #include "bfa_plog.h"
55 #include "bfa_cs.h" 55 #include "bfa_cs.h"
56 56
57 #define BFAD_DRIVER_NAME "bfa" 57 #define BFAD_DRIVER_NAME "bfa"
58 #ifdef BFA_DRIVER_VERSION 58 #ifdef BFA_DRIVER_VERSION
59 #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 59 #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
60 #else 60 #else
61 #define BFAD_DRIVER_VERSION "3.2.25.0" 61 #define BFAD_DRIVER_VERSION "3.2.25.1"
#endif

#define BFAD_PROTO_NAME FCPI_NAME
#define BFAD_IRQ_FLAGS IRQF_SHARED

#ifndef FC_PORTSPEED_8GBIT
#define FC_PORTSPEED_8GBIT 0x10
#endif

/*
 * BFAD flags
 */
#define BFAD_MSIX_ON 0x00000001
#define BFAD_HAL_INIT_DONE 0x00000002
#define BFAD_DRV_INIT_DONE 0x00000004
#define BFAD_CFG_PPORT_DONE 0x00000008
#define BFAD_HAL_START_DONE 0x00000010
#define BFAD_PORT_ONLINE 0x00000020
#define BFAD_RPORT_ONLINE 0x00000040
#define BFAD_FCS_INIT_DONE 0x00000080
#define BFAD_HAL_INIT_FAIL 0x00000100
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
#define BFAD_INTX_ON 0x00000400
#define BFAD_EEH_BUSY 0x00000800
#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE 0x00001000
/*
 * BFAD related definition
 */
#define SCSI_SCAN_DELAY HZ
#define BFAD_STOP_TIMEOUT 30
#define BFAD_SUSPEND_TIMEOUT BFAD_STOP_TIMEOUT

/*
 * BFAD configuration parameter default values
 */
#define BFAD_LUN_QUEUE_DEPTH 32
#define BFAD_IO_MAX_SGE SG_ALL
#define BFAD_MIN_SECTORS 128 /* 64k */
#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */

#define bfad_isr_t irq_handler_t

#define MAX_MSIX_ENTRY 22

struct bfad_msix_s {
        struct bfad_s *bfad;
        struct msix_entry msix;
        char name[32];
};

/*
 * Only append to the enums defined here to avoid any versioning
 * needed between trace utility and driver version
 */
enum {
        BFA_TRC_LDRV_BFAD = 1,
        BFA_TRC_LDRV_IM = 2,
        BFA_TRC_LDRV_BSG = 3,
};

enum bfad_port_pvb_type {
        BFAD_PORT_PHYS_BASE = 0,
        BFAD_PORT_PHYS_VPORT = 1,
        BFAD_PORT_VF_BASE = 2,
        BFAD_PORT_VF_VPORT = 3,
};

/*
 * PORT data structure
 */
struct bfad_port_s {
        struct list_head list_entry;
        struct bfad_s *bfad;
        struct bfa_fcs_lport_s *fcs_port;
        u32 roles;
        s32 flags;
        u32 supported_fc4s;
        enum bfad_port_pvb_type pvb_type;
        struct bfad_im_port_s *im_port; /* IM specific data */
        /* port debugfs specific data */
        struct dentry *port_debugfs_root;
};

/*
 * VPORT data structure
 */
struct bfad_vport_s {
        struct bfad_port_s drv_port;
        struct bfa_fcs_vport_s fcs_vport;
        struct completion *comp_del;
        struct list_head list_entry;
};

/*
 * VF data structure
 */
struct bfad_vf_s {
        bfa_fcs_vf_t fcs_vf;
        struct bfad_port_s base_port; /* base port for vf */
        struct bfad_s *bfad;
};

struct bfad_cfg_param_s {
        u32 rport_del_timeout;
        u32 ioc_queue_depth;
        u32 lun_queue_depth;
        u32 io_max_sge;
        u32 binding_method;
};

union bfad_tmp_buf {
        /* From struct bfa_adapter_attr_s */
        char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
        char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
        char model[BFA_ADAPTER_MODEL_NAME_LEN];
        char fw_ver[BFA_VERSION_LEN];
        char optrom_ver[BFA_VERSION_LEN];

        /* From struct bfa_ioc_pci_attr_s */
        u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */

        wwn_t wwn[BFA_FCS_MAX_LPORTS];
};

/*
 * BFAD (PCI function) data structure
 */
struct bfad_s {
        bfa_sm_t sm; /* state machine */
        struct list_head list_entry;
        struct bfa_s bfa;
        struct bfa_fcs_s bfa_fcs;
        struct pci_dev *pcidev;
        const char *pci_name;
        struct bfa_pcidev_s hal_pcidev;
        struct bfa_ioc_pci_attr_s pci_attr;
        void __iomem *pci_bar0_kva;
        void __iomem *pci_bar2_kva;
        struct completion comp;
        struct completion suspend;
        struct completion enable_comp;
        struct completion disable_comp;
        bfa_boolean_t disable_active;
        struct bfad_port_s pport; /* physical port of the BFAD */
        struct bfa_meminfo_s meminfo;
        struct bfa_iocfc_cfg_s ioc_cfg;
        u32 inst_no; /* BFAD instance number */
        u32 bfad_flags;
        spinlock_t bfad_lock;
        struct task_struct *bfad_tsk;
        struct bfad_cfg_param_s cfg_data;
        struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
        int nvec;
        char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
        char port_name[BFA_ADAPTER_SYM_NAME_LEN];
        struct timer_list hal_tmo;
        unsigned long hs_start;
        struct bfad_im_s *im; /* IM specific data */
        struct bfa_trc_mod_s *trcmod;
        struct bfa_plog_s plog_buf;
        int ref_count;
        union bfad_tmp_buf tmp_buf;
        struct fc_host_statistics link_stats;
        struct list_head pbc_vport_list;
        /* debugfs specific data */
        char *regdata;
        u32 reglen;
        struct dentry *bfad_dentry_files[5];
        struct list_head free_aen_q;
        struct list_head active_aen_q;
        struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
        spinlock_t bfad_aen_spinlock;
        struct list_head vport_list;
};

/* BFAD state machine events */
enum bfad_sm_event {
        BFAD_E_CREATE = 1,
        BFAD_E_KTHREAD_CREATE_FAILED = 2,
        BFAD_E_INIT = 3,
        BFAD_E_INIT_SUCCESS = 4,
        BFAD_E_HAL_INIT_FAILED = 5,
        BFAD_E_INIT_FAILED = 6,
        BFAD_E_FCS_EXIT_COMP = 7,
        BFAD_E_EXIT_COMP = 8,
        BFAD_E_STOP = 9
};

/*
 * RPORT data structure
 */
struct bfad_rport_s {
        struct bfa_fcs_rport_s fcs_rport;
};

struct bfad_buf_info {
        void *virt;
        dma_addr_t phys;
        u32 size;
};

struct bfad_fcxp {
        struct bfad_port_s *port;
        struct bfa_rport_s *bfa_rport;
        bfa_status_t req_status;
        u16 tag;
        u16 rsp_len;
        u16 rsp_maxlen;
        u8 use_ireqbuf;
        u8 use_irspbuf;
        u32 num_req_sgles;
        u32 num_rsp_sgles;
        struct fchs_s fchs;
        void *reqbuf_info;
        void *rspbuf_info;
        struct bfa_sge_s *req_sge;
        struct bfa_sge_s *rsp_sge;
        fcxp_send_cb_t send_cbfn;
        void *send_cbarg;
        void *bfa_fcxp;
        struct completion comp;
};

struct bfad_hal_comp {
        bfa_status_t status;
        struct completion comp;
};

#define BFA_LOG(level, bfad, mask, fmt, arg...) \
do { \
        if (((mask) == 4) || (level[1] <= '4')) \
                dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
} while (0)

bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
                               struct bfa_lport_cfg_s *port_cfg,
                               struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
                            struct bfa_lport_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
bfa_status_t bfad_drv_init(struct bfad_s *bfad);
bfa_status_t bfad_start_ops(struct bfad_s *bfad);
void bfad_drv_start(struct bfad_s *bfad);
void bfad_uncfg_pport(struct bfad_s *bfad);
void bfad_stop(struct bfad_s *bfad);
void bfad_fcs_stop(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_hal_mem_release(struct bfad_s *bfad);
void bfad_hcb_comp(void *arg, bfa_status_t status);

int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
void bfad_bfa_tmo(unsigned long data);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_drv_uninit(struct bfad_s *bfad);
int bfad_worker(void *ptr);
void bfad_debugfs_init(struct bfad_port_s *port);
void bfad_debugfs_exit(struct bfad_port_s *port);

void bfad_pci_remove(struct pci_dev *pdev);
int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
void bfad_rport_online_wait(struct bfad_s *bfad);
int bfad_get_linkup_delay(struct bfad_s *bfad);
int bfad_install_msix_handler(struct bfad_s *bfad);

extern struct idr bfad_im_port_index;
extern struct pci_device_id bfad_id_table[];
extern struct list_head bfad_list;
extern char *os_name;
extern char *os_patch;
extern char *host_name;
extern int num_rports;
extern int num_ios;
extern int num_tms;
extern int num_fcxps;
extern int num_ufbufs;
extern int reqq_size;
extern int rspq_size;
extern int num_sgpgs;
extern int rport_del_timeout;
extern int bfa_lun_queue_depth;
extern int bfa_io_max_sge;
extern int bfa_log_level;
extern int ioc_auto_recover;
extern int bfa_linkup_delay;
extern int msix_disable_cb;
extern int msix_disable_ct;
extern int fdmi_enable;
extern int supported_fc4s;
extern int pcie_max_read_reqsz;
extern int max_xfer_size;
extern int bfa_debugfs_enable;
extern struct mutex bfad_mutex;

#endif /* __BFAD_DRV_H__ */