Commit 2f2f40a45845e52fbbe07bcd3e09ccff44feb01b

Authored by Jing Huang
Committed by James Bottomley
1 parent d4b671c58e

[SCSI] bfa: remove inactive functions

This patch removes some inactive functions and macros.

Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Showing 7 changed files with 0 additions and 74 deletions (inline diff)

drivers/scsi/bfa/Makefile
1 obj-$(CONFIG_SCSI_BFA_FC) := bfa.o 1 obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3 bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o 3 bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
4 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o 4 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
5 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o 5 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
6 bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o 6 bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
7
8 ccflags-y := -DBFA_PERF_BUILD
9 7
drivers/scsi/bfa/bfa_core.c
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 #include "bfad_drv.h" 18 #include "bfad_drv.h"
19 #include "bfa_modules.h" 19 #include "bfa_modules.h"
20 #include "bfi_ctreg.h" 20 #include "bfi_ctreg.h"
21 21
22 BFA_TRC_FILE(HAL, CORE); 22 BFA_TRC_FILE(HAL, CORE);
23 23
24 /* 24 /*
25 * BFA module list terminated by NULL 25 * BFA module list terminated by NULL
26 */ 26 */
27 static struct bfa_module_s *hal_mods[] = { 27 static struct bfa_module_s *hal_mods[] = {
28 &hal_mod_sgpg, 28 &hal_mod_sgpg,
29 &hal_mod_fcport, 29 &hal_mod_fcport,
30 &hal_mod_fcxp, 30 &hal_mod_fcxp,
31 &hal_mod_lps, 31 &hal_mod_lps,
32 &hal_mod_uf, 32 &hal_mod_uf,
33 &hal_mod_rport, 33 &hal_mod_rport,
34 &hal_mod_fcpim, 34 &hal_mod_fcpim,
35 NULL 35 NULL
36 }; 36 };
37 37
38 /* 38 /*
39 * Message handlers for various modules. 39 * Message handlers for various modules.
40 */ 40 */
41 static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { 41 static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
42 bfa_isr_unhandled, /* NONE */ 42 bfa_isr_unhandled, /* NONE */
43 bfa_isr_unhandled, /* BFI_MC_IOC */ 43 bfa_isr_unhandled, /* BFI_MC_IOC */
44 bfa_isr_unhandled, /* BFI_MC_DIAG */ 44 bfa_isr_unhandled, /* BFI_MC_DIAG */
45 bfa_isr_unhandled, /* BFI_MC_FLASH */ 45 bfa_isr_unhandled, /* BFI_MC_FLASH */
46 bfa_isr_unhandled, /* BFI_MC_CEE */ 46 bfa_isr_unhandled, /* BFI_MC_CEE */
47 bfa_fcport_isr, /* BFI_MC_FCPORT */ 47 bfa_fcport_isr, /* BFI_MC_FCPORT */
48 bfa_isr_unhandled, /* BFI_MC_IOCFC */ 48 bfa_isr_unhandled, /* BFI_MC_IOCFC */
49 bfa_isr_unhandled, /* BFI_MC_LL */ 49 bfa_isr_unhandled, /* BFI_MC_LL */
50 bfa_uf_isr, /* BFI_MC_UF */ 50 bfa_uf_isr, /* BFI_MC_UF */
51 bfa_fcxp_isr, /* BFI_MC_FCXP */ 51 bfa_fcxp_isr, /* BFI_MC_FCXP */
52 bfa_lps_isr, /* BFI_MC_LPS */ 52 bfa_lps_isr, /* BFI_MC_LPS */
53 bfa_rport_isr, /* BFI_MC_RPORT */ 53 bfa_rport_isr, /* BFI_MC_RPORT */
54 bfa_itnim_isr, /* BFI_MC_ITNIM */ 54 bfa_itnim_isr, /* BFI_MC_ITNIM */
55 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ 55 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
56 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ 56 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
57 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ 57 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
58 bfa_ioim_isr, /* BFI_MC_IOIM */ 58 bfa_ioim_isr, /* BFI_MC_IOIM */
59 bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ 59 bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
60 bfa_tskim_isr, /* BFI_MC_TSKIM */ 60 bfa_tskim_isr, /* BFI_MC_TSKIM */
61 bfa_isr_unhandled, /* BFI_MC_SBOOT */ 61 bfa_isr_unhandled, /* BFI_MC_SBOOT */
62 bfa_isr_unhandled, /* BFI_MC_IPFC */ 62 bfa_isr_unhandled, /* BFI_MC_IPFC */
63 bfa_isr_unhandled, /* BFI_MC_PORT */ 63 bfa_isr_unhandled, /* BFI_MC_PORT */
64 bfa_isr_unhandled, /* --------- */ 64 bfa_isr_unhandled, /* --------- */
65 bfa_isr_unhandled, /* --------- */ 65 bfa_isr_unhandled, /* --------- */
66 bfa_isr_unhandled, /* --------- */ 66 bfa_isr_unhandled, /* --------- */
67 bfa_isr_unhandled, /* --------- */ 67 bfa_isr_unhandled, /* --------- */
68 bfa_isr_unhandled, /* --------- */ 68 bfa_isr_unhandled, /* --------- */
69 bfa_isr_unhandled, /* --------- */ 69 bfa_isr_unhandled, /* --------- */
70 bfa_isr_unhandled, /* --------- */ 70 bfa_isr_unhandled, /* --------- */
71 bfa_isr_unhandled, /* --------- */ 71 bfa_isr_unhandled, /* --------- */
72 bfa_isr_unhandled, /* --------- */ 72 bfa_isr_unhandled, /* --------- */
73 bfa_isr_unhandled, /* --------- */ 73 bfa_isr_unhandled, /* --------- */
74 }; 74 };
75 /* 75 /*
76 * Message handlers for mailbox command classes 76 * Message handlers for mailbox command classes
77 */ 77 */
78 static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 78 static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
79 NULL, 79 NULL,
80 NULL, /* BFI_MC_IOC */ 80 NULL, /* BFI_MC_IOC */
81 NULL, /* BFI_MC_DIAG */ 81 NULL, /* BFI_MC_DIAG */
82 NULL, /* BFI_MC_FLASH */ 82 NULL, /* BFI_MC_FLASH */
83 NULL, /* BFI_MC_CEE */ 83 NULL, /* BFI_MC_CEE */
84 NULL, /* BFI_MC_PORT */ 84 NULL, /* BFI_MC_PORT */
85 bfa_iocfc_isr, /* BFI_MC_IOCFC */ 85 bfa_iocfc_isr, /* BFI_MC_IOCFC */
86 NULL, 86 NULL,
87 }; 87 };
88 88
89 89
90 90
91 static void 91 static void
92 bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) 92 bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
93 { 93 {
94 struct bfa_port_s *port = &bfa->modules.port; 94 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len; 95 u32 dm_len;
96 u8 *dm_kva; 96 u8 *dm_kva;
97 u64 dm_pa; 97 u64 dm_pa;
98 98
99 dm_len = bfa_port_meminfo(); 99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi); 100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi); 101 dm_pa = bfa_meminfo_dma_phys(mi);
102 102
103 memset(port, 0, sizeof(struct bfa_port_s)); 103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); 104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa); 105 bfa_port_mem_claim(port, dm_kva, dm_pa);
106 106
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; 107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; 108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
109 } 109 }
110 110
111 /* 111 /*
112 * BFA IOC FC related definitions 112 * BFA IOC FC related definitions
113 */ 113 */
114 114
115 /* 115 /*
116 * IOC local definitions 116 * IOC local definitions
117 */ 117 */
118 #define BFA_IOCFC_TOV 5000 /* msecs */ 118 #define BFA_IOCFC_TOV 5000 /* msecs */
119 119
120 enum { 120 enum {
121 BFA_IOCFC_ACT_NONE = 0, 121 BFA_IOCFC_ACT_NONE = 0,
122 BFA_IOCFC_ACT_INIT = 1, 122 BFA_IOCFC_ACT_INIT = 1,
123 BFA_IOCFC_ACT_STOP = 2, 123 BFA_IOCFC_ACT_STOP = 2,
124 BFA_IOCFC_ACT_DISABLE = 3, 124 BFA_IOCFC_ACT_DISABLE = 3,
125 }; 125 };
126 126
127 #define DEF_CFG_NUM_FABRICS 1 127 #define DEF_CFG_NUM_FABRICS 1
128 #define DEF_CFG_NUM_LPORTS 256 128 #define DEF_CFG_NUM_LPORTS 256
129 #define DEF_CFG_NUM_CQS 4 129 #define DEF_CFG_NUM_CQS 4
130 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) 130 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
131 #define DEF_CFG_NUM_TSKIM_REQS 128 131 #define DEF_CFG_NUM_TSKIM_REQS 128
132 #define DEF_CFG_NUM_FCXP_REQS 64 132 #define DEF_CFG_NUM_FCXP_REQS 64
133 #define DEF_CFG_NUM_UF_BUFS 64 133 #define DEF_CFG_NUM_UF_BUFS 64
134 #define DEF_CFG_NUM_RPORTS 1024 134 #define DEF_CFG_NUM_RPORTS 1024
135 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) 135 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
136 #define DEF_CFG_NUM_TINS 256 136 #define DEF_CFG_NUM_TINS 256
137 137
138 #define DEF_CFG_NUM_SGPGS 2048 138 #define DEF_CFG_NUM_SGPGS 2048
139 #define DEF_CFG_NUM_REQQ_ELEMS 256 139 #define DEF_CFG_NUM_REQQ_ELEMS 256
140 #define DEF_CFG_NUM_RSPQ_ELEMS 64 140 #define DEF_CFG_NUM_RSPQ_ELEMS 64
141 #define DEF_CFG_NUM_SBOOT_TGTS 16 141 #define DEF_CFG_NUM_SBOOT_TGTS 16
142 #define DEF_CFG_NUM_SBOOT_LUNS 16 142 #define DEF_CFG_NUM_SBOOT_LUNS 16
143 143
144 /* 144 /*
145 * forward declaration for IOC FC functions 145 * forward declaration for IOC FC functions
146 */ 146 */
147 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); 147 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
148 static void bfa_iocfc_disable_cbfn(void *bfa_arg); 148 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
149 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); 149 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
150 static void bfa_iocfc_reset_cbfn(void *bfa_arg); 150 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
151 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 151 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
152 152
153 /* 153 /*
154 * BFA Interrupt handling functions 154 * BFA Interrupt handling functions
155 */ 155 */
156 static void 156 static void
157 bfa_reqq_resume(struct bfa_s *bfa, int qid) 157 bfa_reqq_resume(struct bfa_s *bfa, int qid)
158 { 158 {
159 struct list_head *waitq, *qe, *qen; 159 struct list_head *waitq, *qe, *qen;
160 struct bfa_reqq_wait_s *wqe; 160 struct bfa_reqq_wait_s *wqe;
161 161
162 waitq = bfa_reqq(bfa, qid); 162 waitq = bfa_reqq(bfa, qid);
163 list_for_each_safe(qe, qen, waitq) { 163 list_for_each_safe(qe, qen, waitq) {
164 /* 164 /*
165 * Callback only as long as there is room in request queue 165 * Callback only as long as there is room in request queue
166 */ 166 */
167 if (bfa_reqq_full(bfa, qid)) 167 if (bfa_reqq_full(bfa, qid))
168 break; 168 break;
169 169
170 list_del(qe); 170 list_del(qe);
171 wqe = (struct bfa_reqq_wait_s *) qe; 171 wqe = (struct bfa_reqq_wait_s *) qe;
172 wqe->qresume(wqe->cbarg); 172 wqe->qresume(wqe->cbarg);
173 } 173 }
174 } 174 }
175 175
176 void 176 void
177 bfa_msix_all(struct bfa_s *bfa, int vec) 177 bfa_msix_all(struct bfa_s *bfa, int vec)
178 { 178 {
179 bfa_intx(bfa); 179 bfa_intx(bfa);
180 } 180 }
181 181
182 bfa_boolean_t 182 bfa_boolean_t
183 bfa_intx(struct bfa_s *bfa) 183 bfa_intx(struct bfa_s *bfa)
184 { 184 {
185 u32 intr, qintr; 185 u32 intr, qintr;
186 int queue; 186 int queue;
187 187
188 intr = readl(bfa->iocfc.bfa_regs.intr_status); 188 intr = readl(bfa->iocfc.bfa_regs.intr_status);
189 if (!intr) 189 if (!intr)
190 return BFA_FALSE; 190 return BFA_FALSE;
191 191
192 /* 192 /*
193 * RME completion queue interrupt 193 * RME completion queue interrupt
194 */ 194 */
195 qintr = intr & __HFN_INT_RME_MASK; 195 qintr = intr & __HFN_INT_RME_MASK;
196 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 196 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
197 197
198 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 198 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
199 if (intr & (__HFN_INT_RME_Q0 << queue)) 199 if (intr & (__HFN_INT_RME_Q0 << queue))
200 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); 200 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
201 } 201 }
202 intr &= ~qintr; 202 intr &= ~qintr;
203 if (!intr) 203 if (!intr)
204 return BFA_TRUE; 204 return BFA_TRUE;
205 205
206 /* 206 /*
207 * CPE completion queue interrupt 207 * CPE completion queue interrupt
208 */ 208 */
209 qintr = intr & __HFN_INT_CPE_MASK; 209 qintr = intr & __HFN_INT_CPE_MASK;
210 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 210 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
211 211
212 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 212 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
213 if (intr & (__HFN_INT_CPE_Q0 << queue)) 213 if (intr & (__HFN_INT_CPE_Q0 << queue))
214 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); 214 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
215 } 215 }
216 intr &= ~qintr; 216 intr &= ~qintr;
217 if (!intr) 217 if (!intr)
218 return BFA_TRUE; 218 return BFA_TRUE;
219 219
220 bfa_msix_lpu_err(bfa, intr); 220 bfa_msix_lpu_err(bfa, intr);
221 221
222 return BFA_TRUE; 222 return BFA_TRUE;
223 } 223 }
224 224
225 void 225 void
226 bfa_isr_enable(struct bfa_s *bfa) 226 bfa_isr_enable(struct bfa_s *bfa)
227 { 227 {
228 u32 intr_unmask; 228 u32 intr_unmask;
229 int pci_func = bfa_ioc_pcifn(&bfa->ioc); 229 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
230 230
231 bfa_trc(bfa, pci_func); 231 bfa_trc(bfa, pci_func);
232 232
233 bfa_msix_install(bfa); 233 bfa_msix_install(bfa);
234 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 234 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
235 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | 235 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
236 __HFN_INT_LL_HALT); 236 __HFN_INT_LL_HALT);
237 237
238 if (pci_func == 0) 238 if (pci_func == 0)
239 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | 239 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
240 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | 240 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
241 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | 241 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
242 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | 242 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
243 __HFN_INT_MBOX_LPU0); 243 __HFN_INT_MBOX_LPU0);
244 else 244 else
245 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | 245 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
246 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | 246 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
247 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | 247 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
248 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | 248 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
249 __HFN_INT_MBOX_LPU1); 249 __HFN_INT_MBOX_LPU1);
250 250
251 writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status); 251 writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
252 writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask); 252 writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
253 bfa->iocfc.intr_mask = ~intr_unmask; 253 bfa->iocfc.intr_mask = ~intr_unmask;
254 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 254 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
255 } 255 }
256 256
257 void 257 void
258 bfa_isr_disable(struct bfa_s *bfa) 258 bfa_isr_disable(struct bfa_s *bfa)
259 { 259 {
260 bfa_isr_mode_set(bfa, BFA_FALSE); 260 bfa_isr_mode_set(bfa, BFA_FALSE);
261 writel(-1L, bfa->iocfc.bfa_regs.intr_mask); 261 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
262 bfa_msix_uninstall(bfa); 262 bfa_msix_uninstall(bfa);
263 } 263 }
264 264
265 void 265 void
266 bfa_msix_reqq(struct bfa_s *bfa, int qid) 266 bfa_msix_reqq(struct bfa_s *bfa, int qid)
267 { 267 {
268 struct list_head *waitq; 268 struct list_head *waitq;
269 269
270 qid &= (BFI_IOC_MAX_CQS - 1); 270 qid &= (BFI_IOC_MAX_CQS - 1);
271 271
272 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 272 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
273 273
274 /* 274 /*
275 * Resume any pending requests in the corresponding reqq. 275 * Resume any pending requests in the corresponding reqq.
276 */ 276 */
277 waitq = bfa_reqq(bfa, qid); 277 waitq = bfa_reqq(bfa, qid);
278 if (!list_empty(waitq)) 278 if (!list_empty(waitq))
279 bfa_reqq_resume(bfa, qid); 279 bfa_reqq_resume(bfa, qid);
280 } 280 }
281 281
282 void 282 void
283 bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) 283 bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
284 { 284 {
285 bfa_trc(bfa, m->mhdr.msg_class); 285 bfa_trc(bfa, m->mhdr.msg_class);
286 bfa_trc(bfa, m->mhdr.msg_id); 286 bfa_trc(bfa, m->mhdr.msg_id);
287 bfa_trc(bfa, m->mhdr.mtag.i2htok); 287 bfa_trc(bfa, m->mhdr.mtag.i2htok);
288 WARN_ON(1); 288 WARN_ON(1);
289 bfa_trc_stop(bfa->trcmod); 289 bfa_trc_stop(bfa->trcmod);
290 } 290 }
291 291
292 void 292 void
293 bfa_msix_rspq(struct bfa_s *bfa, int qid) 293 bfa_msix_rspq(struct bfa_s *bfa, int qid)
294 { 294 {
295 struct bfi_msg_s *m; 295 struct bfi_msg_s *m;
296 u32 pi, ci; 296 u32 pi, ci;
297 struct list_head *waitq; 297 struct list_head *waitq;
298 298
299 bfa_trc_fp(bfa, qid);
300
301 qid &= (BFI_IOC_MAX_CQS - 1); 299 qid &= (BFI_IOC_MAX_CQS - 1);
302 300
303 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); 301 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
304 302
305 ci = bfa_rspq_ci(bfa, qid); 303 ci = bfa_rspq_ci(bfa, qid);
306 pi = bfa_rspq_pi(bfa, qid); 304 pi = bfa_rspq_pi(bfa, qid);
307 305
308 bfa_trc_fp(bfa, ci);
309 bfa_trc_fp(bfa, pi);
310
311 if (bfa->rme_process) { 306 if (bfa->rme_process) {
312 while (ci != pi) { 307 while (ci != pi) {
313 m = bfa_rspq_elem(bfa, qid, ci); 308 m = bfa_rspq_elem(bfa, qid, ci);
314 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
315
316 bfa_isrs[m->mhdr.msg_class] (bfa, m); 309 bfa_isrs[m->mhdr.msg_class] (bfa, m);
317
318 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); 310 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
319 } 311 }
320 } 312 }
321 313
322 /* 314 /*
323 * update CI 315 * update CI
324 */ 316 */
325 bfa_rspq_ci(bfa, qid) = pi; 317 bfa_rspq_ci(bfa, qid) = pi;
326 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 318 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
327 mmiowb(); 319 mmiowb();
328 320
329 /* 321 /*
330 * Resume any pending requests in the corresponding reqq. 322 * Resume any pending requests in the corresponding reqq.
331 */ 323 */
332 waitq = bfa_reqq(bfa, qid); 324 waitq = bfa_reqq(bfa, qid);
333 if (!list_empty(waitq)) 325 if (!list_empty(waitq))
334 bfa_reqq_resume(bfa, qid); 326 bfa_reqq_resume(bfa, qid);
335 } 327 }
336 328
337 void 329 void
338 bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 330 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
339 { 331 {
340 u32 intr, curr_value; 332 u32 intr, curr_value;
341 333
342 intr = readl(bfa->iocfc.bfa_regs.intr_status); 334 intr = readl(bfa->iocfc.bfa_regs.intr_status);
343 335
344 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 336 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
345 bfa_ioc_mbox_isr(&bfa->ioc); 337 bfa_ioc_mbox_isr(&bfa->ioc);
346 338
347 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 339 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
348 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); 340 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
349 341
350 if (intr) { 342 if (intr) {
351 if (intr & __HFN_INT_LL_HALT) { 343 if (intr & __HFN_INT_LL_HALT) {
352 /* 344 /*
353 * If LL_HALT bit is set then FW Init Halt LL Port 345 * If LL_HALT bit is set then FW Init Halt LL Port
354 * Register needs to be cleared as well so Interrupt 346 * Register needs to be cleared as well so Interrupt
355 * Status Register will be cleared. 347 * Status Register will be cleared.
356 */ 348 */
357 curr_value = readl(bfa->ioc.ioc_regs.ll_halt); 349 curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
358 curr_value &= ~__FW_INIT_HALT_P; 350 curr_value &= ~__FW_INIT_HALT_P;
359 writel(curr_value, bfa->ioc.ioc_regs.ll_halt); 351 writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
360 } 352 }
361 353
362 if (intr & __HFN_INT_ERR_PSS) { 354 if (intr & __HFN_INT_ERR_PSS) {
363 /* 355 /*
364 * ERR_PSS bit needs to be cleared as well in case 356 * ERR_PSS bit needs to be cleared as well in case
365 * interrupts are shared so driver's interrupt handler is 357 * interrupts are shared so driver's interrupt handler is
366 * still called even though it is already masked out. 358 * still called even though it is already masked out.
367 */ 359 */
368 curr_value = readl( 360 curr_value = readl(
369 bfa->ioc.ioc_regs.pss_err_status_reg); 361 bfa->ioc.ioc_regs.pss_err_status_reg);
370 curr_value &= __PSS_ERR_STATUS_SET; 362 curr_value &= __PSS_ERR_STATUS_SET;
371 writel(curr_value, 363 writel(curr_value,
372 bfa->ioc.ioc_regs.pss_err_status_reg); 364 bfa->ioc.ioc_regs.pss_err_status_reg);
373 } 365 }
374 366
375 writel(intr, bfa->iocfc.bfa_regs.intr_status); 367 writel(intr, bfa->iocfc.bfa_regs.intr_status);
376 bfa_ioc_error_isr(&bfa->ioc); 368 bfa_ioc_error_isr(&bfa->ioc);
377 } 369 }
378 } 370 }
379 371
380 /* 372 /*
381 * BFA IOC FC related functions 373 * BFA IOC FC related functions
382 */ 374 */
383 375
384 /* 376 /*
385 * BFA IOC private functions 377 * BFA IOC private functions
386 */ 378 */
387 379
388 static void 380 static void
389 bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) 381 bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
390 { 382 {
391 int i, per_reqq_sz, per_rspq_sz; 383 int i, per_reqq_sz, per_rspq_sz;
392 384
393 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), 385 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
394 BFA_DMA_ALIGN_SZ); 386 BFA_DMA_ALIGN_SZ);
395 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), 387 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
396 BFA_DMA_ALIGN_SZ); 388 BFA_DMA_ALIGN_SZ);
397 389
398 /* 390 /*
399 * Calculate CQ size 391 * Calculate CQ size
400 */ 392 */
401 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 393 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
402 *dm_len = *dm_len + per_reqq_sz; 394 *dm_len = *dm_len + per_reqq_sz;
403 *dm_len = *dm_len + per_rspq_sz; 395 *dm_len = *dm_len + per_rspq_sz;
404 } 396 }
405 397
406 /* 398 /*
407 * Calculate Shadow CI/PI size 399 * Calculate Shadow CI/PI size
408 */ 400 */
409 for (i = 0; i < cfg->fwcfg.num_cqs; i++) 401 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
410 *dm_len += (2 * BFA_CACHELINE_SZ); 402 *dm_len += (2 * BFA_CACHELINE_SZ);
411 } 403 }
412 404
413 static void 405 static void
414 bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) 406 bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
415 { 407 {
416 *dm_len += 408 *dm_len +=
417 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 409 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
418 *dm_len += 410 *dm_len +=
419 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 411 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
420 BFA_CACHELINE_SZ); 412 BFA_CACHELINE_SZ);
421 } 413 }
422 414
423 /* 415 /*
424 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 416 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
425 */ 417 */
426 static void 418 static void
427 bfa_iocfc_send_cfg(void *bfa_arg) 419 bfa_iocfc_send_cfg(void *bfa_arg)
428 { 420 {
429 struct bfa_s *bfa = bfa_arg; 421 struct bfa_s *bfa = bfa_arg;
430 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 422 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
431 struct bfi_iocfc_cfg_req_s cfg_req; 423 struct bfi_iocfc_cfg_req_s cfg_req;
432 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; 424 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
433 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; 425 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
434 int i; 426 int i;
435 427
436 WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS); 428 WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
437 bfa_trc(bfa, cfg->fwcfg.num_cqs); 429 bfa_trc(bfa, cfg->fwcfg.num_cqs);
438 430
439 bfa_iocfc_reset_queues(bfa); 431 bfa_iocfc_reset_queues(bfa);
440 432
441 /* 433 /*
442 * initialize IOC configuration info 434 * initialize IOC configuration info
443 */ 435 */
444 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 436 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
445 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 437 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
446 438
447 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 439 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
448 /* 440 /*
449 * dma map REQ and RSP circular queues and shadow pointers 441 * dma map REQ and RSP circular queues and shadow pointers
450 */ 442 */
451 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 443 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
452 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], 444 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
453 iocfc->req_cq_ba[i].pa); 445 iocfc->req_cq_ba[i].pa);
454 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], 446 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
455 iocfc->req_cq_shadow_ci[i].pa); 447 iocfc->req_cq_shadow_ci[i].pa);
456 cfg_info->req_cq_elems[i] = 448 cfg_info->req_cq_elems[i] =
457 cpu_to_be16(cfg->drvcfg.num_reqq_elems); 449 cpu_to_be16(cfg->drvcfg.num_reqq_elems);
458 450
459 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], 451 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
460 iocfc->rsp_cq_ba[i].pa); 452 iocfc->rsp_cq_ba[i].pa);
461 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], 453 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
462 iocfc->rsp_cq_shadow_pi[i].pa); 454 iocfc->rsp_cq_shadow_pi[i].pa);
463 cfg_info->rsp_cq_elems[i] = 455 cfg_info->rsp_cq_elems[i] =
464 cpu_to_be16(cfg->drvcfg.num_rspq_elems); 456 cpu_to_be16(cfg->drvcfg.num_rspq_elems);
465 } 457 }
466 458
467 /* 459 /*
468 * Enable interrupt coalescing if it is driver init path 460 * Enable interrupt coalescing if it is driver init path
469 * and not ioc disable/enable path. 461 * and not ioc disable/enable path.
470 */ 462 */
471 if (!iocfc->cfgdone) 463 if (!iocfc->cfgdone)
472 cfg_info->intr_attr.coalesce = BFA_TRUE; 464 cfg_info->intr_attr.coalesce = BFA_TRUE;
473 465
474 iocfc->cfgdone = BFA_FALSE; 466 iocfc->cfgdone = BFA_FALSE;
475 467
476 /* 468 /*
477 * dma map IOC configuration itself 469 * dma map IOC configuration itself
478 */ 470 */
479 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 471 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
480 bfa_lpuid(bfa)); 472 bfa_lpuid(bfa));
481 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); 473 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
482 474
483 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, 475 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
484 sizeof(struct bfi_iocfc_cfg_req_s)); 476 sizeof(struct bfi_iocfc_cfg_req_s));
485 } 477 }
486 478
487 static void 479 static void
488 bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 480 bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
489 struct bfa_pcidev_s *pcidev) 481 struct bfa_pcidev_s *pcidev)
490 { 482 {
491 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 483 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
492 484
493 bfa->bfad = bfad; 485 bfa->bfad = bfad;
494 iocfc->bfa = bfa; 486 iocfc->bfa = bfa;
495 iocfc->action = BFA_IOCFC_ACT_NONE; 487 iocfc->action = BFA_IOCFC_ACT_NONE;
496 488
497 iocfc->cfg = *cfg; 489 iocfc->cfg = *cfg;
498 490
499 /* 491 /*
500 * Initialize chip specific handlers. 492 * Initialize chip specific handlers.
501 */ 493 */
502 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { 494 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
503 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 495 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
504 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; 496 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
505 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 497 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
506 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 498 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
507 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; 499 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
508 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; 500 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
509 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; 501 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
510 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 502 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
511 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; 503 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
512 } else { 504 } else {
513 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 505 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
514 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; 506 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
515 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; 507 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
516 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 508 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
517 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; 509 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
518 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; 510 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
519 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; 511 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
520 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; 512 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
521 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; 513 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
522 } 514 }
523 515
524 iocfc->hwif.hw_reginit(bfa); 516 iocfc->hwif.hw_reginit(bfa);
525 bfa->msix.nvecs = 0; 517 bfa->msix.nvecs = 0;
526 } 518 }
527 519
528 static void 520 static void
529 bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, 521 bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
530 struct bfa_meminfo_s *meminfo) 522 struct bfa_meminfo_s *meminfo)
531 { 523 {
532 u8 *dm_kva; 524 u8 *dm_kva;
533 u64 dm_pa; 525 u64 dm_pa;
534 int i, per_reqq_sz, per_rspq_sz; 526 int i, per_reqq_sz, per_rspq_sz;
535 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 527 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
536 int dbgsz; 528 int dbgsz;
537 529
538 dm_kva = bfa_meminfo_dma_virt(meminfo); 530 dm_kva = bfa_meminfo_dma_virt(meminfo);
539 dm_pa = bfa_meminfo_dma_phys(meminfo); 531 dm_pa = bfa_meminfo_dma_phys(meminfo);
540 532
541 /* 533 /*
542 * First allocate dma memory for IOC. 534 * First allocate dma memory for IOC.
543 */ 535 */
544 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); 536 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
545 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); 537 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
546 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); 538 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
547 539
548 /* 540 /*
549 * Claim DMA-able memory for the request/response queues and for shadow 541 * Claim DMA-able memory for the request/response queues and for shadow
550 * ci/pi registers 542 * ci/pi registers
551 */ 543 */
552 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), 544 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
553 BFA_DMA_ALIGN_SZ); 545 BFA_DMA_ALIGN_SZ);
554 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), 546 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
555 BFA_DMA_ALIGN_SZ); 547 BFA_DMA_ALIGN_SZ);
556 548
557 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 549 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
558 iocfc->req_cq_ba[i].kva = dm_kva; 550 iocfc->req_cq_ba[i].kva = dm_kva;
559 iocfc->req_cq_ba[i].pa = dm_pa; 551 iocfc->req_cq_ba[i].pa = dm_pa;
560 memset(dm_kva, 0, per_reqq_sz); 552 memset(dm_kva, 0, per_reqq_sz);
561 dm_kva += per_reqq_sz; 553 dm_kva += per_reqq_sz;
562 dm_pa += per_reqq_sz; 554 dm_pa += per_reqq_sz;
563 555
564 iocfc->rsp_cq_ba[i].kva = dm_kva; 556 iocfc->rsp_cq_ba[i].kva = dm_kva;
565 iocfc->rsp_cq_ba[i].pa = dm_pa; 557 iocfc->rsp_cq_ba[i].pa = dm_pa;
566 memset(dm_kva, 0, per_rspq_sz); 558 memset(dm_kva, 0, per_rspq_sz);
567 dm_kva += per_rspq_sz; 559 dm_kva += per_rspq_sz;
568 dm_pa += per_rspq_sz; 560 dm_pa += per_rspq_sz;
569 } 561 }
570 562
571 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 563 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
572 iocfc->req_cq_shadow_ci[i].kva = dm_kva; 564 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
573 iocfc->req_cq_shadow_ci[i].pa = dm_pa; 565 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
574 dm_kva += BFA_CACHELINE_SZ; 566 dm_kva += BFA_CACHELINE_SZ;
575 dm_pa += BFA_CACHELINE_SZ; 567 dm_pa += BFA_CACHELINE_SZ;
576 568
577 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; 569 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
578 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; 570 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
579 dm_kva += BFA_CACHELINE_SZ; 571 dm_kva += BFA_CACHELINE_SZ;
580 dm_pa += BFA_CACHELINE_SZ; 572 dm_pa += BFA_CACHELINE_SZ;
581 } 573 }
582 574
583 /* 575 /*
584 * Claim DMA-able memory for the config info page 576 * Claim DMA-able memory for the config info page
585 */ 577 */
586 bfa->iocfc.cfg_info.kva = dm_kva; 578 bfa->iocfc.cfg_info.kva = dm_kva;
587 bfa->iocfc.cfg_info.pa = dm_pa; 579 bfa->iocfc.cfg_info.pa = dm_pa;
588 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; 580 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
589 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 581 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
590 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 582 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
591 583
592 /* 584 /*
593 * Claim DMA-able memory for the config response 585 * Claim DMA-able memory for the config response
594 */ 586 */
595 bfa->iocfc.cfgrsp_dma.kva = dm_kva; 587 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
596 bfa->iocfc.cfgrsp_dma.pa = dm_pa; 588 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
597 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; 589 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
598 590
599 dm_kva += 591 dm_kva +=
600 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 592 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
601 BFA_CACHELINE_SZ); 593 BFA_CACHELINE_SZ);
602 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 594 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
603 BFA_CACHELINE_SZ); 595 BFA_CACHELINE_SZ);
604 596
605 597
606 bfa_meminfo_dma_virt(meminfo) = dm_kva; 598 bfa_meminfo_dma_virt(meminfo) = dm_kva;
607 bfa_meminfo_dma_phys(meminfo) = dm_pa; 599 bfa_meminfo_dma_phys(meminfo) = dm_pa;
608 600
609 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 601 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
610 if (dbgsz > 0) { 602 if (dbgsz > 0) {
611 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); 603 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
612 bfa_meminfo_kva(meminfo) += dbgsz; 604 bfa_meminfo_kva(meminfo) += dbgsz;
613 } 605 }
614 } 606 }
615 607
616 /* 608 /*
617 * Start BFA submodules. 609 * Start BFA submodules.
618 */ 610 */
619 static void 611 static void
620 bfa_iocfc_start_submod(struct bfa_s *bfa) 612 bfa_iocfc_start_submod(struct bfa_s *bfa)
621 { 613 {
622 int i; 614 int i;
623 615
624 bfa->rme_process = BFA_TRUE; 616 bfa->rme_process = BFA_TRUE;
625 617
626 for (i = 0; hal_mods[i]; i++) 618 for (i = 0; hal_mods[i]; i++)
627 hal_mods[i]->start(bfa); 619 hal_mods[i]->start(bfa);
628 } 620 }
629 621
630 /* 622 /*
631 * Disable BFA submodules. 623 * Disable BFA submodules.
632 */ 624 */
633 static void 625 static void
634 bfa_iocfc_disable_submod(struct bfa_s *bfa) 626 bfa_iocfc_disable_submod(struct bfa_s *bfa)
635 { 627 {
636 int i; 628 int i;
637 629
638 for (i = 0; hal_mods[i]; i++) 630 for (i = 0; hal_mods[i]; i++)
639 hal_mods[i]->iocdisable(bfa); 631 hal_mods[i]->iocdisable(bfa);
640 } 632 }
641 633
642 static void 634 static void
643 bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) 635 bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
644 { 636 {
645 struct bfa_s *bfa = bfa_arg; 637 struct bfa_s *bfa = bfa_arg;
646 638
647 if (complete) { 639 if (complete) {
648 if (bfa->iocfc.cfgdone) 640 if (bfa->iocfc.cfgdone)
649 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 641 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
650 else 642 else
651 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 643 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
652 } else { 644 } else {
653 if (bfa->iocfc.cfgdone) 645 if (bfa->iocfc.cfgdone)
654 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 646 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
655 } 647 }
656 } 648 }
657 649
658 static void 650 static void
659 bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) 651 bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
660 { 652 {
661 struct bfa_s *bfa = bfa_arg; 653 struct bfa_s *bfa = bfa_arg;
662 struct bfad_s *bfad = bfa->bfad; 654 struct bfad_s *bfad = bfa->bfad;
663 655
664 if (compl) 656 if (compl)
665 complete(&bfad->comp); 657 complete(&bfad->comp);
666 else 658 else
667 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 659 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
668 } 660 }
669 661
670 static void 662 static void
671 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) 663 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
672 { 664 {
673 struct bfa_s *bfa = bfa_arg; 665 struct bfa_s *bfa = bfa_arg;
674 struct bfad_s *bfad = bfa->bfad; 666 struct bfad_s *bfad = bfa->bfad;
675 667
676 if (compl) 668 if (compl)
677 complete(&bfad->disable_comp); 669 complete(&bfad->disable_comp);
678 } 670 }
679 671
680 /* 672 /*
681 * Update BFA configuration from firmware configuration. 673 * Update BFA configuration from firmware configuration.
682 */ 674 */
683 static void 675 static void
684 bfa_iocfc_cfgrsp(struct bfa_s *bfa) 676 bfa_iocfc_cfgrsp(struct bfa_s *bfa)
685 { 677 {
686 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 678 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
687 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 679 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
688 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; 680 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
689 681
690 fwcfg->num_cqs = fwcfg->num_cqs; 682 fwcfg->num_cqs = fwcfg->num_cqs;
691 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); 683 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
692 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); 684 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
693 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); 685 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
694 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); 686 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
695 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); 687 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
696 688
697 iocfc->cfgdone = BFA_TRUE; 689 iocfc->cfgdone = BFA_TRUE;
698 690
699 /* 691 /*
700 * Configuration is complete - initialize/start submodules 692 * Configuration is complete - initialize/start submodules
701 */ 693 */
702 bfa_fcport_init(bfa); 694 bfa_fcport_init(bfa);
703 695
704 if (iocfc->action == BFA_IOCFC_ACT_INIT) 696 if (iocfc->action == BFA_IOCFC_ACT_INIT)
705 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 697 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
706 else 698 else
707 bfa_iocfc_start_submod(bfa); 699 bfa_iocfc_start_submod(bfa);
708 } 700 }
709 void 701 void
710 bfa_iocfc_reset_queues(struct bfa_s *bfa) 702 bfa_iocfc_reset_queues(struct bfa_s *bfa)
711 { 703 {
712 int q; 704 int q;
713 705
714 for (q = 0; q < BFI_IOC_MAX_CQS; q++) { 706 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
715 bfa_reqq_ci(bfa, q) = 0; 707 bfa_reqq_ci(bfa, q) = 0;
716 bfa_reqq_pi(bfa, q) = 0; 708 bfa_reqq_pi(bfa, q) = 0;
717 bfa_rspq_ci(bfa, q) = 0; 709 bfa_rspq_ci(bfa, q) = 0;
718 bfa_rspq_pi(bfa, q) = 0; 710 bfa_rspq_pi(bfa, q) = 0;
719 } 711 }
720 } 712 }
721 713
722 /* 714 /*
723 * IOC enable request is complete 715 * IOC enable request is complete
724 */ 716 */
725 static void 717 static void
726 bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) 718 bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
727 { 719 {
728 struct bfa_s *bfa = bfa_arg; 720 struct bfa_s *bfa = bfa_arg;
729 721
730 if (status != BFA_STATUS_OK) { 722 if (status != BFA_STATUS_OK) {
731 bfa_isr_disable(bfa); 723 bfa_isr_disable(bfa);
732 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) 724 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
733 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, 725 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
734 bfa_iocfc_init_cb, bfa); 726 bfa_iocfc_init_cb, bfa);
735 return; 727 return;
736 } 728 }
737 729
738 bfa_iocfc_send_cfg(bfa); 730 bfa_iocfc_send_cfg(bfa);
739 } 731 }
740 732
741 /* 733 /*
742 * IOC disable request is complete 734 * IOC disable request is complete
743 */ 735 */
744 static void 736 static void
745 bfa_iocfc_disable_cbfn(void *bfa_arg) 737 bfa_iocfc_disable_cbfn(void *bfa_arg)
746 { 738 {
747 struct bfa_s *bfa = bfa_arg; 739 struct bfa_s *bfa = bfa_arg;
748 740
749 bfa_isr_disable(bfa); 741 bfa_isr_disable(bfa);
750 bfa_iocfc_disable_submod(bfa); 742 bfa_iocfc_disable_submod(bfa);
751 743
752 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) 744 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
753 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, 745 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
754 bfa); 746 bfa);
755 else { 747 else {
756 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE); 748 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
757 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, 749 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
758 bfa); 750 bfa);
759 } 751 }
760 } 752 }
761 753
762 /* 754 /*
763 * Notify sub-modules of hardware failure. 755 * Notify sub-modules of hardware failure.
764 */ 756 */
765 static void 757 static void
766 bfa_iocfc_hbfail_cbfn(void *bfa_arg) 758 bfa_iocfc_hbfail_cbfn(void *bfa_arg)
767 { 759 {
768 struct bfa_s *bfa = bfa_arg; 760 struct bfa_s *bfa = bfa_arg;
769 761
770 bfa->rme_process = BFA_FALSE; 762 bfa->rme_process = BFA_FALSE;
771 763
772 bfa_isr_disable(bfa); 764 bfa_isr_disable(bfa);
773 bfa_iocfc_disable_submod(bfa); 765 bfa_iocfc_disable_submod(bfa);
774 766
775 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) 767 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
776 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, 768 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
777 bfa); 769 bfa);
778 } 770 }
779 771
/*
 * Actions on chip-reset completion: empty all queues, then re-enable
 * interrupt processing.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
791 783
792 784
793 /* 785 /*
794 * Query IOC memory requirement information. 786 * Query IOC memory requirement information.
795 */ 787 */
796 void 788 void
797 bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 789 bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
798 u32 *dm_len) 790 u32 *dm_len)
799 { 791 {
800 /* dma memory for IOC */ 792 /* dma memory for IOC */
801 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); 793 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
802 794
803 bfa_iocfc_fw_cfg_sz(cfg, dm_len); 795 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
804 bfa_iocfc_cqs_sz(cfg, dm_len); 796 bfa_iocfc_cqs_sz(cfg, dm_len);
805 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 797 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
806 } 798 }
807 799
808 /* 800 /*
809 * Query IOC memory requirement information. 801 * Query IOC memory requirement information.
810 */ 802 */
811 void 803 void
812 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 804 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
813 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 805 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
814 { 806 {
815 int i; 807 int i;
816 struct bfa_ioc_s *ioc = &bfa->ioc; 808 struct bfa_ioc_s *ioc = &bfa->ioc;
817 809
818 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; 810 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
819 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; 811 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
820 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; 812 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
821 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; 813 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
822 814
823 ioc->trcmod = bfa->trcmod; 815 ioc->trcmod = bfa->trcmod;
824 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 816 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
825 817
826 /* 818 /*
827 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 819 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
828 */ 820 */
829 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) 821 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
830 bfa_ioc_set_fcmode(&bfa->ioc); 822 bfa_ioc_set_fcmode(&bfa->ioc);
831 823
832 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); 824 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
833 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); 825 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
834 826
835 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 827 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
836 bfa_iocfc_mem_claim(bfa, cfg, meminfo); 828 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
837 INIT_LIST_HEAD(&bfa->timer_mod.timer_q); 829 INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
838 830
839 INIT_LIST_HEAD(&bfa->comp_q); 831 INIT_LIST_HEAD(&bfa->comp_q);
840 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 832 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
841 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 833 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
842 } 834 }
843 835
844 /* 836 /*
845 * Query IOC memory requirement information. 837 * Query IOC memory requirement information.
846 */ 838 */
847 void 839 void
848 bfa_iocfc_init(struct bfa_s *bfa) 840 bfa_iocfc_init(struct bfa_s *bfa)
849 { 841 {
850 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 842 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
851 bfa_ioc_enable(&bfa->ioc); 843 bfa_ioc_enable(&bfa->ioc);
852 } 844 }
853 845
854 /* 846 /*
855 * IOC start called from bfa_start(). Called to start IOC operations 847 * IOC start called from bfa_start(). Called to start IOC operations
856 * at driver instantiation for this instance. 848 * at driver instantiation for this instance.
857 */ 849 */
858 void 850 void
859 bfa_iocfc_start(struct bfa_s *bfa) 851 bfa_iocfc_start(struct bfa_s *bfa)
860 { 852 {
861 if (bfa->iocfc.cfgdone) 853 if (bfa->iocfc.cfgdone)
862 bfa_iocfc_start_submod(bfa); 854 bfa_iocfc_start_submod(bfa);
863 } 855 }
864 856
865 /* 857 /*
866 * IOC stop called from bfa_stop(). Called only when driver is unloaded 858 * IOC stop called from bfa_stop(). Called only when driver is unloaded
867 * for this instance. 859 * for this instance.
868 */ 860 */
869 void 861 void
870 bfa_iocfc_stop(struct bfa_s *bfa) 862 bfa_iocfc_stop(struct bfa_s *bfa)
871 { 863 {
872 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 864 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
873 865
874 bfa->rme_process = BFA_FALSE; 866 bfa->rme_process = BFA_FALSE;
875 bfa_ioc_disable(&bfa->ioc); 867 bfa_ioc_disable(&bfa->ioc);
876 } 868 }
877 869
878 void 870 void
879 bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) 871 bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
880 { 872 {
881 struct bfa_s *bfa = bfaarg; 873 struct bfa_s *bfa = bfaarg;
882 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 874 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
883 union bfi_iocfc_i2h_msg_u *msg; 875 union bfi_iocfc_i2h_msg_u *msg;
884 876
885 msg = (union bfi_iocfc_i2h_msg_u *) m; 877 msg = (union bfi_iocfc_i2h_msg_u *) m;
886 bfa_trc(bfa, msg->mh.msg_id); 878 bfa_trc(bfa, msg->mh.msg_id);
887 879
888 switch (msg->mh.msg_id) { 880 switch (msg->mh.msg_id) {
889 case BFI_IOCFC_I2H_CFG_REPLY: 881 case BFI_IOCFC_I2H_CFG_REPLY:
890 iocfc->cfg_reply = &msg->cfg_reply; 882 iocfc->cfg_reply = &msg->cfg_reply;
891 bfa_iocfc_cfgrsp(bfa); 883 bfa_iocfc_cfgrsp(bfa);
892 break; 884 break;
893 case BFI_IOCFC_I2H_UPDATEQ_RSP: 885 case BFI_IOCFC_I2H_UPDATEQ_RSP:
894 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); 886 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
895 break; 887 break;
896 default: 888 default:
897 WARN_ON(1); 889 WARN_ON(1);
898 } 890 }
899 } 891 }
900 892
901 void 893 void
902 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) 894 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
903 { 895 {
904 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 896 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
905 897
906 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; 898 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
907 899
908 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? 900 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
909 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : 901 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
910 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); 902 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
911 903
912 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 904 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
913 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : 905 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
914 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); 906 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
915 907
916 attr->config = iocfc->cfg; 908 attr->config = iocfc->cfg;
917 } 909 }
918 910
919 bfa_status_t 911 bfa_status_t
920 bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) 912 bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
921 { 913 {
922 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 914 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
923 struct bfi_iocfc_set_intr_req_s *m; 915 struct bfi_iocfc_set_intr_req_s *m;
924 916
925 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; 917 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
926 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); 918 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
927 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); 919 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
928 920
929 if (!bfa_iocfc_is_operational(bfa)) 921 if (!bfa_iocfc_is_operational(bfa))
930 return BFA_STATUS_OK; 922 return BFA_STATUS_OK;
931 923
932 m = bfa_reqq_next(bfa, BFA_REQQ_IOC); 924 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
933 if (!m) 925 if (!m)
934 return BFA_STATUS_DEVBUSY; 926 return BFA_STATUS_DEVBUSY;
935 927
936 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, 928 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
937 bfa_lpuid(bfa)); 929 bfa_lpuid(bfa));
938 m->coalesce = iocfc->cfginfo->intr_attr.coalesce; 930 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
939 m->delay = iocfc->cfginfo->intr_attr.delay; 931 m->delay = iocfc->cfginfo->intr_attr.delay;
940 m->latency = iocfc->cfginfo->intr_attr.latency; 932 m->latency = iocfc->cfginfo->intr_attr.latency;
941 933
942 bfa_trc(bfa, attr->delay); 934 bfa_trc(bfa, attr->delay);
943 bfa_trc(bfa, attr->latency); 935 bfa_trc(bfa, attr->latency);
944 936
945 bfa_reqq_produce(bfa, BFA_REQQ_IOC); 937 bfa_reqq_produce(bfa, BFA_REQQ_IOC);
946 return BFA_STATUS_OK; 938 return BFA_STATUS_OK;
947 } 939 }
948 940
949 void 941 void
950 bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) 942 bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
951 { 943 {
952 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 944 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
953 945
954 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 946 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
955 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 947 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
956 } 948 }
957 /* 949 /*
958 * Enable IOC after it is disabled. 950 * Enable IOC after it is disabled.
959 */ 951 */
960 void 952 void
961 bfa_iocfc_enable(struct bfa_s *bfa) 953 bfa_iocfc_enable(struct bfa_s *bfa)
962 { 954 {
963 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 955 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
964 "IOC Enable"); 956 "IOC Enable");
965 bfa_ioc_enable(&bfa->ioc); 957 bfa_ioc_enable(&bfa->ioc);
966 } 958 }
967 959
968 void 960 void
969 bfa_iocfc_disable(struct bfa_s *bfa) 961 bfa_iocfc_disable(struct bfa_s *bfa)
970 { 962 {
971 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 963 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
972 "IOC Disable"); 964 "IOC Disable");
973 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; 965 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
974 966
975 bfa->rme_process = BFA_FALSE; 967 bfa->rme_process = BFA_FALSE;
976 bfa_ioc_disable(&bfa->ioc); 968 bfa_ioc_disable(&bfa->ioc);
977 } 969 }
978 970
979 971
980 bfa_boolean_t 972 bfa_boolean_t
981 bfa_iocfc_is_operational(struct bfa_s *bfa) 973 bfa_iocfc_is_operational(struct bfa_s *bfa)
982 { 974 {
983 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 975 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
984 } 976 }
985 977
986 /* 978 /*
987 * Return boot target port wwns -- read from boot information in flash. 979 * Return boot target port wwns -- read from boot information in flash.
988 */ 980 */
989 void 981 void
990 bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) 982 bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
991 { 983 {
992 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 984 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
993 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 985 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
994 int i; 986 int i;
995 987
996 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { 988 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
997 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); 989 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
998 *nwwns = cfgrsp->pbc_cfg.nbluns; 990 *nwwns = cfgrsp->pbc_cfg.nbluns;
999 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) 991 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1000 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; 992 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1001 993
1002 return; 994 return;
1003 } 995 }
1004 996
1005 *nwwns = cfgrsp->bootwwns.nwwns; 997 *nwwns = cfgrsp->bootwwns.nwwns;
1006 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); 998 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1007 } 999 }
1008 1000
1009 void 1001 void
1010 bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg) 1002 bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
1011 { 1003 {
1012 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1004 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1013 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 1005 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1014 1006
1015 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; 1007 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
1016 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; 1008 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
1017 pbcfg->speed = cfgrsp->pbc_cfg.port_speed; 1009 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
1018 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); 1010 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
1019 } 1011 }
1020 1012
1021 int 1013 int
1022 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) 1014 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1023 { 1015 {
1024 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1016 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1025 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 1017 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1026 1018
1027 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); 1019 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1028 return cfgrsp->pbc_cfg.nvports; 1020 return cfgrsp->pbc_cfg.nvports;
1029 } 1021 }
1030 1022
1031 1023
1032 /* 1024 /*
1033 * Use this function query the memory requirement of the BFA library. 1025 * Use this function query the memory requirement of the BFA library.
1034 * This function needs to be called before bfa_attach() to get the 1026 * This function needs to be called before bfa_attach() to get the
1035 * memory required of the BFA layer for a given driver configuration. 1027 * memory required of the BFA layer for a given driver configuration.
1036 * 1028 *
1037 * This call will fail, if the cap is out of range compared to pre-defined 1029 * This call will fail, if the cap is out of range compared to pre-defined
1038 * values within the BFA library 1030 * values within the BFA library
1039 * 1031 *
1040 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate 1032 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
1041 * its configuration in this structure. 1033 * its configuration in this structure.
1042 * The default values for struct bfa_iocfc_cfg_s can be 1034 * The default values for struct bfa_iocfc_cfg_s can be
1043 * fetched using bfa_cfg_get_default() API. 1035 * fetched using bfa_cfg_get_default() API.
1044 * 1036 *
1045 * If cap's boundary check fails, the library will use 1037 * If cap's boundary check fails, the library will use
1046 * the default bfa_cap_t values (and log a warning msg). 1038 * the default bfa_cap_t values (and log a warning msg).
1047 * 1039 *
1048 * @param[out] meminfo - pointer to bfa_meminfo_t. This content 1040 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
1049 * indicates the memory type (see bfa_mem_type_t) and 1041 * indicates the memory type (see bfa_mem_type_t) and
1050 * amount of memory required. 1042 * amount of memory required.
1051 * 1043 *
1052 * Driver should allocate the memory, populate the 1044 * Driver should allocate the memory, populate the
1053 * starting address for each block and provide the same 1045 * starting address for each block and provide the same
1054 * structure as input parameter to bfa_attach() call. 1046 * structure as input parameter to bfa_attach() call.
1055 * 1047 *
1056 * @return void 1048 * @return void
1057 * 1049 *
1058 * Special Considerations: @note 1050 * Special Considerations: @note
1059 */ 1051 */
1060 void 1052 void
1061 bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) 1053 bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1062 { 1054 {
1063 int i; 1055 int i;
1064 u32 km_len = 0, dm_len = 0; 1056 u32 km_len = 0, dm_len = 0;
1065 1057
1066 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1058 WARN_ON((cfg == NULL) || (meminfo == NULL));
1067 1059
1068 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1060 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1069 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = 1061 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
1070 BFA_MEM_TYPE_KVA; 1062 BFA_MEM_TYPE_KVA;
1071 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = 1063 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
1072 BFA_MEM_TYPE_DMA; 1064 BFA_MEM_TYPE_DMA;
1073 1065
1074 bfa_iocfc_meminfo(cfg, &km_len, &dm_len); 1066 bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
1075 1067
1076 for (i = 0; hal_mods[i]; i++) 1068 for (i = 0; hal_mods[i]; i++)
1077 hal_mods[i]->meminfo(cfg, &km_len, &dm_len); 1069 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
1078 1070
1079 dm_len += bfa_port_meminfo(); 1071 dm_len += bfa_port_meminfo();
1080 1072
1081 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; 1073 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
1082 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1074 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
1083 } 1075 }
1084 1076
1085 /* 1077 /*
1086 * Use this function to do attach the driver instance with the BFA 1078 * Use this function to do attach the driver instance with the BFA
1087 * library. This function will not trigger any HW initialization 1079 * library. This function will not trigger any HW initialization
1088 * process (which will be done in bfa_init() call) 1080 * process (which will be done in bfa_init() call)
1089 * 1081 *
1090 * This call will fail, if the cap is out of range compared to 1082 * This call will fail, if the cap is out of range compared to
1091 * pre-defined values within the BFA library 1083 * pre-defined values within the BFA library
1092 * 1084 *
1093 * @param[out] bfa Pointer to bfa_t. 1085 * @param[out] bfa Pointer to bfa_t.
1094 * @param[in] bfad Opaque handle back to the driver's IOC structure 1086 * @param[in] bfad Opaque handle back to the driver's IOC structure
1095 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure 1087 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
1096 * that was used in bfa_cfg_get_meminfo(). 1088 * that was used in bfa_cfg_get_meminfo().
1097 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should 1089 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
1098 * use the bfa_cfg_get_meminfo() call to 1090 * use the bfa_cfg_get_meminfo() call to
1099 * find the memory blocks required, allocate the 1091 * find the memory blocks required, allocate the
1100 * required memory and provide the starting addresses. 1092 * required memory and provide the starting addresses.
1101 * @param[in] pcidev pointer to struct bfa_pcidev_s 1093 * @param[in] pcidev pointer to struct bfa_pcidev_s
1102 * 1094 *
1103 * @return 1095 * @return
1104 * void 1096 * void
1105 * 1097 *
1106 * Special Considerations: 1098 * Special Considerations:
1107 * 1099 *
1108 * @note 1100 * @note
1109 * 1101 *
1110 */ 1102 */
1111 void 1103 void
1112 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1104 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1113 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1105 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1114 { 1106 {
1115 int i; 1107 int i;
1116 struct bfa_mem_elem_s *melem; 1108 struct bfa_mem_elem_s *melem;
1117 1109
1118 bfa->fcs = BFA_FALSE; 1110 bfa->fcs = BFA_FALSE;
1119 1111
1120 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1112 WARN_ON((cfg == NULL) || (meminfo == NULL));
1121 1113
1122 /* 1114 /*
1123 * initialize all memory pointers for iterative allocation 1115 * initialize all memory pointers for iterative allocation
1124 */ 1116 */
1125 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 1117 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
1126 melem = meminfo->meminfo + i; 1118 melem = meminfo->meminfo + i;
1127 melem->kva_curp = melem->kva; 1119 melem->kva_curp = melem->kva;
1128 melem->dma_curp = melem->dma; 1120 melem->dma_curp = melem->dma;
1129 } 1121 }
1130 1122
1131 bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev); 1123 bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
1132 1124
1133 for (i = 0; hal_mods[i]; i++) 1125 for (i = 0; hal_mods[i]; i++)
1134 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); 1126 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
1135 1127
1136 bfa_com_port_attach(bfa, meminfo); 1128 bfa_com_port_attach(bfa, meminfo);
1137 } 1129 }
1138 1130
1139 /* 1131 /*
1140 * Use this function to delete a BFA IOC. IOC should be stopped (by 1132 * Use this function to delete a BFA IOC. IOC should be stopped (by
1141 * calling bfa_stop()) before this function call. 1133 * calling bfa_stop()) before this function call.
1142 * 1134 *
1143 * @param[in] bfa - pointer to bfa_t. 1135 * @param[in] bfa - pointer to bfa_t.
1144 * 1136 *
1145 * @return 1137 * @return
1146 * void 1138 * void
1147 * 1139 *
1148 * Special Considerations: 1140 * Special Considerations:
1149 * 1141 *
1150 * @note 1142 * @note
1151 */ 1143 */
1152 void 1144 void
1153 bfa_detach(struct bfa_s *bfa) 1145 bfa_detach(struct bfa_s *bfa)
1154 { 1146 {
1155 int i; 1147 int i;
1156 1148
1157 for (i = 0; hal_mods[i]; i++) 1149 for (i = 0; hal_mods[i]; i++)
1158 hal_mods[i]->detach(bfa); 1150 hal_mods[i]->detach(bfa);
1159 bfa_ioc_detach(&bfa->ioc); 1151 bfa_ioc_detach(&bfa->ioc);
1160 } 1152 }
1161 1153
1162 void 1154 void
1163 bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) 1155 bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
1164 { 1156 {
1165 INIT_LIST_HEAD(comp_q); 1157 INIT_LIST_HEAD(comp_q);
1166 list_splice_tail_init(&bfa->comp_q, comp_q); 1158 list_splice_tail_init(&bfa->comp_q, comp_q);
1167 } 1159 }
1168 1160
1169 void 1161 void
1170 bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) 1162 bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1171 { 1163 {
1172 struct list_head *qe; 1164 struct list_head *qe;
1173 struct list_head *qen; 1165 struct list_head *qen;
1174 struct bfa_cb_qe_s *hcb_qe; 1166 struct bfa_cb_qe_s *hcb_qe;
1175 1167
1176 list_for_each_safe(qe, qen, comp_q) { 1168 list_for_each_safe(qe, qen, comp_q) {
1177 hcb_qe = (struct bfa_cb_qe_s *) qe; 1169 hcb_qe = (struct bfa_cb_qe_s *) qe;
1178 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); 1170 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1179 } 1171 }
1180 } 1172 }
1181 1173
1182 void 1174 void
1183 bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) 1175 bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1184 { 1176 {
1185 struct list_head *qe; 1177 struct list_head *qe;
1186 struct bfa_cb_qe_s *hcb_qe; 1178 struct bfa_cb_qe_s *hcb_qe;
1187 1179
1188 while (!list_empty(comp_q)) { 1180 while (!list_empty(comp_q)) {
1189 bfa_q_deq(comp_q, &qe); 1181 bfa_q_deq(comp_q, &qe);
1190 hcb_qe = (struct bfa_cb_qe_s *) qe; 1182 hcb_qe = (struct bfa_cb_qe_s *) qe;
1191 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); 1183 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1192 } 1184 }
1193 } 1185 }
1194 1186
1195 1187
1196 /* 1188 /*
1197 * Return the list of PCI vendor/device id lists supported by this 1189 * Return the list of PCI vendor/device id lists supported by this
1198 * BFA instance. 1190 * BFA instance.
1199 */ 1191 */
1200 void 1192 void
1201 bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) 1193 bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1202 { 1194 {
1203 static struct bfa_pciid_s __pciids[] = { 1195 static struct bfa_pciid_s __pciids[] = {
1204 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, 1196 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1205 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, 1197 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1206 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, 1198 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
1207 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, 1199 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
1208 }; 1200 };
1209 1201
1210 *npciids = sizeof(__pciids) / sizeof(__pciids[0]); 1202 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
1211 *pciids = __pciids; 1203 *pciids = __pciids;
1212 } 1204 }
1213 1205
1214 /* 1206 /*
1215 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1207 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
1216 * into BFA layer). The OS driver can then turn back and overwrite entries that 1208 * into BFA layer). The OS driver can then turn back and overwrite entries that
1217 * have been configured by the user. 1209 * have been configured by the user.
1218 * 1210 *
1219 * @param[in] cfg - pointer to bfa_ioc_cfg_t 1211 * @param[in] cfg - pointer to bfa_ioc_cfg_t
1220 * 1212 *
1221 * @return 1213 * @return
1222 * void 1214 * void
1223 * 1215 *
1224 * Special Considerations: 1216 * Special Considerations:
1225 * note 1217 * note
1226 */ 1218 */
1227 void 1219 void
1228 bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) 1220 bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
1229 { 1221 {
1230 cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; 1222 cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
1231 cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; 1223 cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
1232 cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; 1224 cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
1233 cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; 1225 cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
1234 cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; 1226 cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
1235 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; 1227 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
1236 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; 1228 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
1237 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; 1229 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
1238 1230
1239 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; 1231 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
1240 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; 1232 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
1241 cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; 1233 cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
1242 cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; 1234 cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
1243 cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; 1235 cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
1244 cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; 1236 cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
1245 cfg->drvcfg.ioc_recover = BFA_FALSE; 1237 cfg->drvcfg.ioc_recover = BFA_FALSE;
1246 cfg->drvcfg.delay_comp = BFA_FALSE; 1238 cfg->drvcfg.delay_comp = BFA_FALSE;
1247 1239
1248 } 1240 }
1249 1241
1250 void 1242 void
1251 bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) 1243 bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
1252 { 1244 {
1253 bfa_cfg_get_default(cfg); 1245 bfa_cfg_get_default(cfg);
1254 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; 1246 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
1255 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; 1247 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
1256 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; 1248 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
1257 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; 1249 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
1258 cfg->fwcfg.num_rports = BFA_RPORT_MIN; 1250 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
1259 1251
1260 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 1252 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
1261 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; 1253 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
1262 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; 1254 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
1263 cfg->drvcfg.min_cfg = BFA_TRUE; 1255 cfg->drvcfg.min_cfg = BFA_TRUE;
1264 } 1256 }
1265 1257
drivers/scsi/bfa/bfa_cs.h
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 /* 18 /*
19 * bfa_cs.h BFA common services 19 * bfa_cs.h BFA common services
20 */ 20 */
21 21
22 #ifndef __BFA_CS_H__ 22 #ifndef __BFA_CS_H__
23 #define __BFA_CS_H__ 23 #define __BFA_CS_H__
24 24
25 #include "bfad_drv.h" 25 #include "bfad_drv.h"
26 26
27 /* 27 /*
28 * BFA TRC 28 * BFA TRC
29 */ 29 */
30 30
#ifndef BFA_TRC_MAX
#define BFA_TRC_MAX	(4 * 1024)	/* ring entries; must be a power of 2
					 * (tail/head use "& (BFA_TRC_MAX-1)") */
#endif

/*
 * Trace timestamp in microseconds.
 *
 * Fix: the "#ifndef BFA_TRC_TS" tick-counter fallback that used to follow
 * this definition was unreachable dead code -- BFA_TRC_TS is defined
 * unconditionally right here, so the guard could never fire. Removed.
 */
#define BFA_TRC_TS(_trcm)						\
	({								\
		struct timeval tv;					\
									\
		do_gettimeofday(&tv);					\
		(tv.tv_sec*1000000+tv.tv_usec);				\
	})
46 46
47 struct bfa_trc_s { 47 struct bfa_trc_s {
48 #ifdef __BIG_ENDIAN 48 #ifdef __BIG_ENDIAN
49 u16 fileno; 49 u16 fileno;
50 u16 line; 50 u16 line;
51 #else 51 #else
52 u16 line; 52 u16 line;
53 u16 fileno; 53 u16 fileno;
54 #endif 54 #endif
55 u32 timestamp; 55 u32 timestamp;
56 union { 56 union {
57 struct { 57 struct {
58 u32 rsvd; 58 u32 rsvd;
59 u32 u32; 59 u32 u32;
60 } u32; 60 } u32;
61 u64 u64; 61 u64 u64;
62 } data; 62 } data;
63 }; 63 };
64 64
65 struct bfa_trc_mod_s { 65 struct bfa_trc_mod_s {
66 u32 head; 66 u32 head;
67 u32 tail; 67 u32 tail;
68 u32 ntrc; 68 u32 ntrc;
69 u32 stopped; 69 u32 stopped;
70 u32 ticks; 70 u32 ticks;
71 u32 rsvd[3]; 71 u32 rsvd[3];
72 struct bfa_trc_s trc[BFA_TRC_MAX]; 72 struct bfa_trc_s trc[BFA_TRC_MAX];
73 }; 73 };
74 74
75 enum { 75 enum {
76 BFA_TRC_HAL = 1, /* BFA modules */ 76 BFA_TRC_HAL = 1, /* BFA modules */
77 BFA_TRC_FCS = 2, /* BFA FCS modules */ 77 BFA_TRC_FCS = 2, /* BFA FCS modules */
78 BFA_TRC_LDRV = 3, /* Linux driver modules */ 78 BFA_TRC_LDRV = 3, /* Linux driver modules */
79 BFA_TRC_CNA = 4, /* Common modules */ 79 BFA_TRC_CNA = 4, /* Common modules */
80 }; 80 };
81 #define BFA_TRC_MOD_SH 10 81 #define BFA_TRC_MOD_SH 10
82 #define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) 82 #define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
83 83
84 /* 84 /*
85 * Define a new tracing file (module). Module should match one defined above. 85 * Define a new tracing file (module). Module should match one defined above.
86 */ 86 */
87 #define BFA_TRC_FILE(__mod, __submod) \ 87 #define BFA_TRC_FILE(__mod, __submod) \
88 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \ 88 static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
89 BFA_TRC_MOD(__mod)) 89 BFA_TRC_MOD(__mod))
90 90
91 91
92 #define bfa_trc32(_trcp, _data) \ 92 #define bfa_trc32(_trcp, _data) \
93 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data) 93 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
94 #define bfa_trc(_trcp, _data) \ 94 #define bfa_trc(_trcp, _data) \
95 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data) 95 __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
96 96
97 static inline void 97 static inline void
98 bfa_trc_init(struct bfa_trc_mod_s *trcm) 98 bfa_trc_init(struct bfa_trc_mod_s *trcm)
99 { 99 {
100 trcm->head = trcm->tail = trcm->stopped = 0; 100 trcm->head = trcm->tail = trcm->stopped = 0;
101 trcm->ntrc = BFA_TRC_MAX; 101 trcm->ntrc = BFA_TRC_MAX;
102 } 102 }
103 103
104 static inline void 104 static inline void
105 bfa_trc_stop(struct bfa_trc_mod_s *trcm) 105 bfa_trc_stop(struct bfa_trc_mod_s *trcm)
106 { 106 {
107 trcm->stopped = 1; 107 trcm->stopped = 1;
108 } 108 }
109 109
#ifdef FWTRC
extern void dc_flush(void *data);	/* data-cache flush hook for fw trace */
#else
#define dc_flush(data)			/* no-op in normal builds; candidate
					 * for removal if FWTRC stays unused */
#endif
115
116
117 static inline void 110 static inline void
118 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) 111 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
119 { 112 {
120 int tail = trcm->tail; 113 int tail = trcm->tail;
121 struct bfa_trc_s *trc = &trcm->trc[tail]; 114 struct bfa_trc_s *trc = &trcm->trc[tail];
122 115
123 if (trcm->stopped) 116 if (trcm->stopped)
124 return; 117 return;
125 118
126 trc->fileno = (u16) fileno; 119 trc->fileno = (u16) fileno;
127 trc->line = (u16) line; 120 trc->line = (u16) line;
128 trc->data.u64 = data; 121 trc->data.u64 = data;
129 trc->timestamp = BFA_TRC_TS(trcm); 122 trc->timestamp = BFA_TRC_TS(trcm);
130 dc_flush(trc);
131 123
132 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); 124 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
133 if (trcm->tail == trcm->head) 125 if (trcm->tail == trcm->head)
134 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); 126 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
135 dc_flush(trcm);
136 } 127 }
137 128
138 129
139 static inline void 130 static inline void
140 __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) 131 __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
141 { 132 {
142 int tail = trcm->tail; 133 int tail = trcm->tail;
143 struct bfa_trc_s *trc = &trcm->trc[tail]; 134 struct bfa_trc_s *trc = &trcm->trc[tail];
144 135
145 if (trcm->stopped) 136 if (trcm->stopped)
146 return; 137 return;
147 138
148 trc->fileno = (u16) fileno; 139 trc->fileno = (u16) fileno;
149 trc->line = (u16) line; 140 trc->line = (u16) line;
150 trc->data.u32.u32 = data; 141 trc->data.u32.u32 = data;
151 trc->timestamp = BFA_TRC_TS(trcm); 142 trc->timestamp = BFA_TRC_TS(trcm);
152 dc_flush(trc);
153 143
154 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); 144 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
155 if (trcm->tail == trcm->head) 145 if (trcm->tail == trcm->head)
156 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); 146 trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
157 dc_flush(trcm);
158 } 147 }
159 148
/* Fast-path trace: compiled out entirely in BFA_PERF_BUILD builds. */
#ifndef BFA_PERF_BUILD
#define bfa_trc_fp(_trcp, _data)	bfa_trc(_trcp, _data)
#else
#define bfa_trc_fp(_trcp, _data)
#endif

/*
 * Report an unexpected state-machine event: trace it with a 0xDEAD
 * marker and log an assertion failure.
 *
 * Fix: the printk format string lacked a trailing newline, letting the
 * next unrelated kernel message run on in the same log line.
 */
#define bfa_sm_fault(__mod, __event)	do {				\
	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
	printk(KERN_ERR "Assertion failure: %s:%d: %d\n",		\
		__FILE__, __LINE__, (__event));				\
} while (0)

/* Fast-path assert: compiled out entirely in BFA_PERF_BUILD builds. */
#ifndef BFA_PERF_BUILD
#define bfa_assert_fp(__cond)	bfa_assert(__cond)
#else
#define bfa_assert_fp(__cond)
#endif
177
/*
 * BFA queue definitions.
 *
 * Fix: removed the BFA_Q_DBG_INIT() hooks from the dequeue macros --
 * the driver was built with -DBFA_PERF_BUILD (see Makefile), under
 * which BFA_Q_DBG_INIT expands to nothing, so the hooks were inactive.
 */
#define bfa_q_first(_q)	((void *)(((struct list_head *) (_q))->next))
#define bfa_q_next(_qe)	(((struct list_head *) (_qe))->next)
#define bfa_q_prev(_qe)	(((struct list_head *) (_qe))->prev)

/*
 * bfa_q_qe_init - to initialize a queue element
 */
#define bfa_q_qe_init(_qe) {						\
	bfa_q_next(_qe) = (struct list_head *) NULL;			\
	bfa_q_prev(_qe) = (struct list_head *) NULL;			\
}

/*
 * bfa_q_deq - dequeue an element from head of the queue;
 * *_qe is set to NULL when the queue is empty.
 */
#define bfa_q_deq(_q, _qe) {						\
	if (!list_empty(_q)) {						\
		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
				(struct list_head *) (_q);		\
		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
	} else {							\
		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
	}								\
}

/*
 * bfa_q_deq_tail - dequeue an element from tail of the queue;
 * *_qe is set to NULL when the queue is empty.
 */
#define bfa_q_deq_tail(_q, _qe) {					\
	if (!list_empty(_q)) {						\
		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
				(struct list_head *) (_q);		\
		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
	} else {							\
		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
	}								\
}
220 195
221 static inline int 196 static inline int
222 bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) 197 bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
223 { 198 {
224 struct list_head *tqe; 199 struct list_head *tqe;
225 200
226 tqe = bfa_q_next(q); 201 tqe = bfa_q_next(q);
227 while (tqe != q) { 202 while (tqe != q) {
228 if (tqe == qe) 203 if (tqe == qe)
229 return 1; 204 return 1;
230 tqe = bfa_q_next(tqe); 205 tqe = bfa_q_next(tqe);
231 if (tqe == NULL) 206 if (tqe == NULL)
232 break; 207 break;
233 } 208 }
234 return 0; 209 return 0;
235 } 210 }
236
/*
 * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
 * consistent across modules)
 *
 * BFA_Q_DBG_INIT() re-initializes a freshly dequeued element in debug
 * builds and compiles away when BFA_PERF_BUILD is defined.
 */
#ifndef BFA_PERF_BUILD
#define BFA_Q_DBG_INIT(_qe)	bfa_q_qe_init(_qe)
#else
#define BFA_Q_DBG_INIT(_qe)
#endif

#define bfa_q_is_on_q(_q, _qe)	\
	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
249 214
/*
 * @ BFA state machine interfaces
 */

/* A state is a function taking (object, event). */
typedef void (*bfa_sm_t)(void *sm, int event);

/*
 * oc - object class eg. bfa_ioc
 * st - state, eg. reset
 * otype - object type, eg. struct bfa_ioc_s
 * etype - object type, eg. enum ioc_event
 */
#define bfa_sm_state_decl(oc, st, otype, etype)			\
	static void oc ## _sm_ ## st(otype * fsm, etype event)

#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm)		((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
269 234
270 /* 235 /*
271 * For converting from state machine function to state encoding. 236 * For converting from state machine function to state encoding.
272 */ 237 */
273 struct bfa_sm_table_s { 238 struct bfa_sm_table_s {
274 bfa_sm_t sm; /* state machine function */ 239 bfa_sm_t sm; /* state machine function */
275 int state; /* state machine encoding */ 240 int state; /* state machine encoding */
276 char *name; /* state name for display */ 241 char *name; /* state name for display */
277 }; 242 };
278 #define BFA_SM(_sm) ((bfa_sm_t)(_sm)) 243 #define BFA_SM(_sm) ((bfa_sm_t)(_sm))
279 244
280 /* 245 /*
281 * State machine with entry actions. 246 * State machine with entry actions.
282 */ 247 */
283 typedef void (*bfa_fsm_t)(void *fsm, int event); 248 typedef void (*bfa_fsm_t)(void *fsm, int event);
284 249
/*
 * oc - object class eg. bfa_ioc
 * st - state, eg. reset
 * otype - object type, eg. struct bfa_ioc_s
 * etype - object type, eg. enum ioc_event
 *
 * Declares both the state handler and its entry action.
 */
#define bfa_fsm_state_decl(oc, st, otype, etype)		\
	static void oc ## _sm_ ## st(otype * fsm, etype event);	\
	static void oc ## _sm_ ## st ## _entry(otype * fsm)

/* Transition: install the new state, then run its entry action. */
#define bfa_fsm_set_state(_fsm, _state)	do {	\
	(_fsm)->fsm = (bfa_fsm_t)(_state);	\
	_state ## _entry(_fsm);			\
} while (0)

#define bfa_fsm_send_event(_fsm, _event)	((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm)			((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state)		\
	((_fsm)->fsm == (bfa_fsm_t)(_state))
304 269
305 static inline int 270 static inline int
306 bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) 271 bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
307 { 272 {
308 int i = 0; 273 int i = 0;
309 274
310 while (smt[i].sm && smt[i].sm != sm) 275 while (smt[i].sm && smt[i].sm != sm)
311 i++; 276 i++;
312 return smt[i].state; 277 return smt[i].state;
313 } 278 }
314 279
/*
 * @ Generic wait counter.
 */

/* Callback fired when the counter drops to zero. */
typedef void (*bfa_wc_resume_t) (void *cbarg);

struct bfa_wc_s {
	bfa_wc_resume_t wc_resume;	/* resume callback */
	void		*wc_cbarg;	/* callback argument */
	int		wc_count;	/* outstanding references */
};
326 291
327 static inline void 292 static inline void
328 bfa_wc_up(struct bfa_wc_s *wc) 293 bfa_wc_up(struct bfa_wc_s *wc)
329 { 294 {
330 wc->wc_count++; 295 wc->wc_count++;
331 } 296 }
332 297
333 static inline void 298 static inline void
334 bfa_wc_down(struct bfa_wc_s *wc) 299 bfa_wc_down(struct bfa_wc_s *wc)
335 { 300 {
336 wc->wc_count--; 301 wc->wc_count--;
337 if (wc->wc_count == 0) 302 if (wc->wc_count == 0)
338 wc->wc_resume(wc->wc_cbarg); 303 wc->wc_resume(wc->wc_cbarg);
339 } 304 }
340 305
341 /* 306 /*
342 * Initialize a waiting counter. 307 * Initialize a waiting counter.
343 */ 308 */
344 static inline void 309 static inline void
345 bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) 310 bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
346 { 311 {
347 wc->wc_resume = wc_resume; 312 wc->wc_resume = wc_resume;
348 wc->wc_cbarg = wc_cbarg; 313 wc->wc_cbarg = wc_cbarg;
349 wc->wc_count = 0; 314 wc->wc_count = 0;
350 bfa_wc_up(wc); 315 bfa_wc_up(wc);
351 } 316 }
352 317
/*
 * Wait for counter to reach zero: releases the reference taken by
 * bfa_wc_init(); resume fires immediately if nothing else is pending.
 */
static inline void
bfa_wc_wait(struct bfa_wc_s *wc)
{
	bfa_wc_down(wc);
}
361 326
362 static inline void 327 static inline void
363 wwn2str(char *wwn_str, u64 wwn) 328 wwn2str(char *wwn_str, u64 wwn)
364 { 329 {
365 union { 330 union {
366 u64 wwn; 331 u64 wwn;
367 u8 byte[8]; 332 u8 byte[8];
368 } w; 333 } w;
369 334
370 w.wwn = wwn; 335 w.wwn = wwn;
371 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0], 336 sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
372 w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5], 337 w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
373 w.byte[6], w.byte[7]); 338 w.byte[6], w.byte[7]);
374 } 339 }
375 340
376 static inline void 341 static inline void
377 fcid2str(char *fcid_str, u32 fcid) 342 fcid2str(char *fcid_str, u32 fcid)
378 { 343 {
379 union { 344 union {
380 u32 fcid; 345 u32 fcid;
381 u8 byte[4]; 346 u8 byte[4];
382 } f; 347 } f;
383 348
384 f.fcid = fcid; 349 f.fcid = fcid;
385 sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]); 350 sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
386 } 351 }
387 352
/* Reverse the three low-order bytes of a 24-bit value. */
#define bfa_swap_3b(_x)			\
	((((_x) & 0xff) << 16) |	\
	((_x) & 0x00ff00) |		\
	(((_x) & 0xff0000) >> 16))

/* 24-bit host <-> network conversion: swap only on little-endian hosts. */
#ifndef __BIG_ENDIAN
#define bfa_hton3b(_x)	bfa_swap_3b(_x)
#else
#define bfa_hton3b(_x)	(_x)
#endif

#define bfa_ntoh3b(_x)	bfa_hton3b(_x)
400 365
401 #endif /* __BFA_CS_H__ */ 366 #endif /* __BFA_CS_H__ */
402 367
drivers/scsi/bfa/bfa_fcpim.c
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 #include "bfad_drv.h" 18 #include "bfad_drv.h"
19 #include "bfa_modules.h" 19 #include "bfa_modules.h"
20 20
BFA_TRC_FILE(HAL, FCPIM);
BFA_MODULE(fcpim);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

/*
 * Map an itnim tag to its slot in the itnim array.
 * NOTE(review): the mask idiom assumes num_itnims is a power of two --
 * confirm against the configuration code.
 */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

/* Link an itnim onto its fcpim's itnim list. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

/*
 * Unlink an itnim after folding its stats into the delete statistics;
 * all of its IO queues must already be empty.
 */
#define bfa_fcpim_delitn(__itnim)	do {				\
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	WARN_ON(!list_empty(&(__itnim)->io_q));				\
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
} while (0)
42 42
/*
 * Deliver an itnim "online" notification: synchronously when running
 * in FCS mode, otherwise deferred through the completion queue.
 */
#define bfa_itnim_online_cb(__itnim)	do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)

/* Deliver an itnim "offline" notification (same dispatch rule). */
#define bfa_itnim_offline_cb(__itnim)	do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)

/* Deliver a second-level error recovery notification (same rule). */
#define bfa_itnim_sler_cb(__itnim)	do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
69 69
/*
 * itnim state machine events
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
84 84
/*
 * BFA IOIM related definitions
 */

/* Move an IO from whatever list it is on to the fcpim completion queue. */
#define bfa_ioim_move_to_comp_q(__ioim)	do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)

/* Run the optional IO-profiling completion hook, when installed. */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim)	do {		\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

/* Run the optional IO-profiling start hook, when installed. */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim)	do {		\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
103 103
104 /* 104 /*
105 * IO state machine events 105 * IO state machine events
106 */ 106 */
107 enum bfa_ioim_event { 107 enum bfa_ioim_event {
108 BFA_IOIM_SM_START = 1, /* io start request from host */ 108 BFA_IOIM_SM_START = 1, /* io start request from host */
109 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */ 109 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
110 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */ 110 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
111 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */ 111 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
112 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */ 112 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
113 BFA_IOIM_SM_FREE = 6, /* io resource is freed */ 113 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
114 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */ 114 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
115 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */ 115 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
116 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */ 116 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
117 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */ 117 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
118 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */ 118 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
119 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */ 119 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
120 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */ 120 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
121 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */ 121 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
122 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */ 122 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
123 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 123 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
124 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 124 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
125 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 125 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
126 }; 126 };
127 127
128 128
129 /* 129 /*
130 * BFA TSKIM related definitions 130 * BFA TSKIM related definitions
131 */ 131 */
132 132
133 /* 133 /*
134 * task management completion handling 134 * task management completion handling
135 */ 135 */
136 #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ 136 #define bfa_tskim_qcomp(__tskim, __cbfn) do { \
137 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\ 137 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
138 bfa_tskim_notify_comp(__tskim); \ 138 bfa_tskim_notify_comp(__tskim); \
139 } while (0) 139 } while (0)
140 140
141 #define bfa_tskim_notify_comp(__tskim) do { \ 141 #define bfa_tskim_notify_comp(__tskim) do { \
142 if ((__tskim)->notify) \ 142 if ((__tskim)->notify) \
143 bfa_itnim_tskdone((__tskim)->itnim); \ 143 bfa_itnim_tskdone((__tskim)->itnim); \
144 } while (0) 144 } while (0)
145 145
146 146
147 enum bfa_tskim_event { 147 enum bfa_tskim_event {
148 BFA_TSKIM_SM_START = 1, /* TM command start */ 148 BFA_TSKIM_SM_START = 1, /* TM command start */
149 BFA_TSKIM_SM_DONE = 2, /* TM completion */ 149 BFA_TSKIM_SM_DONE = 2, /* TM completion */
150 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */ 150 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
151 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */ 151 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
152 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */ 152 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
153 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ 153 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
154 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ 154 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
155 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 155 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
156 }; 156 };
157 157
158 /* 158 /*
159 * forward declaration for BFA ITNIM functions 159 * forward declaration for BFA ITNIM functions
160 */ 160 */
161 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); 161 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
162 static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); 162 static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
163 static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); 163 static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
164 static void bfa_itnim_cleanp_comp(void *itnim_cbarg); 164 static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
165 static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); 165 static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
166 static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); 166 static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
167 static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); 167 static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
168 static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); 168 static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
169 static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); 169 static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
170 static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); 170 static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
171 static void bfa_itnim_iotov(void *itnim_arg); 171 static void bfa_itnim_iotov(void *itnim_arg);
172 static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); 172 static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
173 static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 173 static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
174 static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 174 static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
175 175
176 /* 176 /*
177 * forward declaration of ITNIM state machine 177 * forward declaration of ITNIM state machine
178 */ 178 */
179 static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, 179 static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
180 enum bfa_itnim_event event); 180 enum bfa_itnim_event event);
181 static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, 181 static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
182 enum bfa_itnim_event event); 182 enum bfa_itnim_event event);
183 static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, 183 static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
184 enum bfa_itnim_event event); 184 enum bfa_itnim_event event);
185 static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, 185 static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
186 enum bfa_itnim_event event); 186 enum bfa_itnim_event event);
187 static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, 187 static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
188 enum bfa_itnim_event event); 188 enum bfa_itnim_event event);
189 static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, 189 static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
190 enum bfa_itnim_event event); 190 enum bfa_itnim_event event);
191 static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, 191 static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
192 enum bfa_itnim_event event); 192 enum bfa_itnim_event event);
193 static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, 193 static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
194 enum bfa_itnim_event event); 194 enum bfa_itnim_event event);
195 static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, 195 static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
196 enum bfa_itnim_event event); 196 enum bfa_itnim_event event);
197 static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, 197 static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
198 enum bfa_itnim_event event); 198 enum bfa_itnim_event event);
199 static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, 199 static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
200 enum bfa_itnim_event event); 200 enum bfa_itnim_event event);
201 static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, 201 static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
202 enum bfa_itnim_event event); 202 enum bfa_itnim_event event);
203 static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, 203 static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
204 enum bfa_itnim_event event); 204 enum bfa_itnim_event event);
205 static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, 205 static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
206 enum bfa_itnim_event event); 206 enum bfa_itnim_event event);
207 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 207 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
208 enum bfa_itnim_event event); 208 enum bfa_itnim_event event);
209 209
210 /* 210 /*
211 * forward declaration for BFA IOIM functions 211 * forward declaration for BFA IOIM functions
212 */ 212 */
213 static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); 213 static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
214 static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim); 214 static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
215 static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); 215 static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
216 static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); 216 static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
217 static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); 217 static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
218 static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); 218 static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
219 static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); 219 static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
220 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 220 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
221 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 221 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
222 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 222 static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
223 223
224 /* 224 /*
225 * forward declaration of BFA IO state machine 225 * forward declaration of BFA IO state machine
226 */ 226 */
227 static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, 227 static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
228 enum bfa_ioim_event event); 228 enum bfa_ioim_event event);
229 static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, 229 static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
230 enum bfa_ioim_event event); 230 enum bfa_ioim_event event);
231 static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, 231 static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
232 enum bfa_ioim_event event); 232 enum bfa_ioim_event event);
233 static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, 233 static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
234 enum bfa_ioim_event event); 234 enum bfa_ioim_event event);
235 static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, 235 static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
236 enum bfa_ioim_event event); 236 enum bfa_ioim_event event);
237 static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, 237 static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
238 enum bfa_ioim_event event); 238 enum bfa_ioim_event event);
239 static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, 239 static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event); 240 enum bfa_ioim_event event);
241 static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, 241 static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event); 242 enum bfa_ioim_event event);
243 static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, 243 static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event); 244 enum bfa_ioim_event event);
245 static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, 245 static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event); 246 enum bfa_ioim_event event);
247 static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, 247 static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event); 248 enum bfa_ioim_event event);
249 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 249 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event); 250 enum bfa_ioim_event event);
251 /* 251 /*
252 * forward declaration for BFA TSKIM functions 252 * forward declaration for BFA TSKIM functions
253 */ 253 */
254 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); 254 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
255 static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); 255 static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
256 static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, 256 static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
257 struct scsi_lun lun); 257 struct scsi_lun lun);
258 static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); 258 static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
259 static void bfa_tskim_cleanp_comp(void *tskim_cbarg); 259 static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
260 static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); 260 static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
261 static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); 261 static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
262 static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); 262 static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
263 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 263 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
264 264
265 /* 265 /*
266 * forward declaration of BFA TSKIM state machine 266 * forward declaration of BFA TSKIM state machine
267 */ 267 */
268 static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, 268 static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
269 enum bfa_tskim_event event); 269 enum bfa_tskim_event event);
270 static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, 270 static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
271 enum bfa_tskim_event event); 271 enum bfa_tskim_event event);
272 static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, 272 static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
273 enum bfa_tskim_event event); 273 enum bfa_tskim_event event);
274 static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, 274 static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
275 enum bfa_tskim_event event); 275 enum bfa_tskim_event event);
276 static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, 276 static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
277 enum bfa_tskim_event event); 277 enum bfa_tskim_event event);
278 static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, 278 static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
279 enum bfa_tskim_event event); 279 enum bfa_tskim_event event);
280 static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 280 static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
281 enum bfa_tskim_event event); 281 enum bfa_tskim_event event);
282 /* 282 /*
283 * BFA FCP Initiator Mode module 283 * BFA FCP Initiator Mode module
284 */ 284 */
285 285
286 /* 286 /*
287 * Compute and return memory needed by FCP(im) module. 287 * Compute and return memory needed by FCP(im) module.
288 */ 288 */
289 static void 289 static void
290 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 290 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
291 u32 *dm_len) 291 u32 *dm_len)
292 { 292 {
293 bfa_itnim_meminfo(cfg, km_len, dm_len); 293 bfa_itnim_meminfo(cfg, km_len, dm_len);
294 294
295 /* 295 /*
296 * IO memory 296 * IO memory
297 */ 297 */
298 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) 298 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
299 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; 299 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
300 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX) 300 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
301 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; 301 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
302 302
303 *km_len += cfg->fwcfg.num_ioim_reqs * 303 *km_len += cfg->fwcfg.num_ioim_reqs *
304 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); 304 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
305 305
306 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 306 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
307 307
308 /* 308 /*
309 * task management command memory 309 * task management command memory
310 */ 310 */
311 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) 311 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
312 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; 312 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
313 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s); 313 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
314 } 314 }
315 315
316 316
317 static void 317 static void
318 bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 318 bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
319 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 319 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
320 { 320 {
321 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 321 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
322 322
323 bfa_trc(bfa, cfg->drvcfg.path_tov); 323 bfa_trc(bfa, cfg->drvcfg.path_tov);
324 bfa_trc(bfa, cfg->fwcfg.num_rports); 324 bfa_trc(bfa, cfg->fwcfg.num_rports);
325 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); 325 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
326 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); 326 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
327 327
328 fcpim->bfa = bfa; 328 fcpim->bfa = bfa;
329 fcpim->num_itnims = cfg->fwcfg.num_rports; 329 fcpim->num_itnims = cfg->fwcfg.num_rports;
330 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 330 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
331 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; 331 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
332 fcpim->path_tov = cfg->drvcfg.path_tov; 332 fcpim->path_tov = cfg->drvcfg.path_tov;
333 fcpim->delay_comp = cfg->drvcfg.delay_comp; 333 fcpim->delay_comp = cfg->drvcfg.delay_comp;
334 fcpim->profile_comp = NULL; 334 fcpim->profile_comp = NULL;
335 fcpim->profile_start = NULL; 335 fcpim->profile_start = NULL;
336 336
337 bfa_itnim_attach(fcpim, meminfo); 337 bfa_itnim_attach(fcpim, meminfo);
338 bfa_tskim_attach(fcpim, meminfo); 338 bfa_tskim_attach(fcpim, meminfo);
339 bfa_ioim_attach(fcpim, meminfo); 339 bfa_ioim_attach(fcpim, meminfo);
340 } 340 }
341 341
342 static void 342 static void
343 bfa_fcpim_detach(struct bfa_s *bfa) 343 bfa_fcpim_detach(struct bfa_s *bfa)
344 { 344 {
345 } 345 }
346 346
347 static void 347 static void
348 bfa_fcpim_start(struct bfa_s *bfa) 348 bfa_fcpim_start(struct bfa_s *bfa)
349 { 349 {
350 } 350 }
351 351
352 static void 352 static void
353 bfa_fcpim_stop(struct bfa_s *bfa) 353 bfa_fcpim_stop(struct bfa_s *bfa)
354 { 354 {
355 } 355 }
356 356
357 static void 357 static void
358 bfa_fcpim_iocdisable(struct bfa_s *bfa) 358 bfa_fcpim_iocdisable(struct bfa_s *bfa)
359 { 359 {
360 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 360 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
361 struct bfa_itnim_s *itnim; 361 struct bfa_itnim_s *itnim;
362 struct list_head *qe, *qen; 362 struct list_head *qe, *qen;
363 363
364 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 364 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
365 itnim = (struct bfa_itnim_s *) qe; 365 itnim = (struct bfa_itnim_s *) qe;
366 bfa_itnim_iocdisable(itnim); 366 bfa_itnim_iocdisable(itnim);
367 } 367 }
368 } 368 }
369 369
370 void 370 void
371 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) 371 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
372 { 372 {
373 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 373 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
374 374
375 fcpim->path_tov = path_tov * 1000; 375 fcpim->path_tov = path_tov * 1000;
376 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) 376 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
377 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX; 377 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
378 } 378 }
379 379
380 u16 380 u16
381 bfa_fcpim_path_tov_get(struct bfa_s *bfa) 381 bfa_fcpim_path_tov_get(struct bfa_s *bfa)
382 { 382 {
383 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 383 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
384 384
385 return fcpim->path_tov / 1000; 385 return fcpim->path_tov / 1000;
386 } 386 }
387 387
388 u16 388 u16
389 bfa_fcpim_qdepth_get(struct bfa_s *bfa) 389 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
390 { 390 {
391 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 391 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
392 392
393 return fcpim->q_depth; 393 return fcpim->q_depth;
394 } 394 }
395 395
396 /* 396 /*
397 * BFA ITNIM module state machine functions 397 * BFA ITNIM module state machine functions
398 */ 398 */
399 399
400 /* 400 /*
401 * Beginning/unallocated state - no events expected. 401 * Beginning/unallocated state - no events expected.
402 */ 402 */
403 static void 403 static void
404 bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 404 bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
405 { 405 {
406 bfa_trc(itnim->bfa, itnim->rport->rport_tag); 406 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
407 bfa_trc(itnim->bfa, event); 407 bfa_trc(itnim->bfa, event);
408 408
409 switch (event) { 409 switch (event) {
410 case BFA_ITNIM_SM_CREATE: 410 case BFA_ITNIM_SM_CREATE:
411 bfa_sm_set_state(itnim, bfa_itnim_sm_created); 411 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
412 itnim->is_online = BFA_FALSE; 412 itnim->is_online = BFA_FALSE;
413 bfa_fcpim_additn(itnim); 413 bfa_fcpim_additn(itnim);
414 break; 414 break;
415 415
416 default: 416 default:
417 bfa_sm_fault(itnim->bfa, event); 417 bfa_sm_fault(itnim->bfa, event);
418 } 418 }
419 } 419 }
420 420
421 /* 421 /*
422 * Beginning state, only online event expected. 422 * Beginning state, only online event expected.
423 */ 423 */
424 static void 424 static void
425 bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 425 bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
426 { 426 {
427 bfa_trc(itnim->bfa, itnim->rport->rport_tag); 427 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
428 bfa_trc(itnim->bfa, event); 428 bfa_trc(itnim->bfa, event);
429 429
430 switch (event) { 430 switch (event) {
431 case BFA_ITNIM_SM_ONLINE: 431 case BFA_ITNIM_SM_ONLINE:
432 if (bfa_itnim_send_fwcreate(itnim)) 432 if (bfa_itnim_send_fwcreate(itnim))
433 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); 433 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
434 else 434 else
435 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); 435 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
436 break; 436 break;
437 437
438 case BFA_ITNIM_SM_DELETE: 438 case BFA_ITNIM_SM_DELETE:
439 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); 439 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
440 bfa_fcpim_delitn(itnim); 440 bfa_fcpim_delitn(itnim);
441 break; 441 break;
442 442
443 case BFA_ITNIM_SM_HWFAIL: 443 case BFA_ITNIM_SM_HWFAIL:
444 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); 444 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
445 break; 445 break;
446 446
447 default: 447 default:
448 bfa_sm_fault(itnim->bfa, event); 448 bfa_sm_fault(itnim->bfa, event);
449 } 449 }
450 } 450 }
451 451
452 /* 452 /*
453 * Waiting for itnim create response from firmware. 453 * Waiting for itnim create response from firmware.
454 */ 454 */
455 static void 455 static void
456 bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) 456 bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
457 { 457 {
458 bfa_trc(itnim->bfa, itnim->rport->rport_tag); 458 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
459 bfa_trc(itnim->bfa, event); 459 bfa_trc(itnim->bfa, event);
460 460
461 switch (event) { 461 switch (event) {
462 case BFA_ITNIM_SM_FWRSP: 462 case BFA_ITNIM_SM_FWRSP:
463 bfa_sm_set_state(itnim, bfa_itnim_sm_online); 463 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
464 itnim->is_online = BFA_TRUE; 464 itnim->is_online = BFA_TRUE;
465 bfa_itnim_iotov_online(itnim); 465 bfa_itnim_iotov_online(itnim);
466 bfa_itnim_online_cb(itnim); 466 bfa_itnim_online_cb(itnim);
467 break; 467 break;
468 468
469 case BFA_ITNIM_SM_DELETE: 469 case BFA_ITNIM_SM_DELETE:
470 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); 470 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
471 break; 471 break;
472 472
473 case BFA_ITNIM_SM_OFFLINE: 473 case BFA_ITNIM_SM_OFFLINE:
474 if (bfa_itnim_send_fwdelete(itnim)) 474 if (bfa_itnim_send_fwdelete(itnim))
475 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); 475 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
476 else 476 else
477 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); 477 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
478 break; 478 break;
479 479
480 case BFA_ITNIM_SM_HWFAIL: 480 case BFA_ITNIM_SM_HWFAIL:
481 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); 481 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
482 break; 482 break;
483 483
484 default: 484 default:
485 bfa_sm_fault(itnim->bfa, event); 485 bfa_sm_fault(itnim->bfa, event);
486 } 486 }
487 } 487 }
488 488
489 static void 489 static void
490 bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, 490 bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
491 enum bfa_itnim_event event) 491 enum bfa_itnim_event event)
492 { 492 {
493 bfa_trc(itnim->bfa, itnim->rport->rport_tag); 493 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
494 bfa_trc(itnim->bfa, event); 494 bfa_trc(itnim->bfa, event);
495 495
496 switch (event) { 496 switch (event) {
497 case BFA_ITNIM_SM_QRESUME: 497 case BFA_ITNIM_SM_QRESUME:
498 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); 498 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
499 bfa_itnim_send_fwcreate(itnim); 499 bfa_itnim_send_fwcreate(itnim);
500 break; 500 break;
501 501
502 case BFA_ITNIM_SM_DELETE: 502 case BFA_ITNIM_SM_DELETE:
503 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); 503 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
504 bfa_reqq_wcancel(&itnim->reqq_wait); 504 bfa_reqq_wcancel(&itnim->reqq_wait);
505 bfa_fcpim_delitn(itnim); 505 bfa_fcpim_delitn(itnim);
506 break; 506 break;
507 507
508 case BFA_ITNIM_SM_OFFLINE: 508 case BFA_ITNIM_SM_OFFLINE:
509 bfa_sm_set_state(itnim, bfa_itnim_sm_offline); 509 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
510 bfa_reqq_wcancel(&itnim->reqq_wait); 510 bfa_reqq_wcancel(&itnim->reqq_wait);
511 bfa_itnim_offline_cb(itnim); 511 bfa_itnim_offline_cb(itnim);
512 break; 512 break;
513 513
514 case BFA_ITNIM_SM_HWFAIL: 514 case BFA_ITNIM_SM_HWFAIL:
515 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); 515 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
516 bfa_reqq_wcancel(&itnim->reqq_wait); 516 bfa_reqq_wcancel(&itnim->reqq_wait);
517 break; 517 break;
518 518
519 default: 519 default:
520 bfa_sm_fault(itnim->bfa, event); 520 bfa_sm_fault(itnim->bfa, event);
521 } 521 }
522 } 522 }
523 523
524 /* 524 /*
525 * Waiting for itnim create response from firmware, a delete is pending. 525 * Waiting for itnim create response from firmware, a delete is pending.
526 */ 526 */
527 static void 527 static void
528 bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, 528 bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
529 enum bfa_itnim_event event) 529 enum bfa_itnim_event event)
530 { 530 {
531 bfa_trc(itnim->bfa, itnim->rport->rport_tag); 531 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
532 bfa_trc(itnim->bfa, event); 532 bfa_trc(itnim->bfa, event);
533 533
534 switch (event) { 534 switch (event) {
535 case BFA_ITNIM_SM_FWRSP: 535 case BFA_ITNIM_SM_FWRSP:
536 if (bfa_itnim_send_fwdelete(itnim)) 536 if (bfa_itnim_send_fwdelete(itnim))
537 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); 537 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
538 else 538 else
539 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); 539 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
540 break; 540 break;
541 541
542 case BFA_ITNIM_SM_HWFAIL: 542 case BFA_ITNIM_SM_HWFAIL:
543 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); 543 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
544 bfa_fcpim_delitn(itnim); 544 bfa_fcpim_delitn(itnim);
545 break; 545 break;
546 546
547 default: 547 default:
548 bfa_sm_fault(itnim->bfa, event); 548 bfa_sm_fault(itnim->bfa, event);
549 } 549 }
550 } 550 }
551 551
/*
 * Online state - normal parking state.
 *
 * The itnim sits here while the remote port is logged in and IO is
 * flowing.  Every exit path clears is_online before starting cleanup.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* Rport went offline: start IO TOV timer, clean up active IOs */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Deletion requested: clean up IOs, no IO TOV timer needed */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Second level error recovery requested */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failure: fail back all outstanding IOs/TMs at once */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
593 593
/*
 * Second level error recovery need.
 *
 * Waiting for the rport to go offline (or be deleted) so IO cleanup
 * can proceed; the IO TOV timer was started on entry to this state.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Deletion also tears down the IO TOV timer state */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
624 624
/*
 * Going offline. Waiting for active IO cleanup.
 *
 * CLEANUP arrives when all outstanding IOs/TMs have completed
 * (see bfa_itnim_cleanp_comp); then the firmware itnim is deleted.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
				 enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* qfull variant waits for request-queue space first */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Already cleaning up; SLER is a no-op here */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
661 661
/*
 * Deleting itnim. Waiting for active IO cleanup.
 *
 * Like cleanup_offline, but the itnim is being deleted, so once the
 * firmware delete completes the itnim resource is freed.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done; send firmware delete (or wait for queue) */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
689 689
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Firmware delete done; notify offline completion */
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Upgrade the pending firmware delete into an itnim delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* No firmware response will come; complete offline now */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
718 718
/*
 * Rport offline. Waiting for request-queue space to send the firmware
 * itnim delete (reqq_wait was armed by bfa_itnim_send_fwdelete).
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space freed up; retry the firmware delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Cancel the queue wait before completing offline */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
746 746
/*
 * Offline state.
 *
 * Firmware itnim has been deleted; the itnim can be deleted outright
 * or brought back online with a fresh firmware create.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		/* Tear down IO TOV state and free the itnim */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
778 778
/*
 * IOC is disabled/failed. Park here until the itnim is deleted,
 * brought back online, or the rport offline completes.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* No firmware interaction possible; just complete offline */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Already in the IOC-failed state; nothing more to do */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
811 811
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Firmware responded or will never respond: free the itnim */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
832 832
/*
 * Itnim is deleted, waiting for request-queue space to send the
 * firmware delete.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space freed up; retry the firmware delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Cancel the queue wait and free the itnim directly */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
856 856
/*
 * Initiate cleanup of all IOs on an IOC failure.
 *
 * Walks the TM queue, the active IO queue, the pending IO queue and
 * the cleanup-in-progress queue; no firmware interaction is possible,
 * so each element is completed locally.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already moved to the cleanup queue by bfa_itnim_cleanup() */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
890 890
/*
 * IO cleanup completion.
 *
 * Wait-counter callback fired when the last outstanding IO/TM queued
 * by bfa_itnim_cleanup() has completed; advances the state machine.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
902 902
/*
 * Initiate cleanup of all IOs.
 *
 * Each active IO and TM bumps the wait counter; bfa_itnim_cleanp_comp
 * runs once they have all signalled completion via iodone/tskdone.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; fires cleanp_comp when count hits 0 */
	bfa_wc_wait(&itnim->wc);
}
937 937
938 static void 938 static void
939 __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) 939 __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
940 { 940 {
941 struct bfa_itnim_s *itnim = cbarg; 941 struct bfa_itnim_s *itnim = cbarg;
942 942
943 if (complete) 943 if (complete)
944 bfa_cb_itnim_online(itnim->ditn); 944 bfa_cb_itnim_online(itnim->ditn);
945 } 945 }
946 946
947 static void 947 static void
948 __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) 948 __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
949 { 949 {
950 struct bfa_itnim_s *itnim = cbarg; 950 struct bfa_itnim_s *itnim = cbarg;
951 951
952 if (complete) 952 if (complete)
953 bfa_cb_itnim_offline(itnim->ditn); 953 bfa_cb_itnim_offline(itnim->ditn);
954 } 954 }
955 955
956 static void 956 static void
957 __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) 957 __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
958 { 958 {
959 struct bfa_itnim_s *itnim = cbarg; 959 struct bfa_itnim_s *itnim = cbarg;
960 960
961 if (complete) 961 if (complete)
962 bfa_cb_itnim_sler(itnim->ditn); 962 bfa_cb_itnim_sler(itnim->ditn);
963 } 963 }
964 964
965 /* 965 /*
966 * Call to resume any I/O requests waiting for room in request queue. 966 * Call to resume any I/O requests waiting for room in request queue.
967 */ 967 */
968 static void 968 static void
969 bfa_itnim_qresume(void *cbarg) 969 bfa_itnim_qresume(void *cbarg)
970 { 970 {
971 struct bfa_itnim_s *itnim = cbarg; 971 struct bfa_itnim_s *itnim = cbarg;
972 972
973 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); 973 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
974 } 974 }
975 975
976 /* 976 /*
977 * bfa_itnim_public 977 * bfa_itnim_public
978 */ 978 */
979 979
/* An IO tracked by the cleanup wait counter has completed. */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
985 985
/* A task management cmd tracked by the cleanup wait counter completed. */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
991 991
/*
 * Account the kernel-virtual memory needed for the itnim array
 * (one itnim per rport). dm_len is untouched: itnims need no
 * DMA-able memory.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1001 1001
/*
 * Attach-time initialization: carve the itnim array out of the
 * kva meminfo block and put every itnim in the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnims and rports pair up 1:1 by tag index */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* ~0 so the first latency sample always updates the minimum */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* Advance the kva cursor past the consumed itnim array */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1035 1035
/* IOC went down: fail the itnim through the state machine. */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1042 1042
/*
 * Send an itnim create request to firmware.
 *
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (the caller is then parked in a *_qfull state and
 * resumed via bfa_itnim_qresume).
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	/* msg_no lets responses be matched to the latest create attempt */
	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1073 1073
/*
 * Send an itnim delete request to firmware.
 *
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (caller parks in a *_qfull state until qresume).
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1099 1099
/*
 * Cleanup all pending failed inflight requests.
 *
 * @iotov: BFA_TRUE to complete the IOs as timed out (path TOV fired),
 *         BFA_FALSE to abort them (itnim came back online in time).
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
1114 1114
/*
 * Itnim came back online within the path TOV window: stop the timer,
 * abort held inflight IOs, and (re)start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1139 1139
/*
 * Fail all pending IO requests.
 *
 * Called when the path TOV expires (or the itnim is deleted): held
 * inflight IOs and queued-but-unsent IOs are completed as timed out.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1162 1162
/*
 * IO TOV timer callback. Fail any pending IO requests.
 *
 * Runs when the itnim stayed offline past the configured path TOV.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* Timer has fired, so it is no longer active */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1177 1177
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 *
 * A path_tov of 0 disables the hold-and-retry behavior entirely.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* hold_io must be true whenever the timer is armed */
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1192 1192
/*
 * Stop IO TOV timer, if it is running.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}
1204 1204
1205 /* 1205 /*
1206 * Stop IO TOV timer. 1206 * Stop IO TOV timer.
1207 */ 1207 */
1208 static void 1208 static void
1209 bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) 1209 bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1210 { 1210 {
1211 bfa_boolean_t pathtov_active = BFA_FALSE; 1211 bfa_boolean_t pathtov_active = BFA_FALSE;
1212 1212
1213 if (itnim->iotov_active) 1213 if (itnim->iotov_active)
1214 pathtov_active = BFA_TRUE; 1214 pathtov_active = BFA_TRUE;
1215 1215
1216 bfa_itnim_iotov_stop(itnim); 1216 bfa_itnim_iotov_stop(itnim);
1217 if (pathtov_active) 1217 if (pathtov_active)
1218 bfa_cb_itnim_tov_begin(itnim->ditn); 1218 bfa_cb_itnim_tov_begin(itnim->ditn);
1219 bfa_itnim_iotov_cleanup(itnim); 1219 bfa_itnim_iotov_cleanup(itnim);
1220 if (pathtov_active) 1220 if (pathtov_active)
1221 bfa_cb_itnim_tov(itnim->ditn); 1221 bfa_cb_itnim_tov(itnim->ditn);
1222 } 1222 }
1223 1223
/*
 * Fold this itnim's IO statistics into the module-wide deleted-itnim
 * aggregate so the counts survive after the itnim is freed.
 */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1242 1242
1243 /* 1243 /*
1244 * bfa_itnim_public 1244 * bfa_itnim_public
1245 */ 1245 */
1246 1246
/*
 * Itnim interrupt processing: dispatch an I2H message from firmware
 * to the matching itnim's state machine.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		/* Firmware completed itnim create; resume state machine. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		/* Firmware completed itnim delete; resume state machine. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		/* SLER event from firmware for this itnim. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* Unexpected message id: trace it and warn. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1290 1290
1291 /* 1291 /*
1292 * bfa_itnim_api 1292 * bfa_itnim_api
1293 */ 1293 */
1294 1294
/*
 * Bind the itnim associated with @rport (looked up by rport tag) to the
 * driver cookie @ditn and start its creation state machine.
 * Returns the itnim for the caller's use.
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	/* opaque per-itnim cookie owned by the initiator driver */
	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1311 1311
/*
 * Request deletion of an itnim via its state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1318 1318
/*
 * Bring an itnim online. @seq_rec records whether sequence level error
 * recovery is enabled for this itnim.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1326 1326
/*
 * Take an itnim offline via its state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1333 1333
/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 *
 * Holds only while a path TOV is configured and the IO TOV timer is
 * active, and the itnim state machine is in one of the transient/offline
 * states listed below.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1349 1349
1350 void 1350 void
1351 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1351 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1352 { 1352 {
1353 int j; 1353 int j;
1354 memset(&itnim->stats, 0, sizeof(itnim->stats)); 1354 memset(&itnim->stats, 0, sizeof(itnim->stats));
1355 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); 1355 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1356 for (j = 0; j < BFA_IOBUCKET_MAX; j++) 1356 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1357 itnim->ioprofile.io_latency.min[j] = ~0; 1357 itnim->ioprofile.io_latency.min[j] = ~0;
1358 } 1358 }
1359 1359
1360 /* 1360 /*
1361 * BFA IO module state machine functions 1361 * BFA IO module state machine functions
1362 */ 1362 */
1363 1363
/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* itnim offline, no hold: fail with pathtov */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* hold the IO on the itnim's pending queue */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* more SG elements than fit inline: allocate SG pages */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		/* request queue full: wait in qfull state */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* path TOV expired before the IO started */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1428 1425
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages available; try to send the IO request */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* cancel the SG page wait and fail the IO back to the host */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* cancel the SG page wait and complete the abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* hardware failure while waiting for SG pages */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1475 1472
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* good completion: no sense/residue processing needed */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* explicit (host-requested) abort */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* request CQ full: wait for room to send the abort */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort driven by cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1559 1553
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort while waiting to retry */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1614 1605
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completions racing with the abort are ignored here;
		 * the abort response drives the state machine. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* cleanup supersedes an explicit abort in progress */
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1676 1667
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completions racing with cleanup are ignored here */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1737 1728
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room available: send the queued IO request */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* cancel the queue wait and complete the abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1781 1772
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* room available: send the queued abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* cleanup demotes the explicit abort to an implicit one */
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1832 1823
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; issue the cleanup request now. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly; just record
		 * that the completion callback should report an abort.
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before cleanup was sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Firmware is done but resources cannot be freed yet. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure: cancel the queue wait and fail the IO back. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1882 1873
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback delivered; return the IO to the free pool. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* Callback already queued; nothing more to do on IOC failure. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1909 1897
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/*
		 * Host callback done first; park the IO on the resource-free
		 * queue until firmware releases the IO tag.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Firmware freed the resource first; only the callback remains. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure implies firmware resources are gone. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1942 1930
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* Firmware released the IO tag; recycle the IO. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure; the free will never arrive, nothing to do here. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1969 1957
1970 1958
1971 static void 1959 static void
1972 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 1960 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
1973 { 1961 {
1974 struct bfa_ioim_s *ioim = cbarg; 1962 struct bfa_ioim_s *ioim = cbarg;
1975 1963
1976 if (!complete) { 1964 if (!complete) {
1977 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); 1965 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
1978 return; 1966 return;
1979 } 1967 }
1980 1968
1981 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio); 1969 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
1982 } 1970 }
1983 1971
/*
 * bfa callback for a completed IO: unpack the firmware response
 * (sense data and residue) and deliver the completion to the IM driver.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		/* Callback canceled; bounce the HCB event to the state machine. */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			/* Overrun is reported as a negative residue. */
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2026 2014
2027 static void 2015 static void
2028 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) 2016 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2029 { 2017 {
2030 struct bfa_ioim_s *ioim = cbarg; 2018 struct bfa_ioim_s *ioim = cbarg;
2031 2019
2032 if (!complete) { 2020 if (!complete) {
2033 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); 2021 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2034 return; 2022 return;
2035 } 2023 }
2036 2024
2037 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2025 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2038 0, 0, NULL, 0); 2026 0, 0, NULL, 0);
2039 } 2027 }
2040 2028
2041 static void 2029 static void
2042 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) 2030 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2043 { 2031 {
2044 struct bfa_ioim_s *ioim = cbarg; 2032 struct bfa_ioim_s *ioim = cbarg;
2045 2033
2046 bfa_stats(ioim->itnim, path_tov_expired); 2034 bfa_stats(ioim->itnim, path_tov_expired);
2047 if (!complete) { 2035 if (!complete) {
2048 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); 2036 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2049 return; 2037 return;
2050 } 2038 }
2051 2039
2052 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 2040 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2053 0, 0, NULL, 0); 2041 0, 0, NULL, 0);
2054 } 2042 }
2055 2043
2056 static void 2044 static void
2057 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) 2045 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2058 { 2046 {
2059 struct bfa_ioim_s *ioim = cbarg; 2047 struct bfa_ioim_s *ioim = cbarg;
2060 2048
2061 if (!complete) { 2049 if (!complete) {
2062 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); 2050 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2063 return; 2051 return;
2064 } 2052 }
2065 2053
2066 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); 2054 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2067 } 2055 }
2068 2056
/*
 * SG page allocation wait completed: splice the allocated pages onto
 * the IO's SG page queue and resume the IO state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2079 2067
/*
 * Send I/O request to firmware.
 *
 * Builds the BFI IO request message (inline SG element, chained SG
 * pages, FCP command block) and produces it on the request queue.
 * Returns BFA_FALSE and arms a queue wait if the request queue is full.
 */
static	bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32	i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			/* more SG elements follow only if they exceed inline space */
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			/* remaining elements go into chained SG pages */
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the page with a PGDLEN element */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full: link to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline element: points at the first chained SG page, if any */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - RW uses the generic IO message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence-record or unaligned transfers also use the generic class */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2229 2217
/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 *
 * Returns BFA_TRUE when all needed SG pages are available (or none are
 * needed), BFA_FALSE when the allocation must wait (a wait queue entry
 * is armed and bfa_ioim_sgpg_alloced() will resume the IO).
 */
static	bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
2259 2247
/*
 * Send I/O abort request to firmware.
 *
 * Returns BFA_FALSE when the request queue is full (the caller's state
 * machine handles the retry); BFA_TRUE when the abort/cleanup message
 * was queued.
 */
static	bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	/* bump the abort tag so stale abort responses can be recognized */
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2294 2282
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
2306 2294
2307 2295
/*
 * Notify the cleanup initiator (itnim or task management) that this IO
 * is done with cleanup, after parking the IO on a queue that outlives
 * the itnim.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/* defer the completion until the IO TOV resolves */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* cleanup was driven by task management; drop its wait count */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2328 2316
/*
 * Return BFA_TRUE if the IO is in a state where an abort makes sense;
 * BFA_FALSE if it is uninitialized (and not pending), already being
 * aborted, or already past completion.
 */
static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}
2343 2331
/*
 * Complete an IO whose completion was delayed across an itnim offline
 * window.
 *
 * @param[in] ioim   IO to complete
 * @param[in] iotov  BFA_TRUE if the path timeout expired
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device cameback online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
2369 2357
2370 2358
/*
 * Memory allocation and initialization.
 *
 * Claims KVA for the IO and IO-sp arrays and DMA memory for per-IO
 * sense buffers out of @minfo, then initializes every IO and places it
 * on the free queue. The claim order (ioim array, iosp array, sense
 * DMA) must match the sizing done at meminfo query time.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8			*snsinfo;
	u32		snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell the IOC where the sense buffer region starts */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2434 2422
2435 void 2423 void
2436 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 2424 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2437 { 2425 {
2438 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2426 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2439 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; 2427 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2440 struct bfa_ioim_s *ioim; 2428 struct bfa_ioim_s *ioim;
2441 u16 iotag; 2429 u16 iotag;
2442 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; 2430 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2443 2431
2444 iotag = be16_to_cpu(rsp->io_tag); 2432 iotag = be16_to_cpu(rsp->io_tag);
2445 2433
2446 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2434 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2447 WARN_ON(ioim->iotag != iotag); 2435 WARN_ON(ioim->iotag != iotag);
2448 2436
2449 bfa_trc(ioim->bfa, ioim->iotag); 2437 bfa_trc(ioim->bfa, ioim->iotag);
2450 bfa_trc(ioim->bfa, rsp->io_status); 2438 bfa_trc(ioim->bfa, rsp->io_status);
2451 bfa_trc(ioim->bfa, rsp->reuse_io_tag); 2439 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2452 2440
2453 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) 2441 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2454 ioim->iosp->comp_rspmsg = *m; 2442 ioim->iosp->comp_rspmsg = *m;
2455 2443
2456 switch (rsp->io_status) { 2444 switch (rsp->io_status) {
2457 case BFI_IOIM_STS_OK: 2445 case BFI_IOIM_STS_OK:
2458 bfa_stats(ioim->itnim, iocomp_ok); 2446 bfa_stats(ioim->itnim, iocomp_ok);
2459 if (rsp->reuse_io_tag == 0) 2447 if (rsp->reuse_io_tag == 0)
2460 evt = BFA_IOIM_SM_DONE; 2448 evt = BFA_IOIM_SM_DONE;
2461 else 2449 else
2462 evt = BFA_IOIM_SM_COMP; 2450 evt = BFA_IOIM_SM_COMP;
2463 break; 2451 break;
2464 2452
2465 case BFI_IOIM_STS_TIMEDOUT: 2453 case BFI_IOIM_STS_TIMEDOUT:
2466 bfa_stats(ioim->itnim, iocomp_timedout); 2454 bfa_stats(ioim->itnim, iocomp_timedout);
2467 case BFI_IOIM_STS_ABORTED: 2455 case BFI_IOIM_STS_ABORTED:
2468 rsp->io_status = BFI_IOIM_STS_ABORTED; 2456 rsp->io_status = BFI_IOIM_STS_ABORTED;
2469 bfa_stats(ioim->itnim, iocomp_aborted); 2457 bfa_stats(ioim->itnim, iocomp_aborted);
2470 if (rsp->reuse_io_tag == 0) 2458 if (rsp->reuse_io_tag == 0)
2471 evt = BFA_IOIM_SM_DONE; 2459 evt = BFA_IOIM_SM_DONE;
2472 else 2460 else
2473 evt = BFA_IOIM_SM_COMP; 2461 evt = BFA_IOIM_SM_COMP;
2474 break; 2462 break;
2475 2463
2476 case BFI_IOIM_STS_PROTO_ERR: 2464 case BFI_IOIM_STS_PROTO_ERR:
2477 bfa_stats(ioim->itnim, iocom_proto_err); 2465 bfa_stats(ioim->itnim, iocom_proto_err);
2478 WARN_ON(!rsp->reuse_io_tag); 2466 WARN_ON(!rsp->reuse_io_tag);
2479 evt = BFA_IOIM_SM_COMP; 2467 evt = BFA_IOIM_SM_COMP;
2480 break; 2468 break;
2481 2469
2482 case BFI_IOIM_STS_SQER_NEEDED: 2470 case BFI_IOIM_STS_SQER_NEEDED:
2483 bfa_stats(ioim->itnim, iocom_sqer_needed); 2471 bfa_stats(ioim->itnim, iocom_sqer_needed);
2484 WARN_ON(rsp->reuse_io_tag != 0); 2472 WARN_ON(rsp->reuse_io_tag != 0);
2485 evt = BFA_IOIM_SM_SQRETRY; 2473 evt = BFA_IOIM_SM_SQRETRY;
2486 break; 2474 break;
2487 2475
2488 case BFI_IOIM_STS_RES_FREE: 2476 case BFI_IOIM_STS_RES_FREE:
2489 bfa_stats(ioim->itnim, iocom_res_free); 2477 bfa_stats(ioim->itnim, iocom_res_free);
2490 evt = BFA_IOIM_SM_FREE; 2478 evt = BFA_IOIM_SM_FREE;
2491 break; 2479 break;
2492 2480
2493 case BFI_IOIM_STS_HOST_ABORTED: 2481 case BFI_IOIM_STS_HOST_ABORTED:
2494 bfa_stats(ioim->itnim, iocom_hostabrts); 2482 bfa_stats(ioim->itnim, iocom_hostabrts);
2495 if (rsp->abort_tag != ioim->abort_tag) { 2483 if (rsp->abort_tag != ioim->abort_tag) {
2496 bfa_trc(ioim->bfa, rsp->abort_tag); 2484 bfa_trc(ioim->bfa, rsp->abort_tag);
2497 bfa_trc(ioim->bfa, ioim->abort_tag); 2485 bfa_trc(ioim->bfa, ioim->abort_tag);
2498 return; 2486 return;
2499 } 2487 }
2500 2488
2501 if (rsp->reuse_io_tag) 2489 if (rsp->reuse_io_tag)
2502 evt = BFA_IOIM_SM_ABORT_COMP; 2490 evt = BFA_IOIM_SM_ABORT_COMP;
2503 else 2491 else
2504 evt = BFA_IOIM_SM_ABORT_DONE; 2492 evt = BFA_IOIM_SM_ABORT_DONE;
2505 break; 2493 break;
2506 2494
2507 case BFI_IOIM_STS_UTAG: 2495 case BFI_IOIM_STS_UTAG:
2508 bfa_stats(ioim->itnim, iocom_utags); 2496 bfa_stats(ioim->itnim, iocom_utags);
2509 evt = BFA_IOIM_SM_COMP_UTAG; 2497 evt = BFA_IOIM_SM_COMP_UTAG;
2510 break; 2498 break;
2511 2499
2512 default: 2500 default:
2513 WARN_ON(1); 2501 WARN_ON(1);
2514 } 2502 }
2515 2503
2516 bfa_sm_send_event(ioim, evt); 2504 bfa_sm_send_event(ioim, evt);
2517 } 2505 }
2518 2506
2519 void 2507 void
2520 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 2508 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2521 { 2509 {
2522 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2510 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2523 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; 2511 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2524 struct bfa_ioim_s *ioim; 2512 struct bfa_ioim_s *ioim;
2525 u16 iotag; 2513 u16 iotag;
2526 2514
2527 iotag = be16_to_cpu(rsp->io_tag); 2515 iotag = be16_to_cpu(rsp->io_tag);
2528 2516
2529 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2517 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2530 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); 2518 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2531 2519
2532 bfa_trc_fp(ioim->bfa, ioim->iotag);
2533 bfa_ioim_cb_profile_comp(fcpim, ioim); 2520 bfa_ioim_cb_profile_comp(fcpim, ioim);
2534
2535 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); 2521 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2536 } 2522 }
2537 2523
2538 /* 2524 /*
2539 * Called by itnim to clean up IO while going offline. 2525 * Called by itnim to clean up IO while going offline.
2540 */ 2526 */
2541 void 2527 void
2542 bfa_ioim_cleanup(struct bfa_ioim_s *ioim) 2528 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2543 { 2529 {
2544 bfa_trc(ioim->bfa, ioim->iotag); 2530 bfa_trc(ioim->bfa, ioim->iotag);
2545 bfa_stats(ioim->itnim, io_cleanups); 2531 bfa_stats(ioim->itnim, io_cleanups);
2546 2532
2547 ioim->iosp->tskim = NULL; 2533 ioim->iosp->tskim = NULL;
2548 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2534 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2549 } 2535 }
2550 2536
2551 void 2537 void
2552 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) 2538 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2553 { 2539 {
2554 bfa_trc(ioim->bfa, ioim->iotag); 2540 bfa_trc(ioim->bfa, ioim->iotag);
2555 bfa_stats(ioim->itnim, io_tmaborts); 2541 bfa_stats(ioim->itnim, io_tmaborts);
2556 2542
2557 ioim->iosp->tskim = tskim; 2543 ioim->iosp->tskim = tskim;
2558 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2544 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2559 } 2545 }
2560 2546
2561 /* 2547 /*
2562 * IOC failure handling. 2548 * IOC failure handling.
2563 */ 2549 */
2564 void 2550 void
2565 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) 2551 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2566 { 2552 {
2567 bfa_trc(ioim->bfa, ioim->iotag); 2553 bfa_trc(ioim->bfa, ioim->iotag);
2568 bfa_stats(ioim->itnim, io_iocdowns); 2554 bfa_stats(ioim->itnim, io_iocdowns);
2569 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2555 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2570 } 2556 }
2571 2557
2572 /* 2558 /*
2573 * IO offline TOV popped. Fail the pending IO. 2559 * IO offline TOV popped. Fail the pending IO.
2574 */ 2560 */
2575 void 2561 void
2576 bfa_ioim_tov(struct bfa_ioim_s *ioim) 2562 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2577 { 2563 {
2578 bfa_trc(ioim->bfa, ioim->iotag); 2564 bfa_trc(ioim->bfa, ioim->iotag);
2579 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); 2565 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2580 } 2566 }
2581 2567
2582 2568
2583 /* 2569 /*
2584 * Allocate IOIM resource for initiator mode I/O request. 2570 * Allocate IOIM resource for initiator mode I/O request.
2585 */ 2571 */
2586 struct bfa_ioim_s * 2572 struct bfa_ioim_s *
2587 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, 2573 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2588 struct bfa_itnim_s *itnim, u16 nsges) 2574 struct bfa_itnim_s *itnim, u16 nsges)
2589 { 2575 {
2590 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2576 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2591 struct bfa_ioim_s *ioim; 2577 struct bfa_ioim_s *ioim;
2592 2578
2593 /* 2579 /*
2594 * alocate IOIM resource 2580 * alocate IOIM resource
2595 */ 2581 */
2596 bfa_q_deq(&fcpim->ioim_free_q, &ioim); 2582 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
2597 if (!ioim) { 2583 if (!ioim) {
2598 bfa_stats(itnim, no_iotags); 2584 bfa_stats(itnim, no_iotags);
2599 return NULL; 2585 return NULL;
2600 } 2586 }
2601 2587
2602 ioim->dio = dio; 2588 ioim->dio = dio;
2603 ioim->itnim = itnim; 2589 ioim->itnim = itnim;
2604 ioim->nsges = nsges; 2590 ioim->nsges = nsges;
2605 ioim->nsgpgs = 0; 2591 ioim->nsgpgs = 0;
2606 2592
2607 bfa_stats(itnim, total_ios); 2593 bfa_stats(itnim, total_ios);
2608 fcpim->ios_active++; 2594 fcpim->ios_active++;
2609 2595
2610 list_add_tail(&ioim->qe, &itnim->io_q); 2596 list_add_tail(&ioim->qe, &itnim->io_q);
2611 bfa_trc_fp(ioim->bfa, ioim->iotag);
2612 2597
2613 return ioim; 2598 return ioim;
2614 } 2599 }
2615 2600
2616 void 2601 void
2617 bfa_ioim_free(struct bfa_ioim_s *ioim) 2602 bfa_ioim_free(struct bfa_ioim_s *ioim)
2618 { 2603 {
2619 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; 2604 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2620 2605
2621 bfa_trc_fp(ioim->bfa, ioim->iotag);
2622 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2623
2624 bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2625 (ioim->nsges > BFI_SGE_INLINE));
2626
2627 if (ioim->nsgpgs > 0) 2606 if (ioim->nsgpgs > 0)
2628 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); 2607 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2629 2608
2630 bfa_stats(ioim->itnim, io_comps); 2609 bfa_stats(ioim->itnim, io_comps);
2631 fcpim->ios_active--; 2610 fcpim->ios_active--;
2632 2611
2633 ioim->iotag &= BFA_IOIM_IOTAG_MASK; 2612 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2634 list_del(&ioim->qe); 2613 list_del(&ioim->qe);
2635 list_add_tail(&ioim->qe, &fcpim->ioim_free_q); 2614 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2636 } 2615 }
2637 2616
2638 void 2617 void
2639 bfa_ioim_start(struct bfa_ioim_s *ioim) 2618 bfa_ioim_start(struct bfa_ioim_s *ioim)
2640 { 2619 {
2641 bfa_trc_fp(ioim->bfa, ioim->iotag);
2642
2643 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2620 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2644 2621
2645 /* 2622 /*
2646 * Obtain the queue over which this request has to be issued 2623 * Obtain the queue over which this request has to be issued
2647 */ 2624 */
2648 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? 2625 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2649 BFA_FALSE : bfa_itnim_get_reqq(ioim); 2626 BFA_FALSE : bfa_itnim_get_reqq(ioim);
2650 2627
2651 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2628 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2652 } 2629 }
2653 2630
2654 /* 2631 /*
2655 * Driver I/O abort request. 2632 * Driver I/O abort request.
2656 */ 2633 */
2657 bfa_status_t 2634 bfa_status_t
2658 bfa_ioim_abort(struct bfa_ioim_s *ioim) 2635 bfa_ioim_abort(struct bfa_ioim_s *ioim)
2659 { 2636 {
2660 2637
2661 bfa_trc(ioim->bfa, ioim->iotag); 2638 bfa_trc(ioim->bfa, ioim->iotag);
2662 2639
2663 if (!bfa_ioim_is_abortable(ioim)) 2640 if (!bfa_ioim_is_abortable(ioim))
2664 return BFA_STATUS_FAILED; 2641 return BFA_STATUS_FAILED;
2665 2642
2666 bfa_stats(ioim->itnim, io_aborts); 2643 bfa_stats(ioim->itnim, io_aborts);
2667 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); 2644 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2668 2645
2669 return BFA_STATUS_OK; 2646 return BFA_STATUS_OK;
2670 } 2647 }
2671 2648
2672 /* 2649 /*
2673 * BFA TSKIM state machine functions 2650 * BFA TSKIM state machine functions
2674 */ 2651 */
2675 2652
2676 /* 2653 /*
2677 * Task management command beginning state. 2654 * Task management command beginning state.
2678 */ 2655 */
2679 static void 2656 static void
2680 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2657 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2681 { 2658 {
2682 bfa_trc(tskim->bfa, event); 2659 bfa_trc(tskim->bfa, event);
2683 2660
2684 switch (event) { 2661 switch (event) {
2685 case BFA_TSKIM_SM_START: 2662 case BFA_TSKIM_SM_START:
2686 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 2663 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2687 bfa_tskim_gather_ios(tskim); 2664 bfa_tskim_gather_ios(tskim);
2688 2665
2689 /* 2666 /*
2690 * If device is offline, do not send TM on wire. Just cleanup 2667 * If device is offline, do not send TM on wire. Just cleanup
2691 * any pending IO requests and complete TM request. 2668 * any pending IO requests and complete TM request.
2692 */ 2669 */
2693 if (!bfa_itnim_is_online(tskim->itnim)) { 2670 if (!bfa_itnim_is_online(tskim->itnim)) {
2694 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 2671 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2695 tskim->tsk_status = BFI_TSKIM_STS_OK; 2672 tskim->tsk_status = BFI_TSKIM_STS_OK;
2696 bfa_tskim_cleanup_ios(tskim); 2673 bfa_tskim_cleanup_ios(tskim);
2697 return; 2674 return;
2698 } 2675 }
2699 2676
2700 if (!bfa_tskim_send(tskim)) { 2677 if (!bfa_tskim_send(tskim)) {
2701 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); 2678 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
2702 bfa_stats(tskim->itnim, tm_qwait); 2679 bfa_stats(tskim->itnim, tm_qwait);
2703 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, 2680 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2704 &tskim->reqq_wait); 2681 &tskim->reqq_wait);
2705 } 2682 }
2706 break; 2683 break;
2707 2684
2708 default: 2685 default:
2709 bfa_sm_fault(tskim->bfa, event); 2686 bfa_sm_fault(tskim->bfa, event);
2710 } 2687 }
2711 } 2688 }
2712 2689
2713 /* 2690 /*
2714 * TM command is active, awaiting completion from firmware to 2691 * TM command is active, awaiting completion from firmware to
2715 * cleanup IO requests in TM scope. 2692 * cleanup IO requests in TM scope.
2716 */ 2693 */
2717 static void 2694 static void
2718 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2695 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2719 { 2696 {
2720 bfa_trc(tskim->bfa, event); 2697 bfa_trc(tskim->bfa, event);
2721 2698
2722 switch (event) { 2699 switch (event) {
2723 case BFA_TSKIM_SM_DONE: 2700 case BFA_TSKIM_SM_DONE:
2724 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 2701 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2725 bfa_tskim_cleanup_ios(tskim); 2702 bfa_tskim_cleanup_ios(tskim);
2726 break; 2703 break;
2727 2704
2728 case BFA_TSKIM_SM_CLEANUP: 2705 case BFA_TSKIM_SM_CLEANUP:
2729 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); 2706 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2730 if (!bfa_tskim_send_abort(tskim)) { 2707 if (!bfa_tskim_send_abort(tskim)) {
2731 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); 2708 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
2732 bfa_stats(tskim->itnim, tm_qwait); 2709 bfa_stats(tskim->itnim, tm_qwait);
2733 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, 2710 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2734 &tskim->reqq_wait); 2711 &tskim->reqq_wait);
2735 } 2712 }
2736 break; 2713 break;
2737 2714
2738 case BFA_TSKIM_SM_HWFAIL: 2715 case BFA_TSKIM_SM_HWFAIL:
2739 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2716 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2740 bfa_tskim_iocdisable_ios(tskim); 2717 bfa_tskim_iocdisable_ios(tskim);
2741 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); 2718 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2742 break; 2719 break;
2743 2720
2744 default: 2721 default:
2745 bfa_sm_fault(tskim->bfa, event); 2722 bfa_sm_fault(tskim->bfa, event);
2746 } 2723 }
2747 } 2724 }
2748 2725
2749 /* 2726 /*
2750 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 2727 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
2751 * completion event from firmware. 2728 * completion event from firmware.
2752 */ 2729 */
2753 static void 2730 static void
2754 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2731 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2755 { 2732 {
2756 bfa_trc(tskim->bfa, event); 2733 bfa_trc(tskim->bfa, event);
2757 2734
2758 switch (event) { 2735 switch (event) {
2759 case BFA_TSKIM_SM_DONE: 2736 case BFA_TSKIM_SM_DONE:
2760 /* 2737 /*
2761 * Ignore and wait for ABORT completion from firmware. 2738 * Ignore and wait for ABORT completion from firmware.
2762 */ 2739 */
2763 break; 2740 break;
2764 2741
2765 case BFA_TSKIM_SM_CLEANUP_DONE: 2742 case BFA_TSKIM_SM_CLEANUP_DONE:
2766 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 2743 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2767 bfa_tskim_cleanup_ios(tskim); 2744 bfa_tskim_cleanup_ios(tskim);
2768 break; 2745 break;
2769 2746
2770 case BFA_TSKIM_SM_HWFAIL: 2747 case BFA_TSKIM_SM_HWFAIL:
2771 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2748 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2772 bfa_tskim_iocdisable_ios(tskim); 2749 bfa_tskim_iocdisable_ios(tskim);
2773 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); 2750 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2774 break; 2751 break;
2775 2752
2776 default: 2753 default:
2777 bfa_sm_fault(tskim->bfa, event); 2754 bfa_sm_fault(tskim->bfa, event);
2778 } 2755 }
2779 } 2756 }
2780 2757
2781 static void 2758 static void
2782 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2759 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2783 { 2760 {
2784 bfa_trc(tskim->bfa, event); 2761 bfa_trc(tskim->bfa, event);
2785 2762
2786 switch (event) { 2763 switch (event) {
2787 case BFA_TSKIM_SM_IOS_DONE: 2764 case BFA_TSKIM_SM_IOS_DONE:
2788 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2765 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2789 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); 2766 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
2790 break; 2767 break;
2791 2768
2792 case BFA_TSKIM_SM_CLEANUP: 2769 case BFA_TSKIM_SM_CLEANUP:
2793 /* 2770 /*
2794 * Ignore, TM command completed on wire. 2771 * Ignore, TM command completed on wire.
2795 * Notify TM conmpletion on IO cleanup completion. 2772 * Notify TM conmpletion on IO cleanup completion.
2796 */ 2773 */
2797 break; 2774 break;
2798 2775
2799 case BFA_TSKIM_SM_HWFAIL: 2776 case BFA_TSKIM_SM_HWFAIL:
2800 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2777 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2801 bfa_tskim_iocdisable_ios(tskim); 2778 bfa_tskim_iocdisable_ios(tskim);
2802 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); 2779 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2803 break; 2780 break;
2804 2781
2805 default: 2782 default:
2806 bfa_sm_fault(tskim->bfa, event); 2783 bfa_sm_fault(tskim->bfa, event);
2807 } 2784 }
2808 } 2785 }
2809 2786
2810 /* 2787 /*
2811 * Task management command is waiting for room in request CQ 2788 * Task management command is waiting for room in request CQ
2812 */ 2789 */
2813 static void 2790 static void
2814 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2791 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2815 { 2792 {
2816 bfa_trc(tskim->bfa, event); 2793 bfa_trc(tskim->bfa, event);
2817 2794
2818 switch (event) { 2795 switch (event) {
2819 case BFA_TSKIM_SM_QRESUME: 2796 case BFA_TSKIM_SM_QRESUME:
2820 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 2797 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2821 bfa_tskim_send(tskim); 2798 bfa_tskim_send(tskim);
2822 break; 2799 break;
2823 2800
2824 case BFA_TSKIM_SM_CLEANUP: 2801 case BFA_TSKIM_SM_CLEANUP:
2825 /* 2802 /*
2826 * No need to send TM on wire since ITN is offline. 2803 * No need to send TM on wire since ITN is offline.
2827 */ 2804 */
2828 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 2805 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2829 bfa_reqq_wcancel(&tskim->reqq_wait); 2806 bfa_reqq_wcancel(&tskim->reqq_wait);
2830 bfa_tskim_cleanup_ios(tskim); 2807 bfa_tskim_cleanup_ios(tskim);
2831 break; 2808 break;
2832 2809
2833 case BFA_TSKIM_SM_HWFAIL: 2810 case BFA_TSKIM_SM_HWFAIL:
2834 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2811 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2835 bfa_reqq_wcancel(&tskim->reqq_wait); 2812 bfa_reqq_wcancel(&tskim->reqq_wait);
2836 bfa_tskim_iocdisable_ios(tskim); 2813 bfa_tskim_iocdisable_ios(tskim);
2837 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); 2814 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2838 break; 2815 break;
2839 2816
2840 default: 2817 default:
2841 bfa_sm_fault(tskim->bfa, event); 2818 bfa_sm_fault(tskim->bfa, event);
2842 } 2819 }
2843 } 2820 }
2844 2821
2845 /* 2822 /*
2846 * Task management command is active, awaiting for room in request CQ 2823 * Task management command is active, awaiting for room in request CQ
2847 * to send clean up request. 2824 * to send clean up request.
2848 */ 2825 */
2849 static void 2826 static void
2850 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, 2827 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
2851 enum bfa_tskim_event event) 2828 enum bfa_tskim_event event)
2852 { 2829 {
2853 bfa_trc(tskim->bfa, event); 2830 bfa_trc(tskim->bfa, event);
2854 2831
2855 switch (event) { 2832 switch (event) {
2856 case BFA_TSKIM_SM_DONE: 2833 case BFA_TSKIM_SM_DONE:
2857 bfa_reqq_wcancel(&tskim->reqq_wait); 2834 bfa_reqq_wcancel(&tskim->reqq_wait);
2858 /* 2835 /*
2859 * Fall through !!! 2836 * Fall through !!!
2860 */ 2837 */
2861 case BFA_TSKIM_SM_QRESUME: 2838 case BFA_TSKIM_SM_QRESUME:
2862 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); 2839 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2863 bfa_tskim_send_abort(tskim); 2840 bfa_tskim_send_abort(tskim);
2864 break; 2841 break;
2865 2842
2866 case BFA_TSKIM_SM_HWFAIL: 2843 case BFA_TSKIM_SM_HWFAIL:
2867 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); 2844 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2868 bfa_reqq_wcancel(&tskim->reqq_wait); 2845 bfa_reqq_wcancel(&tskim->reqq_wait);
2869 bfa_tskim_iocdisable_ios(tskim); 2846 bfa_tskim_iocdisable_ios(tskim);
2870 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); 2847 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2871 break; 2848 break;
2872 2849
2873 default: 2850 default:
2874 bfa_sm_fault(tskim->bfa, event); 2851 bfa_sm_fault(tskim->bfa, event);
2875 } 2852 }
2876 } 2853 }
2877 2854
2878 /* 2855 /*
2879 * BFA callback is pending 2856 * BFA callback is pending
2880 */ 2857 */
2881 static void 2858 static void
2882 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 2859 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2883 { 2860 {
2884 bfa_trc(tskim->bfa, event); 2861 bfa_trc(tskim->bfa, event);
2885 2862
2886 switch (event) { 2863 switch (event) {
2887 case BFA_TSKIM_SM_HCB: 2864 case BFA_TSKIM_SM_HCB:
2888 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); 2865 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
2889 bfa_tskim_free(tskim); 2866 bfa_tskim_free(tskim);
2890 break; 2867 break;
2891 2868
2892 case BFA_TSKIM_SM_CLEANUP: 2869 case BFA_TSKIM_SM_CLEANUP:
2893 bfa_tskim_notify_comp(tskim); 2870 bfa_tskim_notify_comp(tskim);
2894 break; 2871 break;
2895 2872
2896 case BFA_TSKIM_SM_HWFAIL: 2873 case BFA_TSKIM_SM_HWFAIL:
2897 break; 2874 break;
2898 2875
2899 default: 2876 default:
2900 bfa_sm_fault(tskim->bfa, event); 2877 bfa_sm_fault(tskim->bfa, event);
2901 } 2878 }
2902 } 2879 }
2903 2880
2904 static void 2881 static void
2905 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) 2882 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2906 { 2883 {
2907 struct bfa_tskim_s *tskim = cbarg; 2884 struct bfa_tskim_s *tskim = cbarg;
2908 2885
2909 if (!complete) { 2886 if (!complete) {
2910 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); 2887 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2911 return; 2888 return;
2912 } 2889 }
2913 2890
2914 bfa_stats(tskim->itnim, tm_success); 2891 bfa_stats(tskim->itnim, tm_success);
2915 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status); 2892 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2916 } 2893 }
2917 2894
2918 static void 2895 static void
2919 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) 2896 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2920 { 2897 {
2921 struct bfa_tskim_s *tskim = cbarg; 2898 struct bfa_tskim_s *tskim = cbarg;
2922 2899
2923 if (!complete) { 2900 if (!complete) {
2924 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); 2901 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2925 return; 2902 return;
2926 } 2903 }
2927 2904
2928 bfa_stats(tskim->itnim, tm_failures); 2905 bfa_stats(tskim->itnim, tm_failures);
2929 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, 2906 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2930 BFI_TSKIM_STS_FAILED); 2907 BFI_TSKIM_STS_FAILED);
2931 } 2908 }
2932 2909
2933 static bfa_boolean_t 2910 static bfa_boolean_t
2934 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun) 2911 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
2935 { 2912 {
2936 switch (tskim->tm_cmnd) { 2913 switch (tskim->tm_cmnd) {
2937 case FCP_TM_TARGET_RESET: 2914 case FCP_TM_TARGET_RESET:
2938 return BFA_TRUE; 2915 return BFA_TRUE;
2939 2916
2940 case FCP_TM_ABORT_TASK_SET: 2917 case FCP_TM_ABORT_TASK_SET:
2941 case FCP_TM_CLEAR_TASK_SET: 2918 case FCP_TM_CLEAR_TASK_SET:
2942 case FCP_TM_LUN_RESET: 2919 case FCP_TM_LUN_RESET:
2943 case FCP_TM_CLEAR_ACA: 2920 case FCP_TM_CLEAR_ACA:
2944 return !memcmp(&tskim->lun, &lun, sizeof(lun)); 2921 return !memcmp(&tskim->lun, &lun, sizeof(lun));
2945 2922
2946 default: 2923 default:
2947 WARN_ON(1); 2924 WARN_ON(1);
2948 } 2925 }
2949 2926
2950 return BFA_FALSE; 2927 return BFA_FALSE;
2951 } 2928 }
2952 2929
2953 /* 2930 /*
2954 * Gather affected IO requests and task management commands. 2931 * Gather affected IO requests and task management commands.
2955 */ 2932 */
2956 static void 2933 static void
2957 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) 2934 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
2958 { 2935 {
2959 struct bfa_itnim_s *itnim = tskim->itnim; 2936 struct bfa_itnim_s *itnim = tskim->itnim;
2960 struct bfa_ioim_s *ioim; 2937 struct bfa_ioim_s *ioim;
2961 struct list_head *qe, *qen; 2938 struct list_head *qe, *qen;
2962 struct scsi_cmnd *cmnd; 2939 struct scsi_cmnd *cmnd;
2963 struct scsi_lun scsilun; 2940 struct scsi_lun scsilun;
2964 2941
2965 INIT_LIST_HEAD(&tskim->io_q); 2942 INIT_LIST_HEAD(&tskim->io_q);
2966 2943
2967 /* 2944 /*
2968 * Gather any active IO requests first. 2945 * Gather any active IO requests first.
2969 */ 2946 */
2970 list_for_each_safe(qe, qen, &itnim->io_q) { 2947 list_for_each_safe(qe, qen, &itnim->io_q) {
2971 ioim = (struct bfa_ioim_s *) qe; 2948 ioim = (struct bfa_ioim_s *) qe;
2972 cmnd = (struct scsi_cmnd *) ioim->dio; 2949 cmnd = (struct scsi_cmnd *) ioim->dio;
2973 int_to_scsilun(cmnd->device->lun, &scsilun); 2950 int_to_scsilun(cmnd->device->lun, &scsilun);
2974 if (bfa_tskim_match_scope(tskim, scsilun)) { 2951 if (bfa_tskim_match_scope(tskim, scsilun)) {
2975 list_del(&ioim->qe); 2952 list_del(&ioim->qe);
2976 list_add_tail(&ioim->qe, &tskim->io_q); 2953 list_add_tail(&ioim->qe, &tskim->io_q);
2977 } 2954 }
2978 } 2955 }
2979 2956
2980 /* 2957 /*
2981 * Failback any pending IO requests immediately. 2958 * Failback any pending IO requests immediately.
2982 */ 2959 */
2983 list_for_each_safe(qe, qen, &itnim->pending_q) { 2960 list_for_each_safe(qe, qen, &itnim->pending_q) {
2984 ioim = (struct bfa_ioim_s *) qe; 2961 ioim = (struct bfa_ioim_s *) qe;
2985 cmnd = (struct scsi_cmnd *) ioim->dio; 2962 cmnd = (struct scsi_cmnd *) ioim->dio;
2986 int_to_scsilun(cmnd->device->lun, &scsilun); 2963 int_to_scsilun(cmnd->device->lun, &scsilun);
2987 if (bfa_tskim_match_scope(tskim, scsilun)) { 2964 if (bfa_tskim_match_scope(tskim, scsilun)) {
2988 list_del(&ioim->qe); 2965 list_del(&ioim->qe);
2989 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); 2966 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2990 bfa_ioim_tov(ioim); 2967 bfa_ioim_tov(ioim);
2991 } 2968 }
2992 } 2969 }
2993 } 2970 }
2994 2971
2995 /* 2972 /*
2996 * IO cleanup completion 2973 * IO cleanup completion
2997 */ 2974 */
2998 static void 2975 static void
2999 bfa_tskim_cleanp_comp(void *tskim_cbarg) 2976 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3000 { 2977 {
3001 struct bfa_tskim_s *tskim = tskim_cbarg; 2978 struct bfa_tskim_s *tskim = tskim_cbarg;
3002 2979
3003 bfa_stats(tskim->itnim, tm_io_comps); 2980 bfa_stats(tskim->itnim, tm_io_comps);
3004 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 2981 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3005 } 2982 }
3006 2983
3007 /* 2984 /*
3008 * Gather affected IO requests and task management commands. 2985 * Gather affected IO requests and task management commands.
3009 */ 2986 */
3010 static void 2987 static void
3011 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) 2988 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3012 { 2989 {
3013 struct bfa_ioim_s *ioim; 2990 struct bfa_ioim_s *ioim;
3014 struct list_head *qe, *qen; 2991 struct list_head *qe, *qen;
3015 2992
3016 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); 2993 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3017 2994
3018 list_for_each_safe(qe, qen, &tskim->io_q) { 2995 list_for_each_safe(qe, qen, &tskim->io_q) {
3019 ioim = (struct bfa_ioim_s *) qe; 2996 ioim = (struct bfa_ioim_s *) qe;
3020 bfa_wc_up(&tskim->wc); 2997 bfa_wc_up(&tskim->wc);
3021 bfa_ioim_cleanup_tm(ioim, tskim); 2998 bfa_ioim_cleanup_tm(ioim, tskim);
3022 } 2999 }
3023 3000
3024 bfa_wc_wait(&tskim->wc); 3001 bfa_wc_wait(&tskim->wc);
3025 } 3002 }
3026 3003
3027 /* 3004 /*
3028 * Send task management request to firmware. 3005 * Send task management request to firmware.
3029 */ 3006 */
3030 static bfa_boolean_t 3007 static bfa_boolean_t
3031 bfa_tskim_send(struct bfa_tskim_s *tskim) 3008 bfa_tskim_send(struct bfa_tskim_s *tskim)
3032 { 3009 {
3033 struct bfa_itnim_s *itnim = tskim->itnim; 3010 struct bfa_itnim_s *itnim = tskim->itnim;
3034 struct bfi_tskim_req_s *m; 3011 struct bfi_tskim_req_s *m;
3035 3012
3036 /* 3013 /*
3037 * check for room in queue to send request now 3014 * check for room in queue to send request now
3038 */ 3015 */
3039 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3016 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3040 if (!m) 3017 if (!m)
3041 return BFA_FALSE; 3018 return BFA_FALSE;
3042 3019
3043 /* 3020 /*
3044 * build i/o request message next 3021 * build i/o request message next
3045 */ 3022 */
3046 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 3023 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3047 bfa_lpuid(tskim->bfa)); 3024 bfa_lpuid(tskim->bfa));
3048 3025
3049 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3026 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3050 m->itn_fhdl = tskim->itnim->rport->fw_handle; 3027 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3051 m->t_secs = tskim->tsecs; 3028 m->t_secs = tskim->tsecs;
3052 m->lun = tskim->lun; 3029 m->lun = tskim->lun;
3053 m->tm_flags = tskim->tm_cmnd; 3030 m->tm_flags = tskim->tm_cmnd;
3054 3031
3055 /* 3032 /*
3056 * queue I/O message to firmware 3033 * queue I/O message to firmware
3057 */ 3034 */
3058 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3035 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3059 return BFA_TRUE; 3036 return BFA_TRUE;
3060 } 3037 }
3061 3038
3062 /* 3039 /*
3063 * Send abort request to cleanup an active TM to firmware. 3040 * Send abort request to cleanup an active TM to firmware.
3064 */ 3041 */
3065 static bfa_boolean_t 3042 static bfa_boolean_t
3066 bfa_tskim_send_abort(struct bfa_tskim_s *tskim) 3043 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3067 { 3044 {
3068 struct bfa_itnim_s *itnim = tskim->itnim; 3045 struct bfa_itnim_s *itnim = tskim->itnim;
3069 struct bfi_tskim_abortreq_s *m; 3046 struct bfi_tskim_abortreq_s *m;
3070 3047
3071 /* 3048 /*
3072 * check for room in queue to send request now 3049 * check for room in queue to send request now
3073 */ 3050 */
3074 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3051 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3075 if (!m) 3052 if (!m)
3076 return BFA_FALSE; 3053 return BFA_FALSE;
3077 3054
3078 /* 3055 /*
3079 * build i/o request message next 3056 * build i/o request message next
3080 */ 3057 */
3081 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, 3058 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3082 bfa_lpuid(tskim->bfa)); 3059 bfa_lpuid(tskim->bfa));
3083 3060
3084 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3061 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3085 3062
3086 /* 3063 /*
3087 * queue I/O message to firmware 3064 * queue I/O message to firmware
3088 */ 3065 */
3089 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3066 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3090 return BFA_TRUE; 3067 return BFA_TRUE;
3091 } 3068 }
3092 3069
/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	/* Account the resume, then re-drive the TM state machine. */
	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3104 3081
3105 /* 3082 /*
3106 * Cleanup IOs associated with a task mangement command on IOC failures. 3083 * Cleanup IOs associated with a task mangement command on IOC failures.
3107 */ 3084 */
3108 static void 3085 static void
3109 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) 3086 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3110 { 3087 {
3111 struct bfa_ioim_s *ioim; 3088 struct bfa_ioim_s *ioim;
3112 struct list_head *qe, *qen; 3089 struct list_head *qe, *qen;
3113 3090
3114 list_for_each_safe(qe, qen, &tskim->io_q) { 3091 list_for_each_safe(qe, qen, &tskim->io_q) {
3115 ioim = (struct bfa_ioim_s *) qe; 3092 ioim = (struct bfa_ioim_s *) qe;
3116 bfa_ioim_iocdisable(ioim); 3093 bfa_ioim_iocdisable(ioim);
3117 } 3094 }
3118 } 3095 }
3119 3096
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/* One fewer outstanding I/O on this TM's wait counter. */
	bfa_wc_down(&tskim->wc);
}
3128 3105
/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/* Suppress the completion notification before raising HWFAIL. */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3139 3116
/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* Notification is wanted here, unlike the iocdisable path. */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3150 3127
3151 /* 3128 /*
3152 * Memory allocation and initialization. 3129 * Memory allocation and initialization.
3153 */ 3130 */
3154 void 3131 void
3155 bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) 3132 bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3156 { 3133 {
3157 struct bfa_tskim_s *tskim; 3134 struct bfa_tskim_s *tskim;
3158 u16 i; 3135 u16 i;
3159 3136
3160 INIT_LIST_HEAD(&fcpim->tskim_free_q); 3137 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3161 3138
3162 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo); 3139 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3163 fcpim->tskim_arr = tskim; 3140 fcpim->tskim_arr = tskim;
3164 3141
3165 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { 3142 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3166 /* 3143 /*
3167 * initialize TSKIM 3144 * initialize TSKIM
3168 */ 3145 */
3169 memset(tskim, 0, sizeof(struct bfa_tskim_s)); 3146 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3170 tskim->tsk_tag = i; 3147 tskim->tsk_tag = i;
3171 tskim->bfa = fcpim->bfa; 3148 tskim->bfa = fcpim->bfa;
3172 tskim->fcpim = fcpim; 3149 tskim->fcpim = fcpim;
3173 tskim->notify = BFA_FALSE; 3150 tskim->notify = BFA_FALSE;
3174 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume, 3151 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3175 tskim); 3152 tskim);
3176 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); 3153 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3177 3154
3178 list_add_tail(&tskim->qe, &fcpim->tskim_free_q); 3155 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3179 } 3156 }
3180 3157
3181 bfa_meminfo_kva(minfo) = (u8 *) tskim; 3158 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3182 } 3159 }
3183 3160
3184 void 3161 void
3185 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 3162 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3186 { 3163 {
3187 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3164 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3188 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; 3165 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3189 struct bfa_tskim_s *tskim; 3166 struct bfa_tskim_s *tskim;
3190 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); 3167 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3191 3168
3192 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); 3169 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3193 WARN_ON(tskim->tsk_tag != tsk_tag); 3170 WARN_ON(tskim->tsk_tag != tsk_tag);
3194 3171
3195 tskim->tsk_status = rsp->tsk_status; 3172 tskim->tsk_status = rsp->tsk_status;
3196 3173
3197 /* 3174 /*
3198 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3175 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3199 * requests. All other statuses are for normal completions. 3176 * requests. All other statuses are for normal completions.
3200 */ 3177 */
3201 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { 3178 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3202 bfa_stats(tskim->itnim, tm_cleanup_comps); 3179 bfa_stats(tskim->itnim, tm_cleanup_comps);
3203 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); 3180 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3204 } else { 3181 } else {
3205 bfa_stats(tskim->itnim, tm_fw_rsps); 3182 bfa_stats(tskim->itnim, tm_fw_rsps);
3206 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); 3183 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3207 } 3184 }
3208 } 3185 }
3209 3186
3210 3187
3211 struct bfa_tskim_s * 3188 struct bfa_tskim_s *
3212 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) 3189 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3213 { 3190 {
3214 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3191 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3215 struct bfa_tskim_s *tskim; 3192 struct bfa_tskim_s *tskim;
3216 3193
3217 bfa_q_deq(&fcpim->tskim_free_q, &tskim); 3194 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3218 3195
3219 if (tskim) 3196 if (tskim)
3220 tskim->dtsk = dtsk; 3197 tskim->dtsk = dtsk;
3221 3198
3222 return tskim; 3199 return tskim;
3223 } 3200 }
3224 3201
/*
 * Return a TSKIM to the module free list.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* The TM must still be linked on its itnim's active queue. */
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3232 3209
3233 /* 3210 /*
3234 * Start a task management command. 3211 * Start a task management command.
3235 * 3212 *
3236 * @param[in] tskim BFA task management command instance 3213 * @param[in] tskim BFA task management command instance
3237 * @param[in] itnim i-t nexus for the task management command 3214 * @param[in] itnim i-t nexus for the task management command
3238 * @param[in] lun lun, if applicable 3215 * @param[in] lun lun, if applicable
3239 * @param[in] tm_cmnd Task management command code. 3216 * @param[in] tm_cmnd Task management command code.
3240 * @param[in] t_secs Timeout in seconds 3217 * @param[in] t_secs Timeout in seconds
3241 * 3218 *
3242 * @return None. 3219 * @return None.
3243 */ 3220 */
3244 void 3221 void
3245 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, 3222 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3246 struct scsi_lun lun, 3223 struct scsi_lun lun,
3247 enum fcp_tm_cmnd tm_cmnd, u8 tsecs) 3224 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3248 { 3225 {
3249 tskim->itnim = itnim; 3226 tskim->itnim = itnim;
3250 tskim->lun = lun; 3227 tskim->lun = lun;
3251 tskim->tm_cmnd = tm_cmnd; 3228 tskim->tm_cmnd = tm_cmnd;
3252 tskim->tsecs = tsecs; 3229 tskim->tsecs = tsecs;
3253 tskim->notify = BFA_FALSE; 3230 tskim->notify = BFA_FALSE;
3254 bfa_stats(itnim, tm_cmnds); 3231 bfa_stats(itnim, tm_cmnds);
3255 3232
3256 list_add_tail(&tskim->qe, &itnim->tsk_q); 3233 list_add_tail(&tskim->qe, &itnim->tsk_q);
3257 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); 3234 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3258 } 3235 }
3259 3236
drivers/scsi/bfa/bfa_svc.c
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 #include "bfad_drv.h" 18 #include "bfad_drv.h"
19 #include "bfa_plog.h" 19 #include "bfa_plog.h"
20 #include "bfa_cs.h" 20 #include "bfa_cs.h"
21 #include "bfa_modules.h" 21 #include "bfa_modules.h"
22 22
23 BFA_TRC_FILE(HAL, FCXP); 23 BFA_TRC_FILE(HAL, FCXP);
24 BFA_MODULE(fcxp); 24 BFA_MODULE(fcxp);
25 BFA_MODULE(sgpg); 25 BFA_MODULE(sgpg);
26 BFA_MODULE(lps); 26 BFA_MODULE(lps);
27 BFA_MODULE(fcport); 27 BFA_MODULE(fcport);
28 BFA_MODULE(rport); 28 BFA_MODULE(rport);
29 BFA_MODULE(uf); 29 BFA_MODULE(uf);
30 30
31 /* 31 /*
32 * LPS related definitions 32 * LPS related definitions
33 */ 33 */
34 #define BFA_LPS_MIN_LPORTS (1) 34 #define BFA_LPS_MIN_LPORTS (1)
35 #define BFA_LPS_MAX_LPORTS (256) 35 #define BFA_LPS_MAX_LPORTS (256)
36 36
37 /* 37 /*
38 * Maximum Vports supported per physical port or vf. 38 * Maximum Vports supported per physical port or vf.
39 */ 39 */
40 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 40 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 41 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
42 42
43 43
44 /* 44 /*
45 * FC PORT related definitions 45 * FC PORT related definitions
46 */ 46 */
47 /* 47 /*
48 * The port is considered disabled if corresponding physical port or IOC are 48 * The port is considered disabled if corresponding physical port or IOC are
49 * disabled explicitly 49 * disabled explicitly
50 */ 50 */
51 #define BFA_PORT_IS_DISABLED(bfa) \ 51 #define BFA_PORT_IS_DISABLED(bfa) \
52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ 52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54 54
55 /* 55 /*
56 * BFA port state machine events 56 * BFA port state machine events
57 */ 57 */
58 enum bfa_fcport_sm_event { 58 enum bfa_fcport_sm_event {
59 BFA_FCPORT_SM_START = 1, /* start port state machine */ 59 BFA_FCPORT_SM_START = 1, /* start port state machine */
60 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ 60 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
61 BFA_FCPORT_SM_ENABLE = 3, /* enable port */ 61 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
62 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ 62 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
63 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ 63 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
64 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ 64 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
65 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 65 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
66 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ 66 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
67 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 67 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
68 }; 68 };
69 69
70 /* 70 /*
71 * BFA port link notification state machine events 71 * BFA port link notification state machine events
72 */ 72 */
73 73
74 enum bfa_fcport_ln_sm_event { 74 enum bfa_fcport_ln_sm_event {
75 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ 75 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
76 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ 76 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
77 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 77 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
78 }; 78 };
79 79
80 /* 80 /*
81 * RPORT related definitions 81 * RPORT related definitions
82 */ 82 */
83 #define bfa_rport_offline_cb(__rp) do { \ 83 #define bfa_rport_offline_cb(__rp) do { \
84 if ((__rp)->bfa->fcs) \ 84 if ((__rp)->bfa->fcs) \
85 bfa_cb_rport_offline((__rp)->rport_drv); \ 85 bfa_cb_rport_offline((__rp)->rport_drv); \
86 else { \ 86 else { \
87 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ 87 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
88 __bfa_cb_rport_offline, (__rp)); \ 88 __bfa_cb_rport_offline, (__rp)); \
89 } \ 89 } \
90 } while (0) 90 } while (0)
91 91
92 #define bfa_rport_online_cb(__rp) do { \ 92 #define bfa_rport_online_cb(__rp) do { \
93 if ((__rp)->bfa->fcs) \ 93 if ((__rp)->bfa->fcs) \
94 bfa_cb_rport_online((__rp)->rport_drv); \ 94 bfa_cb_rport_online((__rp)->rport_drv); \
95 else { \ 95 else { \
96 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ 96 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
97 __bfa_cb_rport_online, (__rp)); \ 97 __bfa_cb_rport_online, (__rp)); \
98 } \ 98 } \
99 } while (0) 99 } while (0)
100 100
101 /* 101 /*
102 * forward declarations FCXP related functions 102 * forward declarations FCXP related functions
103 */ 103 */
104 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); 104 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
105 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, 105 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
106 struct bfi_fcxp_send_rsp_s *fcxp_rsp); 106 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
107 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, 107 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
108 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); 108 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
109 static void bfa_fcxp_qresume(void *cbarg); 109 static void bfa_fcxp_qresume(void *cbarg);
110 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 110 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
111 struct bfi_fcxp_send_req_s *send_req); 111 struct bfi_fcxp_send_req_s *send_req);
112 112
113 /* 113 /*
114 * forward declarations for LPS functions 114 * forward declarations for LPS functions
115 */ 115 */
116 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 116 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
117 u32 *dm_len); 117 u32 *dm_len);
118 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, 118 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 struct bfa_iocfc_cfg_s *cfg, 119 struct bfa_iocfc_cfg_s *cfg,
120 struct bfa_meminfo_s *meminfo, 120 struct bfa_meminfo_s *meminfo,
121 struct bfa_pcidev_s *pcidev); 121 struct bfa_pcidev_s *pcidev);
122 static void bfa_lps_detach(struct bfa_s *bfa); 122 static void bfa_lps_detach(struct bfa_s *bfa);
123 static void bfa_lps_start(struct bfa_s *bfa); 123 static void bfa_lps_start(struct bfa_s *bfa);
124 static void bfa_lps_stop(struct bfa_s *bfa); 124 static void bfa_lps_stop(struct bfa_s *bfa);
125 static void bfa_lps_iocdisable(struct bfa_s *bfa); 125 static void bfa_lps_iocdisable(struct bfa_s *bfa);
126 static void bfa_lps_login_rsp(struct bfa_s *bfa, 126 static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 struct bfi_lps_login_rsp_s *rsp); 127 struct bfi_lps_login_rsp_s *rsp);
128 static void bfa_lps_logout_rsp(struct bfa_s *bfa, 128 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 struct bfi_lps_logout_rsp_s *rsp); 129 struct bfi_lps_logout_rsp_s *rsp);
130 static void bfa_lps_reqq_resume(void *lps_arg); 130 static void bfa_lps_reqq_resume(void *lps_arg);
131 static void bfa_lps_free(struct bfa_lps_s *lps); 131 static void bfa_lps_free(struct bfa_lps_s *lps);
132 static void bfa_lps_send_login(struct bfa_lps_s *lps); 132 static void bfa_lps_send_login(struct bfa_lps_s *lps);
133 static void bfa_lps_send_logout(struct bfa_lps_s *lps); 133 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
134 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps); 134 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
135 static void bfa_lps_login_comp(struct bfa_lps_s *lps); 135 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
136 static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 136 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 137 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
138 138
139 /* 139 /*
140 * forward declaration for LPS state machine 140 * forward declaration for LPS state machine
141 */ 141 */
142 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); 142 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
143 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); 143 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event 144 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
145 event); 145 event);
146 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); 146 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
147 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, 147 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
148 enum bfa_lps_event event); 148 enum bfa_lps_event event);
149 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); 149 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
150 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 150 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
151 event); 151 event);
152 152
153 /* 153 /*
154 * forward declaration for FC Port functions 154 * forward declaration for FC Port functions
155 */ 155 */
156 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); 156 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
157 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); 157 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
158 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); 158 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
159 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); 159 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); 160 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
161 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); 161 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
162 static void bfa_fcport_scn(struct bfa_fcport_s *fcport, 162 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
163 enum bfa_port_linkstate event, bfa_boolean_t trunk); 163 enum bfa_port_linkstate event, bfa_boolean_t trunk);
164 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, 164 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
165 enum bfa_port_linkstate event); 165 enum bfa_port_linkstate event);
166 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); 166 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
167 static void bfa_fcport_stats_get_timeout(void *cbarg); 167 static void bfa_fcport_stats_get_timeout(void *cbarg);
168 static void bfa_fcport_stats_clr_timeout(void *cbarg); 168 static void bfa_fcport_stats_clr_timeout(void *cbarg);
169 static void bfa_trunk_iocdisable(struct bfa_s *bfa); 169 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
170 170
171 /* 171 /*
172 * forward declaration for FC PORT state machine 172 * forward declaration for FC PORT state machine
173 */ 173 */
174 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, 174 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
175 enum bfa_fcport_sm_event event); 175 enum bfa_fcport_sm_event event);
176 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, 176 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
177 enum bfa_fcport_sm_event event); 177 enum bfa_fcport_sm_event event);
178 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, 178 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
179 enum bfa_fcport_sm_event event); 179 enum bfa_fcport_sm_event event);
180 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, 180 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
181 enum bfa_fcport_sm_event event); 181 enum bfa_fcport_sm_event event);
182 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, 182 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
183 enum bfa_fcport_sm_event event); 183 enum bfa_fcport_sm_event event);
184 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, 184 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
185 enum bfa_fcport_sm_event event); 185 enum bfa_fcport_sm_event event);
186 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, 186 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
187 enum bfa_fcport_sm_event event); 187 enum bfa_fcport_sm_event event);
188 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, 188 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
189 enum bfa_fcport_sm_event event); 189 enum bfa_fcport_sm_event event);
190 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, 190 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
191 enum bfa_fcport_sm_event event); 191 enum bfa_fcport_sm_event event);
192 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, 192 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
193 enum bfa_fcport_sm_event event); 193 enum bfa_fcport_sm_event event);
194 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, 194 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event); 195 enum bfa_fcport_sm_event event);
196 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 196 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
197 enum bfa_fcport_sm_event event); 197 enum bfa_fcport_sm_event event);
198 198
199 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 199 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
200 enum bfa_fcport_ln_sm_event event); 200 enum bfa_fcport_ln_sm_event event);
201 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, 201 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event); 202 enum bfa_fcport_ln_sm_event event);
203 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, 203 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
204 enum bfa_fcport_ln_sm_event event); 204 enum bfa_fcport_ln_sm_event event);
205 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, 205 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
206 enum bfa_fcport_ln_sm_event event); 206 enum bfa_fcport_ln_sm_event event);
207 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, 207 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
208 enum bfa_fcport_ln_sm_event event); 208 enum bfa_fcport_ln_sm_event event);
209 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, 209 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
210 enum bfa_fcport_ln_sm_event event); 210 enum bfa_fcport_ln_sm_event event);
211 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, 211 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
212 enum bfa_fcport_ln_sm_event event); 212 enum bfa_fcport_ln_sm_event event);
213 213
/*
 * Mapping from fcport state machine handler functions to the
 * externally visible BFA_PORT_ST_* port states.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
228 228
229 229
230 /* 230 /*
231 * forward declaration for RPORT related functions 231 * forward declaration for RPORT related functions
232 */ 232 */
233 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); 233 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
234 static void bfa_rport_free(struct bfa_rport_s *rport); 234 static void bfa_rport_free(struct bfa_rport_s *rport);
235 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); 235 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
236 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); 236 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
237 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); 237 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
238 static void __bfa_cb_rport_online(void *cbarg, 238 static void __bfa_cb_rport_online(void *cbarg,
239 bfa_boolean_t complete); 239 bfa_boolean_t complete);
240 static void __bfa_cb_rport_offline(void *cbarg, 240 static void __bfa_cb_rport_offline(void *cbarg,
241 bfa_boolean_t complete); 241 bfa_boolean_t complete);
242 242
243 /* 243 /*
244 * forward declaration for RPORT state machine 244 * forward declaration for RPORT state machine
245 */ 245 */
246 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, 246 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
247 enum bfa_rport_event event); 247 enum bfa_rport_event event);
248 static void bfa_rport_sm_created(struct bfa_rport_s *rp, 248 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
249 enum bfa_rport_event event); 249 enum bfa_rport_event event);
250 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, 250 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
251 enum bfa_rport_event event); 251 enum bfa_rport_event event);
252 static void bfa_rport_sm_online(struct bfa_rport_s *rp, 252 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
253 enum bfa_rport_event event); 253 enum bfa_rport_event event);
254 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, 254 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
255 enum bfa_rport_event event); 255 enum bfa_rport_event event);
256 static void bfa_rport_sm_offline(struct bfa_rport_s *rp, 256 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
257 enum bfa_rport_event event); 257 enum bfa_rport_event event);
258 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, 258 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
259 enum bfa_rport_event event); 259 enum bfa_rport_event event);
260 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, 260 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
261 enum bfa_rport_event event); 261 enum bfa_rport_event event);
262 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, 262 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
263 enum bfa_rport_event event); 263 enum bfa_rport_event event);
264 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, 264 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
265 enum bfa_rport_event event); 265 enum bfa_rport_event event);
266 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, 266 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
267 enum bfa_rport_event event); 267 enum bfa_rport_event event);
268 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, 268 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
269 enum bfa_rport_event event); 269 enum bfa_rport_event event);
270 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 270 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
271 enum bfa_rport_event event); 271 enum bfa_rport_event event);
272 272
273 /* 273 /*
274 * PLOG related definitions 274 * PLOG related definitions
275 */ 275 */
276 static int 276 static int
277 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) 277 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
278 { 278 {
279 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && 279 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
280 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING)) 280 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
281 return 1; 281 return 1;
282 282
283 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && 283 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
284 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ)) 284 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
285 return 1; 285 return 1;
286 286
287 return 0; 287 return 0;
288 } 288 }
289 289
290 static u64 290 static u64
291 bfa_get_log_time(void) 291 bfa_get_log_time(void)
292 { 292 {
293 u64 system_time = 0; 293 u64 system_time = 0;
294 struct timeval tv; 294 struct timeval tv;
295 do_gettimeofday(&tv); 295 do_gettimeofday(&tv);
296 296
297 /* We are interested in seconds only. */ 297 /* We are interested in seconds only. */
298 system_time = tv.tv_sec; 298 system_time = tv.tv_sec;
299 return system_time; 299 return system_time;
300 } 300 }
301 301
302 static void 302 static void
303 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) 303 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
304 { 304 {
305 u16 tail; 305 u16 tail;
306 struct bfa_plog_rec_s *pl_recp; 306 struct bfa_plog_rec_s *pl_recp;
307 307
308 if (plog->plog_enabled == 0) 308 if (plog->plog_enabled == 0)
309 return; 309 return;
310 310
311 if (plkd_validate_logrec(pl_rec)) { 311 if (plkd_validate_logrec(pl_rec)) {
312 WARN_ON(1); 312 WARN_ON(1);
313 return; 313 return;
314 } 314 }
315 315
316 tail = plog->tail; 316 tail = plog->tail;
317 317
318 pl_recp = &(plog->plog_recs[tail]); 318 pl_recp = &(plog->plog_recs[tail]);
319 319
320 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 320 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
321 321
322 pl_recp->tv = bfa_get_log_time(); 322 pl_recp->tv = bfa_get_log_time();
323 BFA_PL_LOG_REC_INCR(plog->tail); 323 BFA_PL_LOG_REC_INCR(plog->tail);
324 324
325 if (plog->head == plog->tail) 325 if (plog->head == plog->tail)
326 BFA_PL_LOG_REC_INCR(plog->head); 326 BFA_PL_LOG_REC_INCR(plog->head);
327 } 327 }
328 328
329 void 329 void
330 bfa_plog_init(struct bfa_plog_s *plog) 330 bfa_plog_init(struct bfa_plog_s *plog)
331 { 331 {
332 memset((char *)plog, 0, sizeof(struct bfa_plog_s)); 332 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
333 333
334 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); 334 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
335 plog->head = plog->tail = 0; 335 plog->head = plog->tail = 0;
336 plog->plog_enabled = 1; 336 plog->plog_enabled = 1;
337 } 337 }
338 338
339 void 339 void
340 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 340 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
341 enum bfa_plog_eid event, 341 enum bfa_plog_eid event,
342 u16 misc, char *log_str) 342 u16 misc, char *log_str)
343 { 343 {
344 struct bfa_plog_rec_s lp; 344 struct bfa_plog_rec_s lp;
345 345
346 if (plog->plog_enabled) { 346 if (plog->plog_enabled) {
347 memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 347 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
348 lp.mid = mid; 348 lp.mid = mid;
349 lp.eid = event; 349 lp.eid = event;
350 lp.log_type = BFA_PL_LOG_TYPE_STRING; 350 lp.log_type = BFA_PL_LOG_TYPE_STRING;
351 lp.misc = misc; 351 lp.misc = misc;
352 strncpy(lp.log_entry.string_log, log_str, 352 strncpy(lp.log_entry.string_log, log_str,
353 BFA_PL_STRING_LOG_SZ - 1); 353 BFA_PL_STRING_LOG_SZ - 1);
354 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; 354 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
355 bfa_plog_add(plog, &lp); 355 bfa_plog_add(plog, &lp);
356 } 356 }
357 } 357 }
358 358
359 void 359 void
360 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 360 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
361 enum bfa_plog_eid event, 361 enum bfa_plog_eid event,
362 u16 misc, u32 *intarr, u32 num_ints) 362 u16 misc, u32 *intarr, u32 num_ints)
363 { 363 {
364 struct bfa_plog_rec_s lp; 364 struct bfa_plog_rec_s lp;
365 u32 i; 365 u32 i;
366 366
367 if (num_ints > BFA_PL_INT_LOG_SZ) 367 if (num_ints > BFA_PL_INT_LOG_SZ)
368 num_ints = BFA_PL_INT_LOG_SZ; 368 num_ints = BFA_PL_INT_LOG_SZ;
369 369
370 if (plog->plog_enabled) { 370 if (plog->plog_enabled) {
371 memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 371 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
372 lp.mid = mid; 372 lp.mid = mid;
373 lp.eid = event; 373 lp.eid = event;
374 lp.log_type = BFA_PL_LOG_TYPE_INT; 374 lp.log_type = BFA_PL_LOG_TYPE_INT;
375 lp.misc = misc; 375 lp.misc = misc;
376 376
377 for (i = 0; i < num_ints; i++) 377 for (i = 0; i < num_ints; i++)
378 lp.log_entry.int_log[i] = intarr[i]; 378 lp.log_entry.int_log[i] = intarr[i];
379 379
380 lp.log_num_ints = (u8) num_ints; 380 lp.log_num_ints = (u8) num_ints;
381 381
382 bfa_plog_add(plog, &lp); 382 bfa_plog_add(plog, &lp);
383 } 383 }
384 } 384 }
385 385
386 void 386 void
387 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 387 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
388 enum bfa_plog_eid event, 388 enum bfa_plog_eid event,
389 u16 misc, struct fchs_s *fchdr) 389 u16 misc, struct fchs_s *fchdr)
390 { 390 {
391 struct bfa_plog_rec_s lp; 391 struct bfa_plog_rec_s lp;
392 u32 *tmp_int = (u32 *) fchdr; 392 u32 *tmp_int = (u32 *) fchdr;
393 u32 ints[BFA_PL_INT_LOG_SZ]; 393 u32 ints[BFA_PL_INT_LOG_SZ];
394 394
395 if (plog->plog_enabled) { 395 if (plog->plog_enabled) {
396 memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 396 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
397 397
398 ints[0] = tmp_int[0]; 398 ints[0] = tmp_int[0];
399 ints[1] = tmp_int[1]; 399 ints[1] = tmp_int[1];
400 ints[2] = tmp_int[4]; 400 ints[2] = tmp_int[4];
401 401
402 bfa_plog_intarr(plog, mid, event, misc, ints, 3); 402 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
403 } 403 }
404 } 404 }
405 405
406 void 406 void
407 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, 407 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
408 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, 408 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
409 u32 pld_w0) 409 u32 pld_w0)
410 { 410 {
411 struct bfa_plog_rec_s lp; 411 struct bfa_plog_rec_s lp;
412 u32 *tmp_int = (u32 *) fchdr; 412 u32 *tmp_int = (u32 *) fchdr;
413 u32 ints[BFA_PL_INT_LOG_SZ]; 413 u32 ints[BFA_PL_INT_LOG_SZ];
414 414
415 if (plog->plog_enabled) { 415 if (plog->plog_enabled) {
416 memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 416 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
417 417
418 ints[0] = tmp_int[0]; 418 ints[0] = tmp_int[0];
419 ints[1] = tmp_int[1]; 419 ints[1] = tmp_int[1];
420 ints[2] = tmp_int[4]; 420 ints[2] = tmp_int[4];
421 ints[3] = pld_w0; 421 ints[3] = pld_w0;
422 422
423 bfa_plog_intarr(plog, mid, event, misc, ints, 4); 423 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
424 } 424 }
425 } 425 }
426 426
427 427
428 /* 428 /*
429 * fcxp_pvt BFA FCXP private functions 429 * fcxp_pvt BFA FCXP private functions
430 */ 430 */
431 431
432 static void 432 static void
433 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) 433 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
434 { 434 {
435 u8 *dm_kva = NULL; 435 u8 *dm_kva = NULL;
436 u64 dm_pa; 436 u64 dm_pa;
437 u32 buf_pool_sz; 437 u32 buf_pool_sz;
438 438
439 dm_kva = bfa_meminfo_dma_virt(mi); 439 dm_kva = bfa_meminfo_dma_virt(mi);
440 dm_pa = bfa_meminfo_dma_phys(mi); 440 dm_pa = bfa_meminfo_dma_phys(mi);
441 441
442 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps; 442 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
443 443
444 /* 444 /*
445 * Initialize the fcxp req payload list 445 * Initialize the fcxp req payload list
446 */ 446 */
447 mod->req_pld_list_kva = dm_kva; 447 mod->req_pld_list_kva = dm_kva;
448 mod->req_pld_list_pa = dm_pa; 448 mod->req_pld_list_pa = dm_pa;
449 dm_kva += buf_pool_sz; 449 dm_kva += buf_pool_sz;
450 dm_pa += buf_pool_sz; 450 dm_pa += buf_pool_sz;
451 memset(mod->req_pld_list_kva, 0, buf_pool_sz); 451 memset(mod->req_pld_list_kva, 0, buf_pool_sz);
452 452
453 /* 453 /*
454 * Initialize the fcxp rsp payload list 454 * Initialize the fcxp rsp payload list
455 */ 455 */
456 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps; 456 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
457 mod->rsp_pld_list_kva = dm_kva; 457 mod->rsp_pld_list_kva = dm_kva;
458 mod->rsp_pld_list_pa = dm_pa; 458 mod->rsp_pld_list_pa = dm_pa;
459 dm_kva += buf_pool_sz; 459 dm_kva += buf_pool_sz;
460 dm_pa += buf_pool_sz; 460 dm_pa += buf_pool_sz;
461 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); 461 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
462 462
463 bfa_meminfo_dma_virt(mi) = dm_kva; 463 bfa_meminfo_dma_virt(mi) = dm_kva;
464 bfa_meminfo_dma_phys(mi) = dm_pa; 464 bfa_meminfo_dma_phys(mi) = dm_pa;
465 } 465 }
466 466
467 static void 467 static void
468 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) 468 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
469 { 469 {
470 u16 i; 470 u16 i;
471 struct bfa_fcxp_s *fcxp; 471 struct bfa_fcxp_s *fcxp;
472 472
473 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); 473 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
474 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); 474 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
475 475
476 INIT_LIST_HEAD(&mod->fcxp_free_q); 476 INIT_LIST_HEAD(&mod->fcxp_free_q);
477 INIT_LIST_HEAD(&mod->fcxp_active_q); 477 INIT_LIST_HEAD(&mod->fcxp_active_q);
478 478
479 mod->fcxp_list = fcxp; 479 mod->fcxp_list = fcxp;
480 480
481 for (i = 0; i < mod->num_fcxps; i++) { 481 for (i = 0; i < mod->num_fcxps; i++) {
482 fcxp->fcxp_mod = mod; 482 fcxp->fcxp_mod = mod;
483 fcxp->fcxp_tag = i; 483 fcxp->fcxp_tag = i;
484 484
485 list_add_tail(&fcxp->qe, &mod->fcxp_free_q); 485 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
486 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp); 486 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
487 fcxp->reqq_waiting = BFA_FALSE; 487 fcxp->reqq_waiting = BFA_FALSE;
488 488
489 fcxp = fcxp + 1; 489 fcxp = fcxp + 1;
490 } 490 }
491 491
492 bfa_meminfo_kva(mi) = (void *)fcxp; 492 bfa_meminfo_kva(mi) = (void *)fcxp;
493 } 493 }
494 494
495 static void 495 static void
496 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 496 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
497 u32 *dm_len) 497 u32 *dm_len)
498 { 498 {
499 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs; 499 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
500 500
501 if (num_fcxp_reqs == 0) 501 if (num_fcxp_reqs == 0)
502 return; 502 return;
503 503
504 /* 504 /*
505 * Account for req/rsp payload 505 * Account for req/rsp payload
506 */ 506 */
507 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; 507 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
508 if (cfg->drvcfg.min_cfg) 508 if (cfg->drvcfg.min_cfg)
509 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; 509 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
510 else 510 else
511 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs; 511 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
512 512
513 /* 513 /*
514 * Account for fcxp structs 514 * Account for fcxp structs
515 */ 515 */
516 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs; 516 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
517 } 517 }
518 518
519 static void 519 static void
520 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 520 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
521 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 521 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
522 { 522 {
523 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 523 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
524 524
525 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); 525 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
526 mod->bfa = bfa; 526 mod->bfa = bfa;
527 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 527 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
528 528
529 /* 529 /*
530 * Initialize FCXP request and response payload sizes. 530 * Initialize FCXP request and response payload sizes.
531 */ 531 */
532 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; 532 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
533 if (!cfg->drvcfg.min_cfg) 533 if (!cfg->drvcfg.min_cfg)
534 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ; 534 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
535 535
536 INIT_LIST_HEAD(&mod->wait_q); 536 INIT_LIST_HEAD(&mod->wait_q);
537 537
538 claim_fcxp_req_rsp_mem(mod, meminfo); 538 claim_fcxp_req_rsp_mem(mod, meminfo);
539 claim_fcxps_mem(mod, meminfo); 539 claim_fcxps_mem(mod, meminfo);
540 } 540 }
541 541
/*
 * Module detach hook — intentionally empty; the FCXP module has no
 * detach-time work.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
546 546
/*
 * Module start hook — intentionally empty; nothing to do at start.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
551 551
/*
 * Module stop hook — intentionally empty; nothing to do at stop.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
556 556
557 static void 557 static void
558 bfa_fcxp_iocdisable(struct bfa_s *bfa) 558 bfa_fcxp_iocdisable(struct bfa_s *bfa)
559 { 559 {
560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
561 struct bfa_fcxp_s *fcxp; 561 struct bfa_fcxp_s *fcxp;
562 struct list_head *qe, *qen; 562 struct list_head *qe, *qen;
563 563
564 list_for_each_safe(qe, qen, &mod->fcxp_active_q) { 564 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
565 fcxp = (struct bfa_fcxp_s *) qe; 565 fcxp = (struct bfa_fcxp_s *) qe;
566 if (fcxp->caller == NULL) { 566 if (fcxp->caller == NULL) {
567 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, 567 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
568 BFA_STATUS_IOC_FAILURE, 0, 0, NULL); 568 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
569 bfa_fcxp_free(fcxp); 569 bfa_fcxp_free(fcxp);
570 } else { 570 } else {
571 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE; 571 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
572 bfa_cb_queue(bfa, &fcxp->hcb_qe, 572 bfa_cb_queue(bfa, &fcxp->hcb_qe,
573 __bfa_fcxp_send_cbfn, fcxp); 573 __bfa_fcxp_send_cbfn, fcxp);
574 } 574 }
575 } 575 }
576 } 576 }
577 577
578 static struct bfa_fcxp_s * 578 static struct bfa_fcxp_s *
579 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm) 579 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
580 { 580 {
581 struct bfa_fcxp_s *fcxp; 581 struct bfa_fcxp_s *fcxp;
582 582
583 bfa_q_deq(&fm->fcxp_free_q, &fcxp); 583 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
584 584
585 if (fcxp) 585 if (fcxp)
586 list_add_tail(&fcxp->qe, &fm->fcxp_active_q); 586 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
587 587
588 return fcxp; 588 return fcxp;
589 } 589 }
590 590
591 static void 591 static void
592 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, 592 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
593 struct bfa_s *bfa, 593 struct bfa_s *bfa,
594 u8 *use_ibuf, 594 u8 *use_ibuf,
595 u32 *nr_sgles, 595 u32 *nr_sgles,
596 bfa_fcxp_get_sgaddr_t *r_sga_cbfn, 596 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
597 bfa_fcxp_get_sglen_t *r_sglen_cbfn, 597 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
598 struct list_head *r_sgpg_q, 598 struct list_head *r_sgpg_q,
599 int n_sgles, 599 int n_sgles,
600 bfa_fcxp_get_sgaddr_t sga_cbfn, 600 bfa_fcxp_get_sgaddr_t sga_cbfn,
601 bfa_fcxp_get_sglen_t sglen_cbfn) 601 bfa_fcxp_get_sglen_t sglen_cbfn)
602 { 602 {
603 603
604 WARN_ON(bfa == NULL); 604 WARN_ON(bfa == NULL);
605 605
606 bfa_trc(bfa, fcxp->fcxp_tag); 606 bfa_trc(bfa, fcxp->fcxp_tag);
607 607
608 if (n_sgles == 0) { 608 if (n_sgles == 0) {
609 *use_ibuf = 1; 609 *use_ibuf = 1;
610 } else { 610 } else {
611 WARN_ON(*sga_cbfn == NULL); 611 WARN_ON(*sga_cbfn == NULL);
612 WARN_ON(*sglen_cbfn == NULL); 612 WARN_ON(*sglen_cbfn == NULL);
613 613
614 *use_ibuf = 0; 614 *use_ibuf = 0;
615 *r_sga_cbfn = sga_cbfn; 615 *r_sga_cbfn = sga_cbfn;
616 *r_sglen_cbfn = sglen_cbfn; 616 *r_sglen_cbfn = sglen_cbfn;
617 617
618 *nr_sgles = n_sgles; 618 *nr_sgles = n_sgles;
619 619
620 /* 620 /*
621 * alloc required sgpgs 621 * alloc required sgpgs
622 */ 622 */
623 if (n_sgles > BFI_SGE_INLINE) 623 if (n_sgles > BFI_SGE_INLINE)
624 WARN_ON(1); 624 WARN_ON(1);
625 } 625 }
626 626
627 } 627 }
628 628
629 static void 629 static void
630 bfa_fcxp_init(struct bfa_fcxp_s *fcxp, 630 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
631 void *caller, struct bfa_s *bfa, int nreq_sgles, 631 void *caller, struct bfa_s *bfa, int nreq_sgles,
632 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, 632 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
633 bfa_fcxp_get_sglen_t req_sglen_cbfn, 633 bfa_fcxp_get_sglen_t req_sglen_cbfn,
634 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, 634 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
635 bfa_fcxp_get_sglen_t rsp_sglen_cbfn) 635 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
636 { 636 {
637 637
638 WARN_ON(bfa == NULL); 638 WARN_ON(bfa == NULL);
639 639
640 bfa_trc(bfa, fcxp->fcxp_tag); 640 bfa_trc(bfa, fcxp->fcxp_tag);
641 641
642 fcxp->caller = caller; 642 fcxp->caller = caller;
643 643
644 bfa_fcxp_init_reqrsp(fcxp, bfa, 644 bfa_fcxp_init_reqrsp(fcxp, bfa,
645 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn, 645 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
646 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q, 646 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
647 nreq_sgles, req_sga_cbfn, req_sglen_cbfn); 647 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
648 648
649 bfa_fcxp_init_reqrsp(fcxp, bfa, 649 bfa_fcxp_init_reqrsp(fcxp, bfa,
650 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn, 650 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
651 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q, 651 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
652 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn); 652 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
653 653
654 } 654 }
655 655
656 static void 656 static void
657 bfa_fcxp_put(struct bfa_fcxp_s *fcxp) 657 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
658 { 658 {
659 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 659 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
660 struct bfa_fcxp_wqe_s *wqe; 660 struct bfa_fcxp_wqe_s *wqe;
661 661
662 bfa_q_deq(&mod->wait_q, &wqe); 662 bfa_q_deq(&mod->wait_q, &wqe);
663 if (wqe) { 663 if (wqe) {
664 bfa_trc(mod->bfa, fcxp->fcxp_tag); 664 bfa_trc(mod->bfa, fcxp->fcxp_tag);
665 665
666 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, 666 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
667 wqe->nrsp_sgles, wqe->req_sga_cbfn, 667 wqe->nrsp_sgles, wqe->req_sga_cbfn,
668 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, 668 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
669 wqe->rsp_sglen_cbfn); 669 wqe->rsp_sglen_cbfn);
670 670
671 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); 671 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
672 return; 672 return;
673 } 673 }
674 674
675 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); 675 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
676 list_del(&fcxp->qe); 676 list_del(&fcxp->qe);
677 list_add_tail(&fcxp->qe, &mod->fcxp_free_q); 677 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
678 } 678 }
679 679
/*
 * No-op send-completion callback: installed where a completion must be
 * ignored, so the response is simply discarded.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
687 687
688 static void 688 static void
689 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete) 689 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
690 { 690 {
691 struct bfa_fcxp_s *fcxp = cbarg; 691 struct bfa_fcxp_s *fcxp = cbarg;
692 692
693 if (complete) { 693 if (complete) {
694 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, 694 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
695 fcxp->rsp_status, fcxp->rsp_len, 695 fcxp->rsp_status, fcxp->rsp_len,
696 fcxp->residue_len, &fcxp->rsp_fchs); 696 fcxp->residue_len, &fcxp->rsp_fchs);
697 } else { 697 } else {
698 bfa_fcxp_free(fcxp); 698 bfa_fcxp_free(fcxp);
699 } 699 }
700 } 700 }
701 701
/*
 * Firmware completion handler for an FCXP send. Looks up the fcxp by
 * tag, converts response fields from big-endian, logs the RX, and then
 * delivers the completion either synchronously (no caller context) or
 * deferred through the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	/* Fix up endianness of the firmware-supplied lengths in place. */
	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* No caller context: complete synchronously. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Save the response and defer via callback queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
751 751
752 static void 752 static void
753 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa) 753 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
754 { 754 {
755 union bfi_addr_u sga_zero = { {0} }; 755 union bfi_addr_u sga_zero = { {0} };
756 756
757 sge->sg_len = reqlen; 757 sge->sg_len = reqlen;
758 sge->flags = BFI_SGE_DATA_LAST; 758 sge->flags = BFI_SGE_DATA_LAST;
759 bfa_dma_addr_set(sge[0].sga, req_pa); 759 bfa_dma_addr_set(sge[0].sga, req_pa);
760 bfa_sge_to_be(sge); 760 bfa_sge_to_be(sge);
761 sge++; 761 sge++;
762 762
763 sge->sga = sga_zero; 763 sge->sga = sga_zero;
764 sge->sg_len = reqlen; 764 sge->sg_len = reqlen;
765 sge->flags = BFI_SGE_PGDLEN; 765 sge->flags = BFI_SGE_PGDLEN;
766 bfa_sge_to_be(sge); 766 bfa_sge_to_be(sge);
767 } 767 }
768 768
769 static void 769 static void
770 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, 770 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
771 struct fchs_s *fchs) 771 struct fchs_s *fchs)
772 { 772 {
773 /* 773 /*
774 * TODO: TX ox_id 774 * TODO: TX ox_id
775 */ 775 */
776 if (reqlen > 0) { 776 if (reqlen > 0) {
777 if (fcxp->use_ireqbuf) { 777 if (fcxp->use_ireqbuf) {
778 u32 pld_w0 = 778 u32 pld_w0 =
779 *((u32 *) BFA_FCXP_REQ_PLD(fcxp)); 779 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
780 780
781 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, 781 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
782 BFA_PL_EID_TX, 782 BFA_PL_EID_TX,
783 reqlen + sizeof(struct fchs_s), fchs, 783 reqlen + sizeof(struct fchs_s), fchs,
784 pld_w0); 784 pld_w0);
785 } else { 785 } else {
786 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, 786 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
787 BFA_PL_EID_TX, 787 BFA_PL_EID_TX,
788 reqlen + sizeof(struct fchs_s), 788 reqlen + sizeof(struct fchs_s),
789 fchs); 789 fchs);
790 } 790 }
791 } else { 791 } else {
792 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, 792 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
793 reqlen + sizeof(struct fchs_s), fchs); 793 reqlen + sizeof(struct fchs_s), fchs);
794 } 794 }
795 } 795 }
796 796
797 static void 797 static void
798 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, 798 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
799 struct bfi_fcxp_send_rsp_s *fcxp_rsp) 799 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
800 { 800 {
801 if (fcxp_rsp->rsp_len > 0) { 801 if (fcxp_rsp->rsp_len > 0) {
802 if (fcxp->use_irspbuf) { 802 if (fcxp->use_irspbuf) {
803 u32 pld_w0 = 803 u32 pld_w0 =
804 *((u32 *) BFA_FCXP_RSP_PLD(fcxp)); 804 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
805 805
806 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, 806 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
807 BFA_PL_EID_RX, 807 BFA_PL_EID_RX,
808 (u16) fcxp_rsp->rsp_len, 808 (u16) fcxp_rsp->rsp_len,
809 &fcxp_rsp->fchs, pld_w0); 809 &fcxp_rsp->fchs, pld_w0);
810 } else { 810 } else {
811 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, 811 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
812 BFA_PL_EID_RX, 812 BFA_PL_EID_RX,
813 (u16) fcxp_rsp->rsp_len, 813 (u16) fcxp_rsp->rsp_len,
814 &fcxp_rsp->fchs); 814 &fcxp_rsp->fchs);
815 } 815 }
816 } else { 816 } else {
817 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, 817 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
818 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); 818 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
819 } 819 }
820 } 820 }
821 821
822 /* 822 /*
823 * Handler to resume sending fcxp when space in available in cpe queue. 823 * Handler to resume sending fcxp when space in available in cpe queue.
824 */ 824 */
825 static void 825 static void
826 bfa_fcxp_qresume(void *cbarg) 826 bfa_fcxp_qresume(void *cbarg)
827 { 827 {
828 struct bfa_fcxp_s *fcxp = cbarg; 828 struct bfa_fcxp_s *fcxp = cbarg;
829 struct bfa_s *bfa = fcxp->fcxp_mod->bfa; 829 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
830 struct bfi_fcxp_send_req_s *send_req; 830 struct bfi_fcxp_send_req_s *send_req;
831 831
832 fcxp->reqq_waiting = BFA_FALSE; 832 fcxp->reqq_waiting = BFA_FALSE;
833 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); 833 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
834 bfa_fcxp_queue(fcxp, send_req); 834 bfa_fcxp_queue(fcxp, send_req);
835 } 835 }
836 836
837 /* 837 /*
838 * Queue fcxp send request to foimrware. 838 * Queue fcxp send request to foimrware.
839 */ 839 */
840 static void 840 static void
841 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) 841 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
842 { 842 {
843 struct bfa_s *bfa = fcxp->fcxp_mod->bfa; 843 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
844 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; 844 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
845 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; 845 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
846 struct bfa_rport_s *rport = reqi->bfa_rport; 846 struct bfa_rport_s *rport = reqi->bfa_rport;
847 847
848 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, 848 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
849 bfa_lpuid(bfa)); 849 bfa_lpuid(bfa));
850 850
851 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); 851 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
852 if (rport) { 852 if (rport) {
853 send_req->rport_fw_hndl = rport->fw_handle; 853 send_req->rport_fw_hndl = rport->fw_handle;
854 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz); 854 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
855 if (send_req->max_frmsz == 0) 855 if (send_req->max_frmsz == 0)
856 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); 856 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
857 } else { 857 } else {
858 send_req->rport_fw_hndl = 0; 858 send_req->rport_fw_hndl = 0;
859 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); 859 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
860 } 860 }
861 861
862 send_req->vf_id = cpu_to_be16(reqi->vf_id); 862 send_req->vf_id = cpu_to_be16(reqi->vf_id);
863 send_req->lp_tag = reqi->lp_tag; 863 send_req->lp_tag = reqi->lp_tag;
864 send_req->class = reqi->class; 864 send_req->class = reqi->class;
865 send_req->rsp_timeout = rspi->rsp_timeout; 865 send_req->rsp_timeout = rspi->rsp_timeout;
866 send_req->cts = reqi->cts; 866 send_req->cts = reqi->cts;
867 send_req->fchs = reqi->fchs; 867 send_req->fchs = reqi->fchs;
868 868
869 send_req->req_len = cpu_to_be32(reqi->req_tot_len); 869 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
870 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen); 870 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
871 871
872 /* 872 /*
873 * setup req sgles 873 * setup req sgles
874 */ 874 */
875 if (fcxp->use_ireqbuf == 1) { 875 if (fcxp->use_ireqbuf == 1) {
876 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len, 876 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
877 BFA_FCXP_REQ_PLD_PA(fcxp)); 877 BFA_FCXP_REQ_PLD_PA(fcxp));
878 } else { 878 } else {
879 if (fcxp->nreq_sgles > 0) { 879 if (fcxp->nreq_sgles > 0) {
880 WARN_ON(fcxp->nreq_sgles != 1); 880 WARN_ON(fcxp->nreq_sgles != 1);
881 hal_fcxp_set_local_sges(send_req->req_sge, 881 hal_fcxp_set_local_sges(send_req->req_sge,
882 reqi->req_tot_len, 882 reqi->req_tot_len,
883 fcxp->req_sga_cbfn(fcxp->caller, 883 fcxp->req_sga_cbfn(fcxp->caller,
884 0)); 884 0));
885 } else { 885 } else {
886 WARN_ON(reqi->req_tot_len != 0); 886 WARN_ON(reqi->req_tot_len != 0);
887 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 887 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
888 } 888 }
889 } 889 }
890 890
891 /* 891 /*
892 * setup rsp sgles 892 * setup rsp sgles
893 */ 893 */
894 if (fcxp->use_irspbuf == 1) { 894 if (fcxp->use_irspbuf == 1) {
895 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); 895 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
896 896
897 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, 897 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
898 BFA_FCXP_RSP_PLD_PA(fcxp)); 898 BFA_FCXP_RSP_PLD_PA(fcxp));
899 899
900 } else { 900 } else {
901 if (fcxp->nrsp_sgles > 0) { 901 if (fcxp->nrsp_sgles > 0) {
902 WARN_ON(fcxp->nrsp_sgles != 1); 902 WARN_ON(fcxp->nrsp_sgles != 1);
903 hal_fcxp_set_local_sges(send_req->rsp_sge, 903 hal_fcxp_set_local_sges(send_req->rsp_sge,
904 rspi->rsp_maxlen, 904 rspi->rsp_maxlen,
905 fcxp->rsp_sga_cbfn(fcxp->caller, 905 fcxp->rsp_sga_cbfn(fcxp->caller,
906 0)); 906 0));
907 } else { 907 } else {
908 WARN_ON(rspi->rsp_maxlen != 0); 908 WARN_ON(rspi->rsp_maxlen != 0);
909 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 909 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
910 } 910 }
911 } 911 }
912 912
913 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); 913 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
914 914
915 bfa_reqq_produce(bfa, BFA_REQQ_FCXP); 915 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
916 916
917 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); 917 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
918 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 918 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
919 } 919 }
920 920
921 /* 921 /*
922 * Allocate an FCXP instance to send a response or to send a request 922 * Allocate an FCXP instance to send a response or to send a request
923 * that has a response. Request/response buffers are allocated by caller. 923 * that has a response. Request/response buffers are allocated by caller.
924 * 924 *
925 * @param[in] bfa BFA bfa instance 925 * @param[in] bfa BFA bfa instance
926 * @param[in] nreq_sgles Number of SG elements required for request 926 * @param[in] nreq_sgles Number of SG elements required for request
927 * buffer. 0, if fcxp internal buffers are used. 927 * buffer. 0, if fcxp internal buffers are used.
928 * Use bfa_fcxp_get_reqbuf() to get the 928 * Use bfa_fcxp_get_reqbuf() to get the
929 * internal req buffer. 929 * internal req buffer.
930 * @param[in] req_sgles SG elements describing request buffer. Will be 930 * @param[in] req_sgles SG elements describing request buffer. Will be
931 * copied in by BFA and hence can be freed on 931 * copied in by BFA and hence can be freed on
932 * return from this function. 932 * return from this function.
933 * @param[in] get_req_sga function ptr to be called to get a request SG 933 * @param[in] get_req_sga function ptr to be called to get a request SG
934 * Address (given the sge index). 934 * Address (given the sge index).
935 * @param[in] get_req_sglen function ptr to be called to get a request SG 935 * @param[in] get_req_sglen function ptr to be called to get a request SG
936 * len (given the sge index). 936 * len (given the sge index).
937 * @param[in] get_rsp_sga function ptr to be called to get a response SG 937 * @param[in] get_rsp_sga function ptr to be called to get a response SG
938 * Address (given the sge index). 938 * Address (given the sge index).
939 * @param[in] get_rsp_sglen function ptr to be called to get a response SG 939 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
940 * len (given the sge index). 940 * len (given the sge index).
941 * 941 *
942 * @return FCXP instance. NULL on failure. 942 * @return FCXP instance. NULL on failure.
943 */ 943 */
944 struct bfa_fcxp_s * 944 struct bfa_fcxp_s *
945 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, 945 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
946 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, 946 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
947 bfa_fcxp_get_sglen_t req_sglen_cbfn, 947 bfa_fcxp_get_sglen_t req_sglen_cbfn,
948 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, 948 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
949 bfa_fcxp_get_sglen_t rsp_sglen_cbfn) 949 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
950 { 950 {
951 struct bfa_fcxp_s *fcxp = NULL; 951 struct bfa_fcxp_s *fcxp = NULL;
952 952
953 WARN_ON(bfa == NULL); 953 WARN_ON(bfa == NULL);
954 954
955 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); 955 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
956 if (fcxp == NULL) 956 if (fcxp == NULL)
957 return NULL; 957 return NULL;
958 958
959 bfa_trc(bfa, fcxp->fcxp_tag); 959 bfa_trc(bfa, fcxp->fcxp_tag);
960 960
961 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn, 961 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
962 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn); 962 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
963 963
964 return fcxp; 964 return fcxp;
965 } 965 }
966 966
967 /* 967 /*
968 * Get the internal request buffer pointer 968 * Get the internal request buffer pointer
969 * 969 *
970 * @param[in] fcxp BFA fcxp pointer 970 * @param[in] fcxp BFA fcxp pointer
971 * 971 *
972 * @return pointer to the internal request buffer 972 * @return pointer to the internal request buffer
973 */ 973 */
974 void * 974 void *
975 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) 975 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
976 { 976 {
977 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 977 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
978 void *reqbuf; 978 void *reqbuf;
979 979
980 WARN_ON(fcxp->use_ireqbuf != 1); 980 WARN_ON(fcxp->use_ireqbuf != 1);
981 reqbuf = ((u8 *)mod->req_pld_list_kva) + 981 reqbuf = ((u8 *)mod->req_pld_list_kva) +
982 fcxp->fcxp_tag * mod->req_pld_sz; 982 fcxp->fcxp_tag * mod->req_pld_sz;
983 return reqbuf; 983 return reqbuf;
984 } 984 }
985 985
986 u32 986 u32
987 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) 987 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
988 { 988 {
989 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 989 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
990 990
991 return mod->req_pld_sz; 991 return mod->req_pld_sz;
992 } 992 }
993 993
994 /* 994 /*
995 * Get the internal response buffer pointer 995 * Get the internal response buffer pointer
996 * 996 *
997 * @param[in] fcxp BFA fcxp pointer 997 * @param[in] fcxp BFA fcxp pointer
998 * 998 *
999 * @return pointer to the internal request buffer 999 * @return pointer to the internal request buffer
1000 */ 1000 */
1001 void * 1001 void *
1002 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) 1002 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1003 { 1003 {
1004 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 1004 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1005 void *rspbuf; 1005 void *rspbuf;
1006 1006
1007 WARN_ON(fcxp->use_irspbuf != 1); 1007 WARN_ON(fcxp->use_irspbuf != 1);
1008 1008
1009 rspbuf = ((u8 *)mod->rsp_pld_list_kva) + 1009 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1010 fcxp->fcxp_tag * mod->rsp_pld_sz; 1010 fcxp->fcxp_tag * mod->rsp_pld_sz;
1011 return rspbuf; 1011 return rspbuf;
1012 } 1012 }
1013 1013
1014 /* 1014 /*
1015 * Free the BFA FCXP 1015 * Free the BFA FCXP
1016 * 1016 *
1017 * @param[in] fcxp BFA fcxp pointer 1017 * @param[in] fcxp BFA fcxp pointer
1018 * 1018 *
1019 * @return void 1019 * @return void
1020 */ 1020 */
1021 void 1021 void
1022 bfa_fcxp_free(struct bfa_fcxp_s *fcxp) 1022 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1023 { 1023 {
1024 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 1024 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1025 1025
1026 WARN_ON(fcxp == NULL); 1026 WARN_ON(fcxp == NULL);
1027 bfa_trc(mod->bfa, fcxp->fcxp_tag); 1027 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1028 bfa_fcxp_put(fcxp); 1028 bfa_fcxp_put(fcxp);
1029 } 1029 }
1030 1030
/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 * @param[in]	cbfn	call back function to be called on receiving
 *			the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_maxlen	maximum expected response length
 * @param[in]	rsp_timeout	response timeout
 *
 * @return	void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* A NULL cbfn is replaced by a no-op so completion can always fire. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		/* reqq_waiting is cleared when the wait resumes or is discarded */
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1093 1093
/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; warn loudly if any caller reaches here. */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1108 1108
/*
 * Park a wait-queue element on the FCXP module wait queue, recording the
 * caller's allocation parameters in @wqe so the caller can be called back
 * (via @alloc_cbfn) once an FCXP becomes available.
 */
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
		    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
		    void *caller, int nreq_sgles,
		    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		    bfa_fcxp_get_sglen_t req_sglen_cbfn,
		    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Waiting is only legal when the free pool is actually empty. */
	WARN_ON(!list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1135 1135
/*
 * Cancel a pending FCXP allocation wait queued by bfa_fcxp_alloc_wait().
 */
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* The element must still be linked on the module wait queue. */
	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1144 1144
/*
 * Discard an in-flight FCXP: either cancel its pending request-queue
 * wait and free it, or neuter its completion callback so a late firmware
 * response is silently dropped.
 */
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/* Already sent: swap in the no-op completion instead of freeing. */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1161 1161
/*
 * FCXP interrupt message dispatcher: routes firmware-to-host messages
 * by message id. Only BFI_FCXP_I2H_SEND_RSP is expected.
 */
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		/* Unknown message id from firmware -- trace and warn. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
1175 1175
1176 u32 1176 u32
1177 bfa_fcxp_get_maxrsp(struct bfa_s *bfa) 1177 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1178 { 1178 {
1179 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 1179 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1180 1180
1181 return mod->rsp_pld_sz; 1181 return mod->rsp_pld_sz;
1182 } 1182 }
1183 1183
1184 1184
1185 /* 1185 /*
1186 * BFA LPS state machine functions 1186 * BFA LPS state machine functions
1187 */ 1187 */
1188 1188
/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send the login now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* lps->fdisc distinguishes vport FDISC from base-port FLOGI */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in; complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing to do while no login is outstanding. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1240 1240
/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			/* Login accepted: go online. */
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* Login rejected or timed out: back to init. */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller of login completion in either case. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Recorded via trace only while the firmware reply is pending. */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1293 1293
/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/*
		 * Queue space is available; the wait-queue resume path
		 * takes over and the login proceeds from the login state.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1325 1325
/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* FCF cleared the virtual link; drop back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Send the assigned N2N PID, or wait for queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1374 1374
/*
 * login complete -- waiting for request queue space to send the
 * assigned N2N PID to firmware (entered from the online state when
 * the request queue was full)
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the deferred N2N PID now. */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Cancel the pending queue wait on the way down. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1416 1416
/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Logout acknowledged: back to init and notify the caller. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1440 1440
/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Going offline; cancel the pending queue wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1465 1465
1466 1466
1467 1467
1468 /* 1468 /*
1469 * lps_pvt BFA LPS private functions 1469 * lps_pvt BFA LPS private functions
1470 */ 1470 */
1471 1471
1472 /* 1472 /*
1473 * return memory requirement 1473 * return memory requirement
1474 */ 1474 */
1475 static void 1475 static void
1476 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 1476 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1477 u32 *dm_len) 1477 u32 *dm_len)
1478 { 1478 {
1479 if (cfg->drvcfg.min_cfg) 1479 if (cfg->drvcfg.min_cfg)
1480 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS; 1480 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1481 else 1481 else
1482 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1482 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1483 } 1483 }
1484 1484
1485 /* 1485 /*
1486 * bfa module attach at initialization time 1486 * bfa module attach at initialization time
1487 */ 1487 */
1488 static void 1488 static void
1489 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1489 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1490 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1490 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1491 { 1491 {
1492 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1492 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1493 struct bfa_lps_s *lps; 1493 struct bfa_lps_s *lps;
1494 int i; 1494 int i;
1495 1495
1496 memset(mod, 0, sizeof(struct bfa_lps_mod_s)); 1496 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1497 mod->num_lps = BFA_LPS_MAX_LPORTS; 1497 mod->num_lps = BFA_LPS_MAX_LPORTS;
1498 if (cfg->drvcfg.min_cfg) 1498 if (cfg->drvcfg.min_cfg)
1499 mod->num_lps = BFA_LPS_MIN_LPORTS; 1499 mod->num_lps = BFA_LPS_MIN_LPORTS;
1500 else 1500 else
1501 mod->num_lps = BFA_LPS_MAX_LPORTS; 1501 mod->num_lps = BFA_LPS_MAX_LPORTS;
1502 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo); 1502 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1503 1503
1504 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s); 1504 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1505 1505
1506 INIT_LIST_HEAD(&mod->lps_free_q); 1506 INIT_LIST_HEAD(&mod->lps_free_q);
1507 INIT_LIST_HEAD(&mod->lps_active_q); 1507 INIT_LIST_HEAD(&mod->lps_active_q);
1508 1508
1509 for (i = 0; i < mod->num_lps; i++, lps++) { 1509 for (i = 0; i < mod->num_lps; i++, lps++) {
1510 lps->bfa = bfa; 1510 lps->bfa = bfa;
1511 lps->lp_tag = (u8) i; 1511 lps->lp_tag = (u8) i;
1512 lps->reqq = BFA_REQQ_LPS; 1512 lps->reqq = BFA_REQQ_LPS;
1513 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); 1513 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1514 list_add_tail(&lps->qe, &mod->lps_free_q); 1514 list_add_tail(&lps->qe, &mod->lps_free_q);
1515 } 1515 }
1516 } 1516 }
1517 1517
/*
 * Module detach -- LPS holds no resources that need explicit teardown.
 */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1522 1522
/*
 * Module start -- no per-start work required for LPS.
 */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1527 1527
/*
 * Module stop -- no per-stop work required for LPS.
 */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1532 1532
1533 /* 1533 /*
1534 * IOC in disabled state -- consider all lps offline 1534 * IOC in disabled state -- consider all lps offline
1535 */ 1535 */
1536 static void 1536 static void
1537 bfa_lps_iocdisable(struct bfa_s *bfa) 1537 bfa_lps_iocdisable(struct bfa_s *bfa)
1538 { 1538 {
1539 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1539 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1540 struct bfa_lps_s *lps; 1540 struct bfa_lps_s *lps;
1541 struct list_head *qe, *qen; 1541 struct list_head *qe, *qen;
1542 1542
1543 list_for_each_safe(qe, qen, &mod->lps_active_q) { 1543 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1544 lps = (struct bfa_lps_s *) qe; 1544 lps = (struct bfa_lps_s *) qe;
1545 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1545 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1546 } 1546 }
1547 } 1547 }
1548 1548
/*
 * Firmware login response -- capture the reply parameters on the lport
 * service tag, then notify its state machine.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->lp_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* login accepted -- record negotiated parameters */
		lps->fport = rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en = rsp->npiv_en;
		/* bb_credit arrives big-endian from firmware */
		lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn = rsp->port_name;
		lps->pr_nwwn = rsp->node_name;
		lps->auth_req = rsp->auth_req;
		lps->lp_mac = rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac = rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* fabric rejected the login -- keep reason/explanation */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* state machine decides completion handling based on lps->status */
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1596 1596
1597 /* 1597 /*
1598 * Firmware logout response 1598 * Firmware logout response
1599 */ 1599 */
1600 static void 1600 static void
1601 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) 1601 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1602 { 1602 {
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps; 1604 struct bfa_lps_s *lps;
1605 1605
1606 WARN_ON(rsp->lp_tag >= mod->num_lps); 1606 WARN_ON(rsp->lp_tag >= mod->num_lps);
1607 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); 1607 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1608 1608
1609 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1609 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1610 } 1610 }
1611 1611
1612 /* 1612 /*
1613 * Firmware received a Clear virtual link request (for FCoE) 1613 * Firmware received a Clear virtual link request (for FCoE)
1614 */ 1614 */
1615 static void 1615 static void
1616 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) 1616 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1617 { 1617 {
1618 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1618 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1619 struct bfa_lps_s *lps; 1619 struct bfa_lps_s *lps;
1620 1620
1621 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); 1621 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1622 1622
1623 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1623 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1624 } 1624 }
1625 1625
1626 /* 1626 /*
1627 * Space is available in request queue, resume queueing request to firmware. 1627 * Space is available in request queue, resume queueing request to firmware.
1628 */ 1628 */
1629 static void 1629 static void
1630 bfa_lps_reqq_resume(void *lps_arg) 1630 bfa_lps_reqq_resume(void *lps_arg)
1631 { 1631 {
1632 struct bfa_lps_s *lps = lps_arg; 1632 struct bfa_lps_s *lps = lps_arg;
1633 1633
1634 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1634 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1635 } 1635 }
1636 1636
1637 /* 1637 /*
1638 * lps is freed -- triggered by vport delete 1638 * lps is freed -- triggered by vport delete
1639 */ 1639 */
1640 static void 1640 static void
1641 bfa_lps_free(struct bfa_lps_s *lps) 1641 bfa_lps_free(struct bfa_lps_s *lps)
1642 { 1642 {
1643 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); 1643 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1644 1644
1645 lps->lp_pid = 0; 1645 lps->lp_pid = 0;
1646 list_del(&lps->qe); 1646 list_del(&lps->qe);
1647 list_add_tail(&lps->qe, &mod->lps_free_q); 1647 list_add_tail(&lps->qe, &mod->lps_free_q);
1648 } 1648 }
1649 1649
/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	/*
	 * NOTE(review): callers appear to run only when reqq space is
	 * available (the state machine waits in a qwait state otherwise);
	 * WARN_ON flags a violation of that protocol.
	 */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->alpa = lps->alpa;
	/* pdu_size goes on the wire big-endian */
	m->pdu_size = cpu_to_be16(lps->pdusz);
	m->pwwn = lps->pwwn;
	m->nwwn = lps->nwwn;
	m->fdisc = lps->fdisc;
	m->auth_en = lps->auth_en;

	/* hand the filled message element to firmware */
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1674 1674
/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	/* request queue space expected to be available -- see qwait states */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1693 1693
/*
 * send n2n pid set request to firmware
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	/* request queue space expected to be available -- see qwait states */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1712 1712
1713 /* 1713 /*
1714 * Indirect login completion handler for non-fcs 1714 * Indirect login completion handler for non-fcs
1715 */ 1715 */
1716 static void 1716 static void
1717 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) 1717 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1718 { 1718 {
1719 struct bfa_lps_s *lps = arg; 1719 struct bfa_lps_s *lps = arg;
1720 1720
1721 if (!complete) 1721 if (!complete)
1722 return; 1722 return;
1723 1723
1724 if (lps->fdisc) 1724 if (lps->fdisc)
1725 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); 1725 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1726 else 1726 else
1727 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1727 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1728 } 1728 }
1729 1729
1730 /* 1730 /*
1731 * Login completion handler -- direct call for fcs, queue for others 1731 * Login completion handler -- direct call for fcs, queue for others
1732 */ 1732 */
1733 static void 1733 static void
1734 bfa_lps_login_comp(struct bfa_lps_s *lps) 1734 bfa_lps_login_comp(struct bfa_lps_s *lps)
1735 { 1735 {
1736 if (!lps->bfa->fcs) { 1736 if (!lps->bfa->fcs) {
1737 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb, 1737 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1738 lps); 1738 lps);
1739 return; 1739 return;
1740 } 1740 }
1741 1741
1742 if (lps->fdisc) 1742 if (lps->fdisc)
1743 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); 1743 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1744 else 1744 else
1745 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1745 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1746 } 1746 }
1747 1747
1748 /* 1748 /*
1749 * Indirect logout completion handler for non-fcs 1749 * Indirect logout completion handler for non-fcs
1750 */ 1750 */
1751 static void 1751 static void
1752 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) 1752 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1753 { 1753 {
1754 struct bfa_lps_s *lps = arg; 1754 struct bfa_lps_s *lps = arg;
1755 1755
1756 if (!complete) 1756 if (!complete)
1757 return; 1757 return;
1758 1758
1759 if (lps->fdisc) 1759 if (lps->fdisc)
1760 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1760 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1761 } 1761 }
1762 1762
1763 /* 1763 /*
1764 * Logout completion handler -- direct call for fcs, queue for others 1764 * Logout completion handler -- direct call for fcs, queue for others
1765 */ 1765 */
1766 static void 1766 static void
1767 bfa_lps_logout_comp(struct bfa_lps_s *lps) 1767 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1768 { 1768 {
1769 if (!lps->bfa->fcs) { 1769 if (!lps->bfa->fcs) {
1770 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb, 1770 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1771 lps); 1771 lps);
1772 return; 1772 return;
1773 } 1773 }
1774 if (lps->fdisc) 1774 if (lps->fdisc)
1775 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1775 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1776 } 1776 }
1777 1777
1778 /* 1778 /*
1779 * Clear virtual link completion handler for non-fcs 1779 * Clear virtual link completion handler for non-fcs
1780 */ 1780 */
1781 static void 1781 static void
1782 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) 1782 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1783 { 1783 {
1784 struct bfa_lps_s *lps = arg; 1784 struct bfa_lps_s *lps = arg;
1785 1785
1786 if (!complete) 1786 if (!complete)
1787 return; 1787 return;
1788 1788
1789 /* Clear virtual link to base port will result in link down */ 1789 /* Clear virtual link to base port will result in link down */
1790 if (lps->fdisc) 1790 if (lps->fdisc)
1791 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1791 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1792 } 1792 }
1793 1793
1794 /* 1794 /*
1795 * Received Clear virtual link event --direct call for fcs, 1795 * Received Clear virtual link event --direct call for fcs,
1796 * queue for others 1796 * queue for others
1797 */ 1797 */
1798 static void 1798 static void
1799 bfa_lps_cvl_event(struct bfa_lps_s *lps) 1799 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1800 { 1800 {
1801 if (!lps->bfa->fcs) { 1801 if (!lps->bfa->fcs) {
1802 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, 1802 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1803 lps); 1803 lps);
1804 return; 1804 return;
1805 } 1805 }
1806 1806
1807 /* Clear virtual link to base port will result in link down */ 1807 /* Clear virtual link to base port will result in link down */
1808 if (lps->fdisc) 1808 if (lps->fdisc)
1809 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1809 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1810 } 1810 }
1811 1811
1812 1812
1813 1813
1814 /* 1814 /*
1815 * lps_public BFA LPS public functions 1815 * lps_public BFA LPS public functions
1816 */ 1816 */
1817 1817
1818 u32 1818 u32
1819 bfa_lps_get_max_vport(struct bfa_s *bfa) 1819 bfa_lps_get_max_vport(struct bfa_s *bfa)
1820 { 1820 {
1821 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) 1821 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1822 return BFA_LPS_MAX_VPORTS_SUPP_CT; 1822 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1823 else 1823 else
1824 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1824 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1825 } 1825 }
1826 1826
/*
 * Allocate a lport service tag.
 *
 * Returns NULL if the free pool is exhausted; otherwise the tag is
 * moved to the active queue and its state machine reset to init.
 */
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	/* dequeue the next free tag, if any, into lps */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
1846 1846
/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 *
 * The DELETE event lets the current state decide how to clean up
 * before the tag is returned to the free pool.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1856 1856
1857 /* 1857 /*
1858 * Initiate a lport login. 1858 * Initiate a lport login.
1859 */ 1859 */
1860 void 1860 void
1861 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, 1861 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1862 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) 1862 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1863 { 1863 {
1864 lps->uarg = uarg; 1864 lps->uarg = uarg;
1865 lps->alpa = alpa; 1865 lps->alpa = alpa;
1866 lps->pdusz = pdusz; 1866 lps->pdusz = pdusz;
1867 lps->pwwn = pwwn; 1867 lps->pwwn = pwwn;
1868 lps->nwwn = nwwn; 1868 lps->nwwn = nwwn;
1869 lps->fdisc = BFA_FALSE; 1869 lps->fdisc = BFA_FALSE;
1870 lps->auth_en = auth_en; 1870 lps->auth_en = auth_en;
1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1872 } 1872 }
1873 1873
1874 /* 1874 /*
1875 * Initiate a lport fdisc login. 1875 * Initiate a lport fdisc login.
1876 */ 1876 */
1877 void 1877 void
1878 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, 1878 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1879 wwn_t nwwn) 1879 wwn_t nwwn)
1880 { 1880 {
1881 lps->uarg = uarg; 1881 lps->uarg = uarg;
1882 lps->alpa = 0; 1882 lps->alpa = 0;
1883 lps->pdusz = pdusz; 1883 lps->pdusz = pdusz;
1884 lps->pwwn = pwwn; 1884 lps->pwwn = pwwn;
1885 lps->nwwn = nwwn; 1885 lps->nwwn = nwwn;
1886 lps->fdisc = BFA_TRUE; 1886 lps->fdisc = BFA_TRUE;
1887 lps->auth_en = BFA_FALSE; 1887 lps->auth_en = BFA_FALSE;
1888 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1888 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1889 } 1889 }
1890 1890
1891 1891
/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1900 1900
1901 1901
1902 /* 1902 /*
1903 * Return lport services tag given the pid 1903 * Return lport services tag given the pid
1904 */ 1904 */
1905 u8 1905 u8
1906 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) 1906 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1907 { 1907 {
1908 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1908 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1909 struct bfa_lps_s *lps; 1909 struct bfa_lps_s *lps;
1910 int i; 1910 int i;
1911 1911
1912 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { 1912 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1913 if (lps->lp_pid == pid) 1913 if (lps->lp_pid == pid)
1914 return lps->lp_tag; 1914 return lps->lp_tag;
1915 } 1915 }
1916 1916
1917 /* Return base port tag anyway */ 1917 /* Return base port tag anyway */
1918 return 0; 1918 return 0;
1919 } 1919 }
1920 1920
1921 1921
1922 /* 1922 /*
1923 * return port id assigned to the base lport 1923 * return port id assigned to the base lport
1924 */ 1924 */
1925 u32 1925 u32
1926 bfa_lps_get_base_pid(struct bfa_s *bfa) 1926 bfa_lps_get_base_pid(struct bfa_s *bfa)
1927 { 1927 {
1928 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1928 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1929 1929
1930 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1930 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1931 } 1931 }
1932 1932
/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* record the pid, then let the state machine push it to firmware */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1945 1945
/*
 * LPS firmware message class handler.
 *
 * Dispatches incoming BFI_MC_LPS messages to the matching response/
 * event handler.  NOTE(review): these are I2H (firmware-to-host)
 * messages even though the constants carry an "H2I" prefix -- the
 * names come from the bfi header; confirm there before any rename.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1975 1975
1976 /* 1976 /*
1977 * FC PORT state machine functions 1977 * FC PORT state machine functions
1978 */ 1978 */
/*
 * Initial state -- port has not been started yet.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* no request queue space -- park in qwait state */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2026 2026
/*
 * Enable request could not be queued (no request queue space); waiting
 * for a QRESUME to retry bfa_fcport_send_enable().
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space is back -- send the deferred enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* cancel the pending queue wait before going iocdown */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2083 2083
/*
 * Enable request has been queued to firmware; waiting for the
 * firmware response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* enabled but no link yet */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* qwait variant if the disable cannot be queued right away */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2138 2138
2139 static void 2139 static void
2140 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, 2140 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2141 enum bfa_fcport_sm_event event) 2141 enum bfa_fcport_sm_event event)
2142 { 2142 {
2143 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; 2143 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2144 char pwwn_buf[BFA_STRING_32]; 2144 char pwwn_buf[BFA_STRING_32];
2145 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; 2145 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2146 2146
2147 bfa_trc(fcport->bfa, event); 2147 bfa_trc(fcport->bfa, event);
2148 2148
2149 switch (event) { 2149 switch (event) {
2150 case BFA_FCPORT_SM_LINKUP: 2150 case BFA_FCPORT_SM_LINKUP:
2151 bfa_fcport_update_linkinfo(fcport); 2151 bfa_fcport_update_linkinfo(fcport);
2152 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); 2152 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2153 WARN_ON(!fcport->event_cbfn); 2153 WARN_ON(!fcport->event_cbfn);
2154 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2154 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2155 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); 2155 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2156 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 2156 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2157 2157
2158 bfa_trc(fcport->bfa, 2158 bfa_trc(fcport->bfa,
2159 pevent->link_state.vc_fcf.fcf.fipenabled); 2159 pevent->link_state.vc_fcf.fcf.fipenabled);
2160 bfa_trc(fcport->bfa, 2160 bfa_trc(fcport->bfa,
2161 pevent->link_state.vc_fcf.fcf.fipfailed); 2161 pevent->link_state.vc_fcf.fcf.fipfailed);
2162 2162
2163 if (pevent->link_state.vc_fcf.fcf.fipfailed) 2163 if (pevent->link_state.vc_fcf.fcf.fipfailed)
2164 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2164 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2165 BFA_PL_EID_FIP_FCF_DISC, 0, 2165 BFA_PL_EID_FIP_FCF_DISC, 0,
2166 "FIP FCF Discovery Failed"); 2166 "FIP FCF Discovery Failed");
2167 else 2167 else
2168 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2168 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2169 BFA_PL_EID_FIP_FCF_DISC, 0, 2169 BFA_PL_EID_FIP_FCF_DISC, 0,
2170 "FIP FCF Discovered"); 2170 "FIP FCF Discovered");
2171 } 2171 }
2172 2172
2173 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); 2173 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2174 wwn2str(pwwn_buf, fcport->pwwn); 2174 wwn2str(pwwn_buf, fcport->pwwn);
2175 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2175 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2176 "Base port online: WWN = %s\n", pwwn_buf); 2176 "Base port online: WWN = %s\n", pwwn_buf);
2177 break; 2177 break;
2178 2178
2179 case BFA_FCPORT_SM_LINKDOWN: 2179 case BFA_FCPORT_SM_LINKDOWN:
2180 /* 2180 /*
2181 * Possible to get link down event. 2181 * Possible to get link down event.
2182 */ 2182 */
2183 break; 2183 break;
2184 2184
2185 case BFA_FCPORT_SM_ENABLE: 2185 case BFA_FCPORT_SM_ENABLE:
2186 /* 2186 /*
2187 * Already enabled. 2187 * Already enabled.
2188 */ 2188 */
2189 break; 2189 break;
2190 2190
2191 case BFA_FCPORT_SM_DISABLE: 2191 case BFA_FCPORT_SM_DISABLE:
2192 if (bfa_fcport_send_disable(fcport)) 2192 if (bfa_fcport_send_disable(fcport))
2193 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 2193 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2194 else 2194 else
2195 bfa_sm_set_state(fcport, 2195 bfa_sm_set_state(fcport,
2196 bfa_fcport_sm_disabling_qwait); 2196 bfa_fcport_sm_disabling_qwait);
2197 2197
2198 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2198 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2199 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2199 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2200 wwn2str(pwwn_buf, fcport->pwwn); 2200 wwn2str(pwwn_buf, fcport->pwwn);
2201 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2201 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2202 "Base port disabled: WWN = %s\n", pwwn_buf); 2202 "Base port disabled: WWN = %s\n", pwwn_buf);
2203 break; 2203 break;
2204 2204
2205 case BFA_FCPORT_SM_STOP: 2205 case BFA_FCPORT_SM_STOP:
2206 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2206 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2207 break; 2207 break;
2208 2208
2209 case BFA_FCPORT_SM_HWFAIL: 2209 case BFA_FCPORT_SM_HWFAIL:
2210 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2210 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2211 break; 2211 break;
2212 2212
2213 default: 2213 default:
2214 bfa_sm_fault(fcport->bfa, event); 2214 bfa_sm_fault(fcport->bfa, event);
2215 } 2215 }
2216 } 2216 }
2217 2217
2218 static void 2218 static void
2219 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, 2219 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2220 enum bfa_fcport_sm_event event) 2220 enum bfa_fcport_sm_event event)
2221 { 2221 {
2222 char pwwn_buf[BFA_STRING_32]; 2222 char pwwn_buf[BFA_STRING_32];
2223 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; 2223 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2224 2224
2225 bfa_trc(fcport->bfa, event); 2225 bfa_trc(fcport->bfa, event);
2226 2226
2227 switch (event) { 2227 switch (event) {
2228 case BFA_FCPORT_SM_ENABLE: 2228 case BFA_FCPORT_SM_ENABLE:
2229 /* 2229 /*
2230 * Already enabled. 2230 * Already enabled.
2231 */ 2231 */
2232 break; 2232 break;
2233 2233
2234 case BFA_FCPORT_SM_DISABLE: 2234 case BFA_FCPORT_SM_DISABLE:
2235 if (bfa_fcport_send_disable(fcport)) 2235 if (bfa_fcport_send_disable(fcport))
2236 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 2236 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2237 else 2237 else
2238 bfa_sm_set_state(fcport, 2238 bfa_sm_set_state(fcport,
2239 bfa_fcport_sm_disabling_qwait); 2239 bfa_fcport_sm_disabling_qwait);
2240 2240
2241 bfa_fcport_reset_linkinfo(fcport); 2241 bfa_fcport_reset_linkinfo(fcport);
2242 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2242 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2243 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2243 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2244 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 2244 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2245 wwn2str(pwwn_buf, fcport->pwwn); 2245 wwn2str(pwwn_buf, fcport->pwwn);
2246 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2246 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2247 "Base port offline: WWN = %s\n", pwwn_buf); 2247 "Base port offline: WWN = %s\n", pwwn_buf);
2248 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2248 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2249 "Base port disabled: WWN = %s\n", pwwn_buf); 2249 "Base port disabled: WWN = %s\n", pwwn_buf);
2250 break; 2250 break;
2251 2251
2252 case BFA_FCPORT_SM_LINKDOWN: 2252 case BFA_FCPORT_SM_LINKDOWN:
2253 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); 2253 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2254 bfa_fcport_reset_linkinfo(fcport); 2254 bfa_fcport_reset_linkinfo(fcport);
2255 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2255 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2256 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2256 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2257 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 2257 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2258 wwn2str(pwwn_buf, fcport->pwwn); 2258 wwn2str(pwwn_buf, fcport->pwwn);
2259 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2259 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2260 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2260 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2261 "Base port offline: WWN = %s\n", pwwn_buf); 2261 "Base port offline: WWN = %s\n", pwwn_buf);
2262 else 2262 else
2263 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2263 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2264 "Base port (WWN = %s) " 2264 "Base port (WWN = %s) "
2265 "lost fabric connectivity\n", pwwn_buf); 2265 "lost fabric connectivity\n", pwwn_buf);
2266 break; 2266 break;
2267 2267
2268 case BFA_FCPORT_SM_STOP: 2268 case BFA_FCPORT_SM_STOP:
2269 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2269 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2270 bfa_fcport_reset_linkinfo(fcport); 2270 bfa_fcport_reset_linkinfo(fcport);
2271 wwn2str(pwwn_buf, fcport->pwwn); 2271 wwn2str(pwwn_buf, fcport->pwwn);
2272 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2272 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2273 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2273 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2274 "Base port offline: WWN = %s\n", pwwn_buf); 2274 "Base port offline: WWN = %s\n", pwwn_buf);
2275 else 2275 else
2276 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2276 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2277 "Base port (WWN = %s) " 2277 "Base port (WWN = %s) "
2278 "lost fabric connectivity\n", pwwn_buf); 2278 "lost fabric connectivity\n", pwwn_buf);
2279 break; 2279 break;
2280 2280
2281 case BFA_FCPORT_SM_HWFAIL: 2281 case BFA_FCPORT_SM_HWFAIL:
2282 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2282 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2283 bfa_fcport_reset_linkinfo(fcport); 2283 bfa_fcport_reset_linkinfo(fcport);
2284 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2284 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2285 wwn2str(pwwn_buf, fcport->pwwn); 2285 wwn2str(pwwn_buf, fcport->pwwn);
2286 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2286 if (BFA_PORT_IS_DISABLED(fcport->bfa))
2287 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2287 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2288 "Base port offline: WWN = %s\n", pwwn_buf); 2288 "Base port offline: WWN = %s\n", pwwn_buf);
2289 else 2289 else
2290 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2290 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2291 "Base port (WWN = %s) " 2291 "Base port (WWN = %s) "
2292 "lost fabric connectivity\n", pwwn_buf); 2292 "lost fabric connectivity\n", pwwn_buf);
2293 break; 2293 break;
2294 2294
2295 default: 2295 default:
2296 bfa_sm_fault(fcport->bfa, event); 2296 bfa_sm_fault(fcport->bfa, event);
2297 } 2297 }
2298 } 2298 }
2299 2299
2300 static void 2300 static void
2301 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, 2301 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2302 enum bfa_fcport_sm_event event) 2302 enum bfa_fcport_sm_event event)
2303 { 2303 {
2304 bfa_trc(fcport->bfa, event); 2304 bfa_trc(fcport->bfa, event);
2305 2305
2306 switch (event) { 2306 switch (event) {
2307 case BFA_FCPORT_SM_QRESUME: 2307 case BFA_FCPORT_SM_QRESUME:
2308 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 2308 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2309 bfa_fcport_send_disable(fcport); 2309 bfa_fcport_send_disable(fcport);
2310 break; 2310 break;
2311 2311
2312 case BFA_FCPORT_SM_STOP: 2312 case BFA_FCPORT_SM_STOP:
2313 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2313 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2314 bfa_reqq_wcancel(&fcport->reqq_wait); 2314 bfa_reqq_wcancel(&fcport->reqq_wait);
2315 break; 2315 break;
2316 2316
2317 case BFA_FCPORT_SM_ENABLE: 2317 case BFA_FCPORT_SM_ENABLE:
2318 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait); 2318 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2319 break; 2319 break;
2320 2320
2321 case BFA_FCPORT_SM_DISABLE: 2321 case BFA_FCPORT_SM_DISABLE:
2322 /* 2322 /*
2323 * Already being disabled. 2323 * Already being disabled.
2324 */ 2324 */
2325 break; 2325 break;
2326 2326
2327 case BFA_FCPORT_SM_LINKUP: 2327 case BFA_FCPORT_SM_LINKUP:
2328 case BFA_FCPORT_SM_LINKDOWN: 2328 case BFA_FCPORT_SM_LINKDOWN:
2329 /* 2329 /*
2330 * Possible to get link events when doing back-to-back 2330 * Possible to get link events when doing back-to-back
2331 * enable/disables. 2331 * enable/disables.
2332 */ 2332 */
2333 break; 2333 break;
2334 2334
2335 case BFA_FCPORT_SM_HWFAIL: 2335 case BFA_FCPORT_SM_HWFAIL:
2336 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2336 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2337 bfa_reqq_wcancel(&fcport->reqq_wait); 2337 bfa_reqq_wcancel(&fcport->reqq_wait);
2338 break; 2338 break;
2339 2339
2340 default: 2340 default:
2341 bfa_sm_fault(fcport->bfa, event); 2341 bfa_sm_fault(fcport->bfa, event);
2342 } 2342 }
2343 } 2343 }
2344 2344
2345 static void 2345 static void
2346 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, 2346 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2347 enum bfa_fcport_sm_event event) 2347 enum bfa_fcport_sm_event event)
2348 { 2348 {
2349 bfa_trc(fcport->bfa, event); 2349 bfa_trc(fcport->bfa, event);
2350 2350
2351 switch (event) { 2351 switch (event) {
2352 case BFA_FCPORT_SM_QRESUME: 2352 case BFA_FCPORT_SM_QRESUME:
2353 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); 2353 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2354 bfa_fcport_send_disable(fcport); 2354 bfa_fcport_send_disable(fcport);
2355 if (bfa_fcport_send_enable(fcport)) 2355 if (bfa_fcport_send_enable(fcport))
2356 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 2356 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2357 else 2357 else
2358 bfa_sm_set_state(fcport, 2358 bfa_sm_set_state(fcport,
2359 bfa_fcport_sm_enabling_qwait); 2359 bfa_fcport_sm_enabling_qwait);
2360 break; 2360 break;
2361 2361
2362 case BFA_FCPORT_SM_STOP: 2362 case BFA_FCPORT_SM_STOP:
2363 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2363 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2364 bfa_reqq_wcancel(&fcport->reqq_wait); 2364 bfa_reqq_wcancel(&fcport->reqq_wait);
2365 break; 2365 break;
2366 2366
2367 case BFA_FCPORT_SM_ENABLE: 2367 case BFA_FCPORT_SM_ENABLE:
2368 break; 2368 break;
2369 2369
2370 case BFA_FCPORT_SM_DISABLE: 2370 case BFA_FCPORT_SM_DISABLE:
2371 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); 2371 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2372 break; 2372 break;
2373 2373
2374 case BFA_FCPORT_SM_LINKUP: 2374 case BFA_FCPORT_SM_LINKUP:
2375 case BFA_FCPORT_SM_LINKDOWN: 2375 case BFA_FCPORT_SM_LINKDOWN:
2376 /* 2376 /*
2377 * Possible to get link events when doing back-to-back 2377 * Possible to get link events when doing back-to-back
2378 * enable/disables. 2378 * enable/disables.
2379 */ 2379 */
2380 break; 2380 break;
2381 2381
2382 case BFA_FCPORT_SM_HWFAIL: 2382 case BFA_FCPORT_SM_HWFAIL:
2383 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2383 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2384 bfa_reqq_wcancel(&fcport->reqq_wait); 2384 bfa_reqq_wcancel(&fcport->reqq_wait);
2385 break; 2385 break;
2386 2386
2387 default: 2387 default:
2388 bfa_sm_fault(fcport->bfa, event); 2388 bfa_sm_fault(fcport->bfa, event);
2389 } 2389 }
2390 } 2390 }
2391 2391
2392 static void 2392 static void
2393 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, 2393 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2394 enum bfa_fcport_sm_event event) 2394 enum bfa_fcport_sm_event event)
2395 { 2395 {
2396 char pwwn_buf[BFA_STRING_32]; 2396 char pwwn_buf[BFA_STRING_32];
2397 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; 2397 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2398 bfa_trc(fcport->bfa, event); 2398 bfa_trc(fcport->bfa, event);
2399 2399
2400 switch (event) { 2400 switch (event) {
2401 case BFA_FCPORT_SM_FWRSP: 2401 case BFA_FCPORT_SM_FWRSP:
2402 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 2402 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2403 break; 2403 break;
2404 2404
2405 case BFA_FCPORT_SM_DISABLE: 2405 case BFA_FCPORT_SM_DISABLE:
2406 /* 2406 /*
2407 * Already being disabled. 2407 * Already being disabled.
2408 */ 2408 */
2409 break; 2409 break;
2410 2410
2411 case BFA_FCPORT_SM_ENABLE: 2411 case BFA_FCPORT_SM_ENABLE:
2412 if (bfa_fcport_send_enable(fcport)) 2412 if (bfa_fcport_send_enable(fcport))
2413 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 2413 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2414 else 2414 else
2415 bfa_sm_set_state(fcport, 2415 bfa_sm_set_state(fcport,
2416 bfa_fcport_sm_enabling_qwait); 2416 bfa_fcport_sm_enabling_qwait);
2417 2417
2418 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2418 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2419 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 2419 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2420 wwn2str(pwwn_buf, fcport->pwwn); 2420 wwn2str(pwwn_buf, fcport->pwwn);
2421 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2421 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2422 "Base port enabled: WWN = %s\n", pwwn_buf); 2422 "Base port enabled: WWN = %s\n", pwwn_buf);
2423 break; 2423 break;
2424 2424
2425 case BFA_FCPORT_SM_STOP: 2425 case BFA_FCPORT_SM_STOP:
2426 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2426 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2427 break; 2427 break;
2428 2428
2429 case BFA_FCPORT_SM_LINKUP: 2429 case BFA_FCPORT_SM_LINKUP:
2430 case BFA_FCPORT_SM_LINKDOWN: 2430 case BFA_FCPORT_SM_LINKDOWN:
2431 /* 2431 /*
2432 * Possible to get link events when doing back-to-back 2432 * Possible to get link events when doing back-to-back
2433 * enable/disables. 2433 * enable/disables.
2434 */ 2434 */
2435 break; 2435 break;
2436 2436
2437 case BFA_FCPORT_SM_HWFAIL: 2437 case BFA_FCPORT_SM_HWFAIL:
2438 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2438 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2439 break; 2439 break;
2440 2440
2441 default: 2441 default:
2442 bfa_sm_fault(fcport->bfa, event); 2442 bfa_sm_fault(fcport->bfa, event);
2443 } 2443 }
2444 } 2444 }
2445 2445
2446 static void 2446 static void
2447 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, 2447 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2448 enum bfa_fcport_sm_event event) 2448 enum bfa_fcport_sm_event event)
2449 { 2449 {
2450 char pwwn_buf[BFA_STRING_32]; 2450 char pwwn_buf[BFA_STRING_32];
2451 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; 2451 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2452 bfa_trc(fcport->bfa, event); 2452 bfa_trc(fcport->bfa, event);
2453 2453
2454 switch (event) { 2454 switch (event) {
2455 case BFA_FCPORT_SM_START: 2455 case BFA_FCPORT_SM_START:
2456 /* 2456 /*
2457 * Ignore start event for a port that is disabled. 2457 * Ignore start event for a port that is disabled.
2458 */ 2458 */
2459 break; 2459 break;
2460 2460
2461 case BFA_FCPORT_SM_STOP: 2461 case BFA_FCPORT_SM_STOP:
2462 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2462 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2463 break; 2463 break;
2464 2464
2465 case BFA_FCPORT_SM_ENABLE: 2465 case BFA_FCPORT_SM_ENABLE:
2466 if (bfa_fcport_send_enable(fcport)) 2466 if (bfa_fcport_send_enable(fcport))
2467 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 2467 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2468 else 2468 else
2469 bfa_sm_set_state(fcport, 2469 bfa_sm_set_state(fcport,
2470 bfa_fcport_sm_enabling_qwait); 2470 bfa_fcport_sm_enabling_qwait);
2471 2471
2472 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2472 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2473 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 2473 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2474 wwn2str(pwwn_buf, fcport->pwwn); 2474 wwn2str(pwwn_buf, fcport->pwwn);
2475 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2475 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2476 "Base port enabled: WWN = %s\n", pwwn_buf); 2476 "Base port enabled: WWN = %s\n", pwwn_buf);
2477 break; 2477 break;
2478 2478
2479 case BFA_FCPORT_SM_DISABLE: 2479 case BFA_FCPORT_SM_DISABLE:
2480 /* 2480 /*
2481 * Already disabled. 2481 * Already disabled.
2482 */ 2482 */
2483 break; 2483 break;
2484 2484
2485 case BFA_FCPORT_SM_HWFAIL: 2485 case BFA_FCPORT_SM_HWFAIL:
2486 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2486 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2487 break; 2487 break;
2488 2488
2489 default: 2489 default:
2490 bfa_sm_fault(fcport->bfa, event); 2490 bfa_sm_fault(fcport->bfa, event);
2491 } 2491 }
2492 } 2492 }
2493 2493
2494 static void 2494 static void
2495 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, 2495 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2496 enum bfa_fcport_sm_event event) 2496 enum bfa_fcport_sm_event event)
2497 { 2497 {
2498 bfa_trc(fcport->bfa, event); 2498 bfa_trc(fcport->bfa, event);
2499 2499
2500 switch (event) { 2500 switch (event) {
2501 case BFA_FCPORT_SM_START: 2501 case BFA_FCPORT_SM_START:
2502 if (bfa_fcport_send_enable(fcport)) 2502 if (bfa_fcport_send_enable(fcport))
2503 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 2503 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2504 else 2504 else
2505 bfa_sm_set_state(fcport, 2505 bfa_sm_set_state(fcport,
2506 bfa_fcport_sm_enabling_qwait); 2506 bfa_fcport_sm_enabling_qwait);
2507 break; 2507 break;
2508 2508
2509 default: 2509 default:
2510 /* 2510 /*
2511 * Ignore all other events. 2511 * Ignore all other events.
2512 */ 2512 */
2513 ; 2513 ;
2514 } 2514 }
2515 } 2515 }
2516 2516
2517 /* 2517 /*
2518 * Port is enabled. IOC is down/failed. 2518 * Port is enabled. IOC is down/failed.
2519 */ 2519 */
2520 static void 2520 static void
2521 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, 2521 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2522 enum bfa_fcport_sm_event event) 2522 enum bfa_fcport_sm_event event)
2523 { 2523 {
2524 bfa_trc(fcport->bfa, event); 2524 bfa_trc(fcport->bfa, event);
2525 2525
2526 switch (event) { 2526 switch (event) {
2527 case BFA_FCPORT_SM_START: 2527 case BFA_FCPORT_SM_START:
2528 if (bfa_fcport_send_enable(fcport)) 2528 if (bfa_fcport_send_enable(fcport))
2529 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); 2529 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2530 else 2530 else
2531 bfa_sm_set_state(fcport, 2531 bfa_sm_set_state(fcport,
2532 bfa_fcport_sm_enabling_qwait); 2532 bfa_fcport_sm_enabling_qwait);
2533 break; 2533 break;
2534 2534
2535 default: 2535 default:
2536 /* 2536 /*
2537 * Ignore all events. 2537 * Ignore all events.
2538 */ 2538 */
2539 ; 2539 ;
2540 } 2540 }
2541 } 2541 }
2542 2542
2543 /* 2543 /*
2544 * Port is disabled. IOC is down/failed. 2544 * Port is disabled. IOC is down/failed.
2545 */ 2545 */
2546 static void 2546 static void
2547 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 2547 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2548 enum bfa_fcport_sm_event event) 2548 enum bfa_fcport_sm_event event)
2549 { 2549 {
2550 bfa_trc(fcport->bfa, event); 2550 bfa_trc(fcport->bfa, event);
2551 2551
2552 switch (event) { 2552 switch (event) {
2553 case BFA_FCPORT_SM_START: 2553 case BFA_FCPORT_SM_START:
2554 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); 2554 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2555 break; 2555 break;
2556 2556
2557 case BFA_FCPORT_SM_ENABLE: 2557 case BFA_FCPORT_SM_ENABLE:
2558 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2558 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2559 break; 2559 break;
2560 2560
2561 default: 2561 default:
2562 /* 2562 /*
2563 * Ignore all events. 2563 * Ignore all events.
2564 */ 2564 */
2565 ; 2565 ;
2566 } 2566 }
2567 } 2567 }
2568 2568
2569 /* 2569 /*
2570 * Link state is down 2570 * Link state is down
2571 */ 2571 */
2572 static void 2572 static void
2573 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 2573 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2574 enum bfa_fcport_ln_sm_event event) 2574 enum bfa_fcport_ln_sm_event event)
2575 { 2575 {
2576 bfa_trc(ln->fcport->bfa, event); 2576 bfa_trc(ln->fcport->bfa, event);
2577 2577
2578 switch (event) { 2578 switch (event) {
2579 case BFA_FCPORT_LN_SM_LINKUP: 2579 case BFA_FCPORT_LN_SM_LINKUP:
2580 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); 2580 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2581 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); 2581 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2582 break; 2582 break;
2583 2583
2584 default: 2584 default:
2585 bfa_sm_fault(ln->fcport->bfa, event); 2585 bfa_sm_fault(ln->fcport->bfa, event);
2586 } 2586 }
2587 } 2587 }
2588 2588
2589 /* 2589 /*
2590 * Link state is waiting for down notification 2590 * Link state is waiting for down notification
2591 */ 2591 */
2592 static void 2592 static void
2593 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, 2593 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2594 enum bfa_fcport_ln_sm_event event) 2594 enum bfa_fcport_ln_sm_event event)
2595 { 2595 {
2596 bfa_trc(ln->fcport->bfa, event); 2596 bfa_trc(ln->fcport->bfa, event);
2597 2597
2598 switch (event) { 2598 switch (event) {
2599 case BFA_FCPORT_LN_SM_LINKUP: 2599 case BFA_FCPORT_LN_SM_LINKUP:
2600 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); 2600 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2601 break; 2601 break;
2602 2602
2603 case BFA_FCPORT_LN_SM_NOTIFICATION: 2603 case BFA_FCPORT_LN_SM_NOTIFICATION:
2604 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2604 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2605 break; 2605 break;
2606 2606
2607 default: 2607 default:
2608 bfa_sm_fault(ln->fcport->bfa, event); 2608 bfa_sm_fault(ln->fcport->bfa, event);
2609 } 2609 }
2610 } 2610 }
2611 2611
2612 /* 2612 /*
2613 * Link state is waiting for down notification and there is a pending up 2613 * Link state is waiting for down notification and there is a pending up
2614 */ 2614 */
2615 static void 2615 static void
2616 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, 2616 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2617 enum bfa_fcport_ln_sm_event event) 2617 enum bfa_fcport_ln_sm_event event)
2618 { 2618 {
2619 bfa_trc(ln->fcport->bfa, event); 2619 bfa_trc(ln->fcport->bfa, event);
2620 2620
2621 switch (event) { 2621 switch (event) {
2622 case BFA_FCPORT_LN_SM_LINKDOWN: 2622 case BFA_FCPORT_LN_SM_LINKDOWN:
2623 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); 2623 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2624 break; 2624 break;
2625 2625
2626 case BFA_FCPORT_LN_SM_NOTIFICATION: 2626 case BFA_FCPORT_LN_SM_NOTIFICATION:
2627 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); 2627 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2628 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); 2628 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2629 break; 2629 break;
2630 2630
2631 default: 2631 default:
2632 bfa_sm_fault(ln->fcport->bfa, event); 2632 bfa_sm_fault(ln->fcport->bfa, event);
2633 } 2633 }
2634 } 2634 }
2635 2635
2636 /* 2636 /*
2637 * Link state is up 2637 * Link state is up
2638 */ 2638 */
2639 static void 2639 static void
2640 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, 2640 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2641 enum bfa_fcport_ln_sm_event event) 2641 enum bfa_fcport_ln_sm_event event)
2642 { 2642 {
2643 bfa_trc(ln->fcport->bfa, event); 2643 bfa_trc(ln->fcport->bfa, event);
2644 2644
2645 switch (event) { 2645 switch (event) {
2646 case BFA_FCPORT_LN_SM_LINKDOWN: 2646 case BFA_FCPORT_LN_SM_LINKDOWN:
2647 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); 2647 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2648 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); 2648 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2649 break; 2649 break;
2650 2650
2651 default: 2651 default:
2652 bfa_sm_fault(ln->fcport->bfa, event); 2652 bfa_sm_fault(ln->fcport->bfa, event);
2653 } 2653 }
2654 } 2654 }
2655 2655
2656 /* 2656 /*
2657 * Link state is waiting for up notification 2657 * Link state is waiting for up notification
2658 */ 2658 */
2659 static void 2659 static void
2660 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, 2660 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2661 enum bfa_fcport_ln_sm_event event) 2661 enum bfa_fcport_ln_sm_event event)
2662 { 2662 {
2663 bfa_trc(ln->fcport->bfa, event); 2663 bfa_trc(ln->fcport->bfa, event);
2664 2664
2665 switch (event) { 2665 switch (event) {
2666 case BFA_FCPORT_LN_SM_LINKDOWN: 2666 case BFA_FCPORT_LN_SM_LINKDOWN:
2667 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); 2667 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2668 break; 2668 break;
2669 2669
2670 case BFA_FCPORT_LN_SM_NOTIFICATION: 2670 case BFA_FCPORT_LN_SM_NOTIFICATION:
2671 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); 2671 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2672 break; 2672 break;
2673 2673
2674 default: 2674 default:
2675 bfa_sm_fault(ln->fcport->bfa, event); 2675 bfa_sm_fault(ln->fcport->bfa, event);
2676 } 2676 }
2677 } 2677 }
2678 2678
2679 /* 2679 /*
2680 * Link state is waiting for up notification and there is a pending down 2680 * Link state is waiting for up notification and there is a pending down
2681 */ 2681 */
2682 static void 2682 static void
2683 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, 2683 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2684 enum bfa_fcport_ln_sm_event event) 2684 enum bfa_fcport_ln_sm_event event)
2685 { 2685 {
2686 bfa_trc(ln->fcport->bfa, event); 2686 bfa_trc(ln->fcport->bfa, event);
2687 2687
2688 switch (event) { 2688 switch (event) {
2689 case BFA_FCPORT_LN_SM_LINKUP: 2689 case BFA_FCPORT_LN_SM_LINKUP:
2690 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); 2690 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2691 break; 2691 break;
2692 2692
2693 case BFA_FCPORT_LN_SM_NOTIFICATION: 2693 case BFA_FCPORT_LN_SM_NOTIFICATION:
2694 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); 2694 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2695 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); 2695 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2696 break; 2696 break;
2697 2697
2698 default: 2698 default:
2699 bfa_sm_fault(ln->fcport->bfa, event); 2699 bfa_sm_fault(ln->fcport->bfa, event);
2700 } 2700 }
2701 } 2701 }
2702 2702
2703 /* 2703 /*
2704 * Link state is waiting for up notification and there are pending down and up 2704 * Link state is waiting for up notification and there are pending down and up
2705 */ 2705 */
2706 static void 2706 static void
2707 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, 2707 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2708 enum bfa_fcport_ln_sm_event event) 2708 enum bfa_fcport_ln_sm_event event)
2709 { 2709 {
2710 bfa_trc(ln->fcport->bfa, event); 2710 bfa_trc(ln->fcport->bfa, event);
2711 2711
2712 switch (event) { 2712 switch (event) {
2713 case BFA_FCPORT_LN_SM_LINKDOWN: 2713 case BFA_FCPORT_LN_SM_LINKDOWN:
2714 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); 2714 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2715 break; 2715 break;
2716 2716
2717 case BFA_FCPORT_LN_SM_NOTIFICATION: 2717 case BFA_FCPORT_LN_SM_NOTIFICATION:
2718 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); 2718 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2719 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); 2719 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2720 break; 2720 break;
2721 2721
2722 default: 2722 default:
2723 bfa_sm_fault(ln->fcport->bfa, event); 2723 bfa_sm_fault(ln->fcport->bfa, event);
2724 } 2724 }
2725 } 2725 }
2726 2726
2727 static void 2727 static void
2728 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) 2728 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2729 { 2729 {
2730 struct bfa_fcport_ln_s *ln = cbarg; 2730 struct bfa_fcport_ln_s *ln = cbarg;
2731 2731
2732 if (complete) 2732 if (complete)
2733 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); 2733 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2734 else 2734 else
2735 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2735 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2736 } 2736 }
2737 2737
2738 /* 2738 /*
2739 * Send SCN notification to upper layers. 2739 * Send SCN notification to upper layers.
2740 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2740 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2741 */ 2741 */
2742 static void 2742 static void
2743 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, 2743 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2744 bfa_boolean_t trunk) 2744 bfa_boolean_t trunk)
2745 { 2745 {
2746 if (fcport->cfg.trunked && !trunk) 2746 if (fcport->cfg.trunked && !trunk)
2747 return; 2747 return;
2748 2748
2749 switch (event) { 2749 switch (event) {
2750 case BFA_PORT_LINKUP: 2750 case BFA_PORT_LINKUP:
2751 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); 2751 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2752 break; 2752 break;
2753 case BFA_PORT_LINKDOWN: 2753 case BFA_PORT_LINKDOWN:
2754 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); 2754 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2755 break; 2755 break;
2756 default: 2756 default:
2757 WARN_ON(1); 2757 WARN_ON(1);
2758 } 2758 }
2759 } 2759 }
2760 2760
2761 static void 2761 static void
2762 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) 2762 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2763 { 2763 {
2764 struct bfa_fcport_s *fcport = ln->fcport; 2764 struct bfa_fcport_s *fcport = ln->fcport;
2765 2765
2766 if (fcport->bfa->fcs) { 2766 if (fcport->bfa->fcs) {
2767 fcport->event_cbfn(fcport->event_cbarg, event); 2767 fcport->event_cbfn(fcport->event_cbarg, event);
2768 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2768 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2769 } else { 2769 } else {
2770 ln->ln_event = event; 2770 ln->ln_event = event;
2771 bfa_cb_queue(fcport->bfa, &ln->ln_qe, 2771 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2772 __bfa_cb_fcport_event, ln); 2772 __bfa_cb_fcport_event, ln);
2773 } 2773 }
2774 } 2774 }
2775 2775
/* DMA memory needed for one firmware-written stats buffer, rounded up
 * to a cacheline multiple. */
#define FCPORT_STATS_DMA_SZ	(BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
				BFA_CACHELINE_SZ))

/*
 * Report this module's memory requirement: one stats DMA buffer.
 * Only *dm_len (DMA memory) is increased; *ndm_len is not used here.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2785 2785
/*
 * Request-queue resume callback (registered in bfa_fcport_attach):
 * space became available, so retry the pending port operation.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2793 2793
/*
 * Claim the DMA region reserved by bfa_fcport_meminfo() for the port
 * statistics buffer, then advance the meminfo cursors past it so the
 * next module claims fresh memory.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/* Record both CPU and DMA addresses of the stats buffer. */
	fcport->stats_kva = dm_kva;
	fcport->stats_pa = dm_pa;
	fcport->stats = (union bfa_fcport_stats_u *) dm_kva;

	dm_kva += FCPORT_STATS_DMA_SZ;
	dm_pa += FCPORT_STATS_DMA_SZ;

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
2813 2813
/*
 * Memory initialization: zero the fcport module, claim its DMA memory,
 * set the initial state machines and default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	/* Must follow the memset: claims the stats DMA buffer. */
	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;	/* filled from IOC in bfa_fcport_init() */

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* Resume pending requests via bfa_fcport_qresume on queue space. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2853 2853
/* Module detach hook: fcport holds no resources that need teardown. */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2858 2858
/*
 * Called when IOC is ready: kick the port state machine to start.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2867 2867
/*
 * Called before IOC is stopped: stop the port state machine and take
 * the trunk attributes offline.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2877 2877
/*
 * Called when IOC failure is detected: fail the port state machine and
 * take the trunk attributes offline.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2889 2889
/*
 * Cache link attributes from the firmware link-state event saved in
 * fcport->event_arg: speed, topology, QoS and FCoE details.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* In loop topology this port's ALPA starts out cleared. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific: VLAN arrives in wire byte order */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2918 2918
2919 static void 2919 static void
2920 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) 2920 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2921 { 2921 {
2922 fcport->speed = BFA_PORT_SPEED_UNKNOWN; 2922 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2923 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 2923 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2924 } 2924 }
2925 2925
/*
 * Send port enable message to firmware.
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a retry is armed via fcport->reqq_wait).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* Queue full: bfa_fcport_qresume will retry this send. */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes out in wire (big-endian) byte order */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
2968 2968
/*
 * Send port disable message to firmware.
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (a retry is armed via fcport->reqq_wait).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* Queue full: bfa_fcport_qresume will retry this send. */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
3004 3004
/* Copy the port/node WWNs from the IOC attributes into the fcport. */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3014 3014
/*
 * Send the configured TX BB-credit to firmware. Best-effort: if the
 * request queue is full the update is dropped (only traced), unlike
 * the enable/disable paths which arm a retry.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
				bfa_lpuid(fcport->bfa));
	/* credit goes out in wire (big-endian) byte order */
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3040 3040
3041 static void 3041 static void
3042 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, 3042 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3043 struct bfa_qos_stats_s *s) 3043 struct bfa_qos_stats_s *s)
3044 { 3044 {
3045 u32 *dip = (u32 *) d; 3045 u32 *dip = (u32 *) d;
3046 __be32 *sip = (__be32 *) s; 3046 __be32 *sip = (__be32 *) s;
3047 int i; 3047 int i;
3048 3048
3049 /* Now swap the 32 bit fields */ 3049 /* Now swap the 32 bit fields */
3050 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) 3050 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3051 dip[i] = be32_to_cpu(sip[i]); 3051 dip[i] = be32_to_cpu(sip[i]);
3052 } 3052 }
3053 3053
/*
 * Convert FCoE statistics from firmware byte order to host order.
 * The stats are consumed as pairs of 32-bit words: on big-endian hosts
 * each word is converted in place, while on little-endian hosts the
 * two words of each pair are also exchanged (a 64-bit quantity split
 * across two 32-bit slots — presumed; confirm against bfi definitions).
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3073 3073
/*
 * Deferred callback for a stats-get request. On completion, byte-swap
 * the firmware stats (FC QoS or FCoE flavor depending on IOC mode)
 * into the caller's buffer and invoke the caller's callback; on
 * cancellation, clear the busy flag.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* Cancelled: allow the next stats request. */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3104 3104
/*
 * Timer callback: the stats-get request timed out. Cancel any pending
 * request-queue wait and complete the caller with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3121 3121
/*
 * Queue a stats-get request to firmware. If the request queue is full,
 * mark stats_qfull and arm a wait that re-enters this function when
 * space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3145 3145
/*
 * Deferred callback for a stats-clear request. On completion, restart
 * the stats-reset timestamp and invoke the caller's callback; on
 * cancellation, clear the busy flag.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* Cancelled: allow the next stats request. */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3166 3166
/*
 * Timer callback: the stats-clear request timed out. Cancel any pending
 * request-queue wait and complete the caller with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3183 3183
/*
 * Queue a stats-clear request to firmware. If the request queue is
 * full, mark stats_qfull and arm a wait that re-enters this function
 * when space frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3207 3207
/*
 * Handle trunk SCN event from firmware: refresh the cached trunk and
 * per-link attributes, log the up/down transition, and notify upper
 * layers when the trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of links that are up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		lattr->deskew	  = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			/* Any up link defines the port speed/topology. */
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log which of the two trunk links came up. */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3286 3286
/*
 * Take the trunk offline on IOC disable/stop: notify upper layers of
 * link-down (if the trunk was online) and reset all cached per-link
 * trunk attributes to their idle values.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3314 3314
/*
 * Called to initialize port attributes from IOC hardware data:
 * WWNs, max frame size, RX BB-credit and supported speed.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* maxfrsize may already have been set from flash config. */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* All three must be non-zero after IOC init. */
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3336 3336
/*
 * Firmware message handler.
 *
 * Dispatches fcport I2H (IOC-to-host) messages: enable/disable responses,
 * link and trunk state change notifications, statistics responses, and
 * enable/disable AENs.  Each message is translated into an event for the
 * fcport state machine or completed via the queued callback.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	/* Stash the raw message so SM handlers can inspect it later. */
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* Ignore stale responses that don't match the last request. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			/*
			 * First enable response may carry the flash-based
			 * port configuration: fix up byte order of the
			 * multi-byte fields and derive the initial trunk
			 * state, then stop using the flash config.
			 */
			if (fcport->use_flash_cfg) {
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		/* Link state change: forward up/down to the state machine. */
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			     __bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			     __bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		/* Unknown message id indicates a firmware/driver mismatch. */
		WARN_ON(1);
		break;
	}
}
3435 3435
3436 /* 3436 /*
3437 * Registered callback for port events. 3437 * Registered callback for port events.
3438 */ 3438 */
3439 void 3439 void
3440 bfa_fcport_event_register(struct bfa_s *bfa, 3440 bfa_fcport_event_register(struct bfa_s *bfa,
3441 void (*cbfn) (void *cbarg, 3441 void (*cbfn) (void *cbarg,
3442 enum bfa_port_linkstate event), 3442 enum bfa_port_linkstate event),
3443 void *cbarg) 3443 void *cbarg)
3444 { 3444 {
3445 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3445 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3446 3446
3447 fcport->event_cbfn = cbfn; 3447 fcport->event_cbfn = cbfn;
3448 fcport->event_cbarg = cbarg; 3448 fcport->event_cbarg = cbarg;
3449 } 3449 }
3450 3450
3451 bfa_status_t 3451 bfa_status_t
3452 bfa_fcport_enable(struct bfa_s *bfa) 3452 bfa_fcport_enable(struct bfa_s *bfa)
3453 { 3453 {
3454 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3454 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3455 3455
3456 if (bfa_ioc_is_disabled(&bfa->ioc)) 3456 if (bfa_ioc_is_disabled(&bfa->ioc))
3457 return BFA_STATUS_IOC_DISABLED; 3457 return BFA_STATUS_IOC_DISABLED;
3458 3458
3459 if (fcport->diag_busy) 3459 if (fcport->diag_busy)
3460 return BFA_STATUS_DIAG_BUSY; 3460 return BFA_STATUS_DIAG_BUSY;
3461 3461
3462 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); 3462 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3463 return BFA_STATUS_OK; 3463 return BFA_STATUS_OK;
3464 } 3464 }
3465 3465
3466 bfa_status_t 3466 bfa_status_t
3467 bfa_fcport_disable(struct bfa_s *bfa) 3467 bfa_fcport_disable(struct bfa_s *bfa)
3468 { 3468 {
3469 3469
3470 if (bfa_ioc_is_disabled(&bfa->ioc)) 3470 if (bfa_ioc_is_disabled(&bfa->ioc))
3471 return BFA_STATUS_IOC_DISABLED; 3471 return BFA_STATUS_IOC_DISABLED;
3472 3472
3473 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); 3473 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3474 return BFA_STATUS_OK; 3474 return BFA_STATUS_OK;
3475 } 3475 }
3476 3476
3477 /* 3477 /*
3478 * Configure port speed. 3478 * Configure port speed.
3479 */ 3479 */
3480 bfa_status_t 3480 bfa_status_t
3481 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) 3481 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3482 { 3482 {
3483 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3483 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3484 3484
3485 bfa_trc(bfa, speed); 3485 bfa_trc(bfa, speed);
3486 3486
3487 if (fcport->cfg.trunked == BFA_TRUE) 3487 if (fcport->cfg.trunked == BFA_TRUE)
3488 return BFA_STATUS_TRUNK_ENABLED; 3488 return BFA_STATUS_TRUNK_ENABLED;
3489 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { 3489 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3490 bfa_trc(bfa, fcport->speed_sup); 3490 bfa_trc(bfa, fcport->speed_sup);
3491 return BFA_STATUS_UNSUPP_SPEED; 3491 return BFA_STATUS_UNSUPP_SPEED;
3492 } 3492 }
3493 3493
3494 fcport->cfg.speed = speed; 3494 fcport->cfg.speed = speed;
3495 3495
3496 return BFA_STATUS_OK; 3496 return BFA_STATUS_OK;
3497 } 3497 }
3498 3498
3499 /* 3499 /*
3500 * Get current speed. 3500 * Get current speed.
3501 */ 3501 */
3502 enum bfa_port_speed 3502 enum bfa_port_speed
3503 bfa_fcport_get_speed(struct bfa_s *bfa) 3503 bfa_fcport_get_speed(struct bfa_s *bfa)
3504 { 3504 {
3505 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3505 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3506 3506
3507 return fcport->speed; 3507 return fcport->speed;
3508 } 3508 }
3509 3509
3510 /* 3510 /*
3511 * Configure port topology. 3511 * Configure port topology.
3512 */ 3512 */
3513 bfa_status_t 3513 bfa_status_t
3514 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) 3514 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3515 { 3515 {
3516 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3516 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3517 3517
3518 bfa_trc(bfa, topology); 3518 bfa_trc(bfa, topology);
3519 bfa_trc(bfa, fcport->cfg.topology); 3519 bfa_trc(bfa, fcport->cfg.topology);
3520 3520
3521 switch (topology) { 3521 switch (topology) {
3522 case BFA_PORT_TOPOLOGY_P2P: 3522 case BFA_PORT_TOPOLOGY_P2P:
3523 case BFA_PORT_TOPOLOGY_LOOP: 3523 case BFA_PORT_TOPOLOGY_LOOP:
3524 case BFA_PORT_TOPOLOGY_AUTO: 3524 case BFA_PORT_TOPOLOGY_AUTO:
3525 break; 3525 break;
3526 3526
3527 default: 3527 default:
3528 return BFA_STATUS_EINVAL; 3528 return BFA_STATUS_EINVAL;
3529 } 3529 }
3530 3530
3531 fcport->cfg.topology = topology; 3531 fcport->cfg.topology = topology;
3532 return BFA_STATUS_OK; 3532 return BFA_STATUS_OK;
3533 } 3533 }
3534 3534
3535 /* 3535 /*
3536 * Get current topology. 3536 * Get current topology.
3537 */ 3537 */
3538 enum bfa_port_topology 3538 enum bfa_port_topology
3539 bfa_fcport_get_topology(struct bfa_s *bfa) 3539 bfa_fcport_get_topology(struct bfa_s *bfa)
3540 { 3540 {
3541 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3541 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3542 3542
3543 return fcport->topology; 3543 return fcport->topology;
3544 } 3544 }
3545 3545
3546 bfa_status_t 3546 bfa_status_t
3547 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 3547 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3548 { 3548 {
3549 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3549 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3550 3550
3551 bfa_trc(bfa, alpa); 3551 bfa_trc(bfa, alpa);
3552 bfa_trc(bfa, fcport->cfg.cfg_hardalpa); 3552 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3553 bfa_trc(bfa, fcport->cfg.hardalpa); 3553 bfa_trc(bfa, fcport->cfg.hardalpa);
3554 3554
3555 fcport->cfg.cfg_hardalpa = BFA_TRUE; 3555 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3556 fcport->cfg.hardalpa = alpa; 3556 fcport->cfg.hardalpa = alpa;
3557 3557
3558 return BFA_STATUS_OK; 3558 return BFA_STATUS_OK;
3559 } 3559 }
3560 3560
3561 bfa_status_t 3561 bfa_status_t
3562 bfa_fcport_clr_hardalpa(struct bfa_s *bfa) 3562 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3563 { 3563 {
3564 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3564 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3565 3565
3566 bfa_trc(bfa, fcport->cfg.cfg_hardalpa); 3566 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3567 bfa_trc(bfa, fcport->cfg.hardalpa); 3567 bfa_trc(bfa, fcport->cfg.hardalpa);
3568 3568
3569 fcport->cfg.cfg_hardalpa = BFA_FALSE; 3569 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3570 return BFA_STATUS_OK; 3570 return BFA_STATUS_OK;
3571 } 3571 }
3572 3572
3573 bfa_boolean_t 3573 bfa_boolean_t
3574 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) 3574 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3575 { 3575 {
3576 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3576 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3577 3577
3578 *alpa = fcport->cfg.hardalpa; 3578 *alpa = fcport->cfg.hardalpa;
3579 return fcport->cfg.cfg_hardalpa; 3579 return fcport->cfg.cfg_hardalpa;
3580 } 3580 }
3581 3581
3582 u8 3582 u8
3583 bfa_fcport_get_myalpa(struct bfa_s *bfa) 3583 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3584 { 3584 {
3585 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3585 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3586 3586
3587 return fcport->myalpa; 3587 return fcport->myalpa;
3588 } 3588 }
3589 3589
3590 bfa_status_t 3590 bfa_status_t
3591 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) 3591 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3592 { 3592 {
3593 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3593 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3594 3594
3595 bfa_trc(bfa, maxfrsize); 3595 bfa_trc(bfa, maxfrsize);
3596 bfa_trc(bfa, fcport->cfg.maxfrsize); 3596 bfa_trc(bfa, fcport->cfg.maxfrsize);
3597 3597
3598 /* with in range */ 3598 /* with in range */
3599 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) 3599 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3600 return BFA_STATUS_INVLD_DFSZ; 3600 return BFA_STATUS_INVLD_DFSZ;
3601 3601
3602 /* power of 2, if not the max frame size of 2112 */ 3602 /* power of 2, if not the max frame size of 2112 */
3603 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) 3603 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3604 return BFA_STATUS_INVLD_DFSZ; 3604 return BFA_STATUS_INVLD_DFSZ;
3605 3605
3606 fcport->cfg.maxfrsize = maxfrsize; 3606 fcport->cfg.maxfrsize = maxfrsize;
3607 return BFA_STATUS_OK; 3607 return BFA_STATUS_OK;
3608 } 3608 }
3609 3609
3610 u16 3610 u16
3611 bfa_fcport_get_maxfrsize(struct bfa_s *bfa) 3611 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3612 { 3612 {
3613 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3613 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3614 3614
3615 return fcport->cfg.maxfrsize; 3615 return fcport->cfg.maxfrsize;
3616 } 3616 }
3617 3617
3618 u8 3618 u8
3619 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) 3619 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3620 { 3620 {
3621 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3621 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3622 3622
3623 return fcport->cfg.rx_bbcredit; 3623 return fcport->cfg.rx_bbcredit;
3624 } 3624 }
3625 3625
3626 void 3626 void
3627 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 3627 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3628 { 3628 {
3629 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3629 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3630 3630
3631 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; 3631 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3632 bfa_fcport_send_txcredit(fcport); 3632 bfa_fcport_send_txcredit(fcport);
3633 } 3633 }
3634 3634
3635 /* 3635 /*
3636 * Get port attributes. 3636 * Get port attributes.
3637 */ 3637 */
3638 3638
3639 wwn_t 3639 wwn_t
3640 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) 3640 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3641 { 3641 {
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643 if (node) 3643 if (node)
3644 return fcport->nwwn; 3644 return fcport->nwwn;
3645 else 3645 else
3646 return fcport->pwwn; 3646 return fcport->pwwn;
3647 } 3647 }
3648 3648
/*
 * Fill @attr with a snapshot of the port's configuration and
 * current operational state (WWNs, speed, topology, beacon,
 * link/IOC state and FCoE VLAN).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* Factory WWNs come from IOC manufacturing data. */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
	       sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* IOC-level problems override the port state machine's state. */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3692 3692
3693 #define BFA_FCPORT_STATS_TOV 1000 3693 #define BFA_FCPORT_STATS_TOV 1000
3694 3694
3695 /* 3695 /*
3696 * Fetch port statistics (FCQoS or FCoE). 3696 * Fetch port statistics (FCQoS or FCoE).
3697 */ 3697 */
3698 bfa_status_t 3698 bfa_status_t
3699 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 3699 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3700 bfa_cb_port_t cbfn, void *cbarg) 3700 bfa_cb_port_t cbfn, void *cbarg)
3701 { 3701 {
3702 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3702 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3703 3703
3704 if (fcport->stats_busy) { 3704 if (fcport->stats_busy) {
3705 bfa_trc(bfa, fcport->stats_busy); 3705 bfa_trc(bfa, fcport->stats_busy);
3706 return BFA_STATUS_DEVBUSY; 3706 return BFA_STATUS_DEVBUSY;
3707 } 3707 }
3708 3708
3709 fcport->stats_busy = BFA_TRUE; 3709 fcport->stats_busy = BFA_TRUE;
3710 fcport->stats_ret = stats; 3710 fcport->stats_ret = stats;
3711 fcport->stats_cbfn = cbfn; 3711 fcport->stats_cbfn = cbfn;
3712 fcport->stats_cbarg = cbarg; 3712 fcport->stats_cbarg = cbarg;
3713 3713
3714 bfa_fcport_send_stats_get(fcport); 3714 bfa_fcport_send_stats_get(fcport);
3715 3715
3716 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, 3716 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3717 fcport, BFA_FCPORT_STATS_TOV); 3717 fcport, BFA_FCPORT_STATS_TOV);
3718 return BFA_STATUS_OK; 3718 return BFA_STATUS_OK;
3719 } 3719 }
3720 3720
3721 /* 3721 /*
3722 * Reset port statistics (FCQoS or FCoE). 3722 * Reset port statistics (FCQoS or FCoE).
3723 */ 3723 */
3724 bfa_status_t 3724 bfa_status_t
3725 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) 3725 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3726 { 3726 {
3727 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3727 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3728 3728
3729 if (fcport->stats_busy) { 3729 if (fcport->stats_busy) {
3730 bfa_trc(bfa, fcport->stats_busy); 3730 bfa_trc(bfa, fcport->stats_busy);
3731 return BFA_STATUS_DEVBUSY; 3731 return BFA_STATUS_DEVBUSY;
3732 } 3732 }
3733 3733
3734 fcport->stats_busy = BFA_TRUE; 3734 fcport->stats_busy = BFA_TRUE;
3735 fcport->stats_cbfn = cbfn; 3735 fcport->stats_cbfn = cbfn;
3736 fcport->stats_cbarg = cbarg; 3736 fcport->stats_cbarg = cbarg;
3737 3737
3738 bfa_fcport_send_stats_clear(fcport); 3738 bfa_fcport_send_stats_clear(fcport);
3739 3739
3740 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, 3740 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3741 fcport, BFA_FCPORT_STATS_TOV); 3741 fcport, BFA_FCPORT_STATS_TOV);
3742 return BFA_STATUS_OK; 3742 return BFA_STATUS_OK;
3743 } 3743 }
3744 3744
3745 3745
3746 /* 3746 /*
3747 * Fetch port attributes. 3747 * Fetch port attributes.
3748 */ 3748 */
3749 bfa_boolean_t 3749 bfa_boolean_t
3750 bfa_fcport_is_disabled(struct bfa_s *bfa) 3750 bfa_fcport_is_disabled(struct bfa_s *bfa)
3751 { 3751 {
3752 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3752 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3753 3753
3754 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) == 3754 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3755 BFA_PORT_ST_DISABLED; 3755 BFA_PORT_ST_DISABLED;
3756 3756
3757 } 3757 }
3758 3758
3759 bfa_boolean_t 3759 bfa_boolean_t
3760 bfa_fcport_is_ratelim(struct bfa_s *bfa) 3760 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3761 { 3761 {
3762 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3762 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3763 3763
3764 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; 3764 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3765 3765
3766 } 3766 }
3767 3767
3768 /* 3768 /*
3769 * Get default minimum ratelim speed 3769 * Get default minimum ratelim speed
3770 */ 3770 */
3771 enum bfa_port_speed 3771 enum bfa_port_speed
3772 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) 3772 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3773 { 3773 {
3774 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3774 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3775 3775
3776 bfa_trc(bfa, fcport->cfg.trl_def_speed); 3776 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3777 return fcport->cfg.trl_def_speed; 3777 return fcport->cfg.trl_def_speed;
3778 3778
3779 } 3779 }
3780 3780
3781 bfa_boolean_t 3781 bfa_boolean_t
3782 bfa_fcport_is_linkup(struct bfa_s *bfa) 3782 bfa_fcport_is_linkup(struct bfa_s *bfa)
3783 { 3783 {
3784 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3784 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3785 3785
3786 return (!fcport->cfg.trunked && 3786 return (!fcport->cfg.trunked &&
3787 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) || 3787 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3788 (fcport->cfg.trunked && 3788 (fcport->cfg.trunked &&
3789 fcport->trunk.attr.state == BFA_TRUNK_ONLINE); 3789 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3790 } 3790 }
3791 3791
3792 bfa_boolean_t 3792 bfa_boolean_t
3793 bfa_fcport_is_qos_enabled(struct bfa_s *bfa) 3793 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3794 { 3794 {
3795 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3795 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3796 3796
3797 return fcport->cfg.qos_enabled; 3797 return fcport->cfg.qos_enabled;
3798 } 3798 }
3799 3799
/*
 * Rport State machine functions
 */
/*
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		/* Only legal transition: move to the created state. */
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* Any other event in this state is a driver bug. */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3823 3823
/*
 * Created but not yet online; waiting for the rport to come online,
 * be deleted, or for the IOC to fail.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* Issue fw create; queue-full means we wait for resume. */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Never reached firmware, so the rport can be freed now. */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3855 3855
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware acknowledged the create: rport is now online. */
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Delete while create is in flight: defer until fw responds. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Offline while create is in flight: defer likewise. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3892 3892
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available again: retry the fw create. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Nothing reached firmware: cancel the wait and free. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Cancel the queued create and report offline immediately. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3933 3933
/*
 * Online state - normal parking state.
 *
 * The rport stays here while the remote port is logged in and usable.
 * Handles teardown requests (OFFLINE/DELETE), speed updates and QoS
 * state-change notifications delivered by firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* Queue a delete to firmware now; if the request queue is
		 * full, park in the *_qfull state until space frees up. */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS attribute change reported by firmware; cache the new
		 * attributes and trace old vs. new values. */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* Flow ids arrive in wire (big-endian) order; convert to CPU
		 * order before comparing them. */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* Notify the upper layer only about the attributes that
		 * actually changed. */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4001 4001
/*
 * Firmware rport is being deleted - awaiting f/w response.
 *
 * On the firmware's delete response the rport transitions to offline
 * and the offline callback is scheduled.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Delete requested while the fw delete is in flight; just
		 * change the target state, the pending response drives it. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died: no response will come, report offline now. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4034 4034
/*
 * Delete request could not be queued to firmware (request queue full);
 * waiting for queue space to resume.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available again - retry the fw delete. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Cancel the queue wait - no request will be sent now. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4064 4064
/*
 * Offline state.
 *
 * The rport exists but has no firmware counterpart; it can be brought
 * online again (re-creating the fw rport) or freed.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* Create the firmware rport; *_qfull if the request queue
		 * has no room right now. */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4099 4099
/*
 * Rport is deleted, waiting for firmware response to delete.
 *
 * Both the firmware response and an IOC failure end with the rport
 * freed and returned to the uninit state.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4126 4126
/*
 * Rport delete is pending but could not be queued to firmware
 * (request queue full); waiting for queue space.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available - send the fw delete now. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failed: cancel the queue wait and free immediately. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4151 4151
/*
 * Waiting for rport create response from firmware. A delete is pending.
 *
 * Once the create response arrives, the delete is issued to tear down
 * the firmware rport that was just created.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4182 4182
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
		 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed; now tear the fw rport back down. */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline to a pending delete. */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4217 4217
/*
 * IOC h/w failed.
 *
 * The firmware side of the rport is gone; delete frees locally, online
 * attempts to re-create the firmware rport, and further HWFAIL events
 * are absorbed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already in the IOC-disabled state; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4255 4255
4256 4256
4257 4257
4258 /* 4258 /*
4259 * bfa_rport_private BFA rport private functions 4259 * bfa_rport_private BFA rport private functions
4260 */ 4260 */
4261 4261
4262 static void 4262 static void
4263 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete) 4263 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4264 { 4264 {
4265 struct bfa_rport_s *rp = cbarg; 4265 struct bfa_rport_s *rp = cbarg;
4266 4266
4267 if (complete) 4267 if (complete)
4268 bfa_cb_rport_online(rp->rport_drv); 4268 bfa_cb_rport_online(rp->rport_drv);
4269 } 4269 }
4270 4270
4271 static void 4271 static void
4272 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete) 4272 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4273 { 4273 {
4274 struct bfa_rport_s *rp = cbarg; 4274 struct bfa_rport_s *rp = cbarg;
4275 4275
4276 if (complete) 4276 if (complete)
4277 bfa_cb_rport_offline(rp->rport_drv); 4277 bfa_cb_rport_offline(rp->rport_drv);
4278 } 4278 }
4279 4279
/*
 * Request-queue resume callback: queue space is available again, so
 * kick the rport state machine with a QRESUME event.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s *rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
4287 4287
/*
 * Report kernel-memory requirements of the rport module.
 * Clamps the configured rport count to the supported minimum and adds
 * one bfa_rport_s per rport to the KVA length; no DMA memory is used.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
4297 4297
/*
 * Module attach: carve the rport array out of the pre-allocated
 * kernel-memory region and initialize every rport.
 *
 * Each rport starts in the uninit state with its request-queue wait
 * element initialized. The number of rports must be a non-zero power
 * of two (enforced by the WARN_ON).
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is kept off the free list — presumably
		 * reserved as an invalid handle; TODO confirm.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: advance the KVA cursor past the rport array.
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4336 4336
/* Module detach: intentionally a no-op for the rport module. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4341 4341
/* Module start: intentionally a no-op for the rport module. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4346 4346
/* Module stop: intentionally a no-op for the rport module. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4351 4351
4352 static void 4352 static void
4353 bfa_rport_iocdisable(struct bfa_s *bfa) 4353 bfa_rport_iocdisable(struct bfa_s *bfa)
4354 { 4354 {
4355 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); 4355 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4356 struct bfa_rport_s *rport; 4356 struct bfa_rport_s *rport;
4357 struct list_head *qe, *qen; 4357 struct list_head *qe, *qen;
4358 4358
4359 list_for_each_safe(qe, qen, &mod->rp_active_q) { 4359 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4360 rport = (struct bfa_rport_s *) qe; 4360 rport = (struct bfa_rport_s *) qe;
4361 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); 4361 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4362 } 4362 }
4363 } 4363 }
4364 4364
4365 static struct bfa_rport_s * 4365 static struct bfa_rport_s *
4366 bfa_rport_alloc(struct bfa_rport_mod_s *mod) 4366 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4367 { 4367 {
4368 struct bfa_rport_s *rport; 4368 struct bfa_rport_s *rport;
4369 4369
4370 bfa_q_deq(&mod->rp_free_q, &rport); 4370 bfa_q_deq(&mod->rp_free_q, &rport);
4371 if (rport) 4371 if (rport)
4372 list_add_tail(&rport->qe, &mod->rp_active_q); 4372 list_add_tail(&rport->qe, &mod->rp_active_q);
4373 4373
4374 return rport; 4374 return rport;
4375 } 4375 }
4376 4376
4377 static void 4377 static void
4378 bfa_rport_free(struct bfa_rport_s *rport) 4378 bfa_rport_free(struct bfa_rport_s *rport)
4379 { 4379 {
4380 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); 4380 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4381 4381
4382 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport)); 4382 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4383 list_del(&rport->qe); 4383 list_del(&rport->qe);
4384 list_add_tail(&rport->qe, &mod->rp_free_q); 4384 list_add_tail(&rport->qe, &mod->rp_free_q);
4385 } 4385 }
4386 4386
/*
 * Send an rport-create request to firmware.
 *
 * Returns BFA_TRUE if the request was queued; BFA_FALSE if the request
 * queue is full, in which case the rport is registered for a QRESUME
 * wakeup via its reqq_wait element.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* Fill the create request from the cached rport_info.  Note only
	 * max_frmsz is byte-swapped here; the other fields are copied
	 * as-is. */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4419 4419
/*
 * Send an rport-delete request to firmware.
 *
 * Returns BFA_TRUE if the request was queued; BFA_FALSE if the request
 * queue is full, in which case a QRESUME wakeup is requested so the
 * caller's state machine can retry.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4444 4444
/*
 * Send a set-speed request to firmware.
 *
 * Unlike fwcreate/fwdelete, a full request queue is NOT retried: the
 * request is simply dropped (with a trace) and BFA_FALSE is returned —
 * a speed update is advisory, not a required state transition.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4470 4470
4471 4471
4472 4472
4473 /* 4473 /*
4474 * bfa_rport_public 4474 * bfa_rport_public
4475 */ 4475 */
4476 4476
/*
 * Rport interrupt processing.
 *
 * Demultiplexes firmware-to-host rport messages: looks up the rport by
 * its bfa_handle tag and feeds the matching event into its state
 * machine. Unknown message ids trigger a WARN.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* Cache the firmware handle and initial QoS attributes for
		 * subsequent delete/speed requests. */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* Stash the raw message; the online-state handler parses it
		 * when the QOS_SCN event is processed. */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
4516 4516
4517 4517
4518 4518
4519 /* 4519 /*
4520 * bfa_rport_api 4520 * bfa_rport_api
4521 */ 4521 */
4522 4522
4523 struct bfa_rport_s * 4523 struct bfa_rport_s *
4524 bfa_rport_create(struct bfa_s *bfa, void *rport_drv) 4524 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4525 { 4525 {
4526 struct bfa_rport_s *rp; 4526 struct bfa_rport_s *rp;
4527 4527
4528 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); 4528 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4529 4529
4530 if (rp == NULL) 4530 if (rp == NULL)
4531 return NULL; 4531 return NULL;
4532 4532
4533 rp->bfa = bfa; 4533 rp->bfa = bfa;
4534 rp->rport_drv = rport_drv; 4534 rp->rport_drv = rport_drv;
4535 memset(&rp->stats, 0, sizeof(rp->stats)); 4535 memset(&rp->stats, 0, sizeof(rp->stats));
4536 4536
4537 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); 4537 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4538 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); 4538 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4539 4539
4540 return rp; 4540 return rp;
4541 } 4541 }
4542 4542
/*
 * Bring an rport online with the supplied login parameters and drive
 * the state machine toward firmware rport creation.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* A zero max_frmsz is a caller bug worth flagging... */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * ...but some JBODs are seen to be not setting PDU size correctly
	 * in PLOGI responses, so also recover by defaulting to the
	 * minimum size rather than failing.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4560 4560
/*
 * Record the negotiated rport speed and ask the state machine to push
 * it to firmware. Speed must be a concrete value (not 0 or AUTO).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4570 4570
4571 4571
4572 /* 4572 /*
4573 * SGPG related functions 4573 * SGPG related functions
4574 */ 4574 */
4575 4575
/*
 * Compute and return memory needed by the SG-page module.
 *
 * Clamps the configured SG-page count to the supported minimum, then
 * adds (num + 1) host-side bfa_sgpg_s descriptors to the KVA length
 * and (num + 1) firmware bfi_sgpg_s pages to the DMA length.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;

	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
}
4589 4589
4590 4590
/*
 * Attach-time setup for the SGPG module: claims the kernel-virtual and
 * DMA-able arrays from the meminfo cursors, aligns the DMA base, links
 * every host SGPG descriptor to its firmware page, and queues them all
 * on the free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* Lets a physical address be viewed either as u64 or as the
	 * firmware's split-address representation. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/* Round the DMA base up to the SGPG boundary and skip the same
	 * number of bytes in the kva/virt regions to keep them in step. */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* The aligned base must be a multiple of the SGPG size. */
	WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		/* Pair the host descriptor with its firmware page, storing
		 * the page's physical address in wire byte order. */
		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* Advance the meminfo cursors past everything claimed above. */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4645 4645
/* SGPG module detach hook: nothing to release; present to fill the
 * module function table. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4650 4650
/* SGPG module start hook: no per-start work; vtable placeholder. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4655 4655
/* SGPG module stop hook: no per-stop work; vtable placeholder. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4660 4660
/* SGPG module IOC-disable hook: no IOC-held state to undo here. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4665 4665
4666 bfa_status_t 4666 bfa_status_t
4667 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) 4667 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4668 { 4668 {
4669 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4669 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4670 struct bfa_sgpg_s *hsgpg; 4670 struct bfa_sgpg_s *hsgpg;
4671 int i; 4671 int i;
4672 4672
4673 bfa_trc_fp(bfa, nsgpgs);
4674
4675 if (mod->free_sgpgs < nsgpgs) 4673 if (mod->free_sgpgs < nsgpgs)
4676 return BFA_STATUS_ENOMEM; 4674 return BFA_STATUS_ENOMEM;
4677 4675
4678 for (i = 0; i < nsgpgs; i++) { 4676 for (i = 0; i < nsgpgs; i++) {
4679 bfa_q_deq(&mod->sgpg_q, &hsgpg); 4677 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4680 WARN_ON(!hsgpg); 4678 WARN_ON(!hsgpg);
4681 list_add_tail(&hsgpg->qe, sgpg_q); 4679 list_add_tail(&hsgpg->qe, sgpg_q);
4682 } 4680 }
4683 4681
4684 mod->free_sgpgs -= nsgpgs; 4682 mod->free_sgpgs -= nsgpgs;
4685 return BFA_STATUS_OK; 4683 return BFA_STATUS_OK;
4686 } 4684 }
4687 4685
4688 void 4686 void
4689 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) 4687 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4690 { 4688 {
4691 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4689 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4692 struct bfa_sgpg_wqe_s *wqe; 4690 struct bfa_sgpg_wqe_s *wqe;
4693
4694 bfa_trc_fp(bfa, nsgpg);
4695 4691
4696 mod->free_sgpgs += nsgpg; 4692 mod->free_sgpgs += nsgpg;
4697 WARN_ON(mod->free_sgpgs > mod->num_sgpgs); 4693 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
4698 4694
4699 list_splice_tail_init(sgpg_q, &mod->sgpg_q); 4695 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
4700 4696
4701 if (list_empty(&mod->sgpg_wait_q)) 4697 if (list_empty(&mod->sgpg_wait_q))
4702 return; 4698 return;
4703 4699
4704 /* 4700 /*
4705 * satisfy as many waiting requests as possible 4701 * satisfy as many waiting requests as possible
4706 */ 4702 */
4707 do { 4703 do {
4708 wqe = bfa_q_first(&mod->sgpg_wait_q); 4704 wqe = bfa_q_first(&mod->sgpg_wait_q);
4709 if (mod->free_sgpgs < wqe->nsgpg) 4705 if (mod->free_sgpgs < wqe->nsgpg)
4710 nsgpg = mod->free_sgpgs; 4706 nsgpg = mod->free_sgpgs;
4711 else 4707 else
4712 nsgpg = wqe->nsgpg; 4708 nsgpg = wqe->nsgpg;
4713 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); 4709 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4714 wqe->nsgpg -= nsgpg; 4710 wqe->nsgpg -= nsgpg;
4715 if (wqe->nsgpg == 0) { 4711 if (wqe->nsgpg == 0) {
4716 list_del(&wqe->qe); 4712 list_del(&wqe->qe);
4717 wqe->cbfn(wqe->cbarg); 4713 wqe->cbfn(wqe->cbarg);
4718 } 4714 }
4719 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); 4715 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
4720 } 4716 }
4721 4717
4722 void 4718 void
4723 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) 4719 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4724 { 4720 {
4725 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4721 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4726 4722
4727 WARN_ON(nsgpg <= 0); 4723 WARN_ON(nsgpg <= 0);
4728 WARN_ON(nsgpg <= mod->free_sgpgs); 4724 WARN_ON(nsgpg <= mod->free_sgpgs);
4729 4725
4730 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 4726 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4731 4727
4732 /* 4728 /*
4733 * allocate any left to this one first 4729 * allocate any left to this one first
4734 */ 4730 */
4735 if (mod->free_sgpgs) { 4731 if (mod->free_sgpgs) {
4736 /* 4732 /*
4737 * no one else is waiting for SGPG 4733 * no one else is waiting for SGPG
4738 */ 4734 */
4739 WARN_ON(!list_empty(&mod->sgpg_wait_q)); 4735 WARN_ON(!list_empty(&mod->sgpg_wait_q));
4740 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); 4736 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4741 wqe->nsgpg -= mod->free_sgpgs; 4737 wqe->nsgpg -= mod->free_sgpgs;
4742 mod->free_sgpgs = 0; 4738 mod->free_sgpgs = 0;
4743 } 4739 }
4744 4740
4745 list_add_tail(&wqe->qe, &mod->sgpg_wait_q); 4741 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
4746 } 4742 }
4747 4743
4748 void 4744 void
4749 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) 4745 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4750 { 4746 {
4751 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4747 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4752 4748
4753 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); 4749 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4754 list_del(&wqe->qe); 4750 list_del(&wqe->qe);
4755 4751
4756 if (wqe->nsgpg_total != wqe->nsgpg) 4752 if (wqe->nsgpg_total != wqe->nsgpg)
4757 bfa_sgpg_mfree(bfa, &wqe->sgpg_q, 4753 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4758 wqe->nsgpg_total - wqe->nsgpg); 4754 wqe->nsgpg_total - wqe->nsgpg);
4759 } 4755 }
4760 4756
4761 void 4757 void
4762 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), 4758 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4763 void *cbarg) 4759 void *cbarg)
4764 { 4760 {
4765 INIT_LIST_HEAD(&wqe->sgpg_q); 4761 INIT_LIST_HEAD(&wqe->sgpg_q);
4766 wqe->cbfn = cbfn; 4762 wqe->cbfn = cbfn;
4767 wqe->cbarg = cbarg; 4763 wqe->cbarg = cbarg;
4768 } 4764 }
4769 4765
4770 /* 4766 /*
4771 * UF related functions 4767 * UF related functions
4772 */ 4768 */
4773 /* 4769 /*
4774 ***************************************************************************** 4770 *****************************************************************************
4775 * Internal functions 4771 * Internal functions
4776 ***************************************************************************** 4772 *****************************************************************************
4777 */ 4773 */
4778 static void 4774 static void
4779 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) 4775 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4780 { 4776 {
4781 struct bfa_uf_s *uf = cbarg; 4777 struct bfa_uf_s *uf = cbarg;
4782 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); 4778 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4783 4779
4784 if (complete) 4780 if (complete)
4785 ufm->ufrecv(ufm->cbarg, uf); 4781 ufm->ufrecv(ufm->cbarg, uf);
4786 } 4782 }
4787 4783
4788 static void 4784 static void
4789 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 4785 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4790 { 4786 {
4791 u32 uf_pb_tot_sz; 4787 u32 uf_pb_tot_sz;
4792 4788
4793 ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi); 4789 ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
4794 ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi); 4790 ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
4795 uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs), 4791 uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
4796 BFA_DMA_ALIGN_SZ); 4792 BFA_DMA_ALIGN_SZ);
4797 4793
4798 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; 4794 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
4799 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; 4795 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
4800 4796
4801 memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); 4797 memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
4802 } 4798 }
4803 4799
4804 static void 4800 static void
4805 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 4801 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4806 { 4802 {
4807 struct bfi_uf_buf_post_s *uf_bp_msg; 4803 struct bfi_uf_buf_post_s *uf_bp_msg;
4808 struct bfi_sge_s *sge; 4804 struct bfi_sge_s *sge;
4809 union bfi_addr_u sga_zero = { {0} }; 4805 union bfi_addr_u sga_zero = { {0} };
4810 u16 i; 4806 u16 i;
4811 u16 buf_len; 4807 u16 buf_len;
4812 4808
4813 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi); 4809 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4814 uf_bp_msg = ufm->uf_buf_posts; 4810 uf_bp_msg = ufm->uf_buf_posts;
4815 4811
4816 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; 4812 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4817 i++, uf_bp_msg++) { 4813 i++, uf_bp_msg++) {
4818 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); 4814 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4819 4815
4820 uf_bp_msg->buf_tag = i; 4816 uf_bp_msg->buf_tag = i;
4821 buf_len = sizeof(struct bfa_uf_buf_s); 4817 buf_len = sizeof(struct bfa_uf_buf_s);
4822 uf_bp_msg->buf_len = cpu_to_be16(buf_len); 4818 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4823 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, 4819 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4824 bfa_lpuid(ufm->bfa)); 4820 bfa_lpuid(ufm->bfa));
4825 4821
4826 sge = uf_bp_msg->sge; 4822 sge = uf_bp_msg->sge;
4827 sge[0].sg_len = buf_len; 4823 sge[0].sg_len = buf_len;
4828 sge[0].flags = BFI_SGE_DATA_LAST; 4824 sge[0].flags = BFI_SGE_DATA_LAST;
4829 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i)); 4825 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4830 bfa_sge_to_be(sge); 4826 bfa_sge_to_be(sge);
4831 4827
4832 sge[1].sg_len = buf_len; 4828 sge[1].sg_len = buf_len;
4833 sge[1].flags = BFI_SGE_PGDLEN; 4829 sge[1].flags = BFI_SGE_PGDLEN;
4834 sge[1].sga = sga_zero; 4830 sge[1].sga = sga_zero;
4835 bfa_sge_to_be(&sge[1]); 4831 bfa_sge_to_be(&sge[1]);
4836 } 4832 }
4837 4833
4838 /* 4834 /*
4839 * advance pointer beyond consumed memory 4835 * advance pointer beyond consumed memory
4840 */ 4836 */
4841 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; 4837 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
4842 } 4838 }
4843 4839
4844 static void 4840 static void
4845 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 4841 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4846 { 4842 {
4847 u16 i; 4843 u16 i;
4848 struct bfa_uf_s *uf; 4844 struct bfa_uf_s *uf;
4849 4845
4850 /* 4846 /*
4851 * Claim block of memory for UF list 4847 * Claim block of memory for UF list
4852 */ 4848 */
4853 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi); 4849 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
4854 4850
4855 /* 4851 /*
4856 * Initialize UFs and queue it in UF free queue 4852 * Initialize UFs and queue it in UF free queue
4857 */ 4853 */
4858 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { 4854 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
4859 memset(uf, 0, sizeof(struct bfa_uf_s)); 4855 memset(uf, 0, sizeof(struct bfa_uf_s));
4860 uf->bfa = ufm->bfa; 4856 uf->bfa = ufm->bfa;
4861 uf->uf_tag = i; 4857 uf->uf_tag = i;
4862 uf->pb_len = sizeof(struct bfa_uf_buf_s); 4858 uf->pb_len = sizeof(struct bfa_uf_buf_s);
4863 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i]; 4859 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
4864 uf->buf_pa = ufm_pbs_pa(ufm, i); 4860 uf->buf_pa = ufm_pbs_pa(ufm, i);
4865 list_add_tail(&uf->qe, &ufm->uf_free_q); 4861 list_add_tail(&uf->qe, &ufm->uf_free_q);
4866 } 4862 }
4867 4863
4868 /* 4864 /*
4869 * advance memory pointer 4865 * advance memory pointer
4870 */ 4866 */
4871 bfa_meminfo_kva(mi) = (u8 *) uf; 4867 bfa_meminfo_kva(mi) = (u8 *) uf;
4872 } 4868 }
4873 4869
/*
 * Claim all UF module memory from the meminfo cursors.  Order matters:
 * posted buffers first (DMA region), then descriptors (which reference
 * the buffers), then the prebuilt post messages.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4881 4877
4882 static void 4878 static void
4883 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) 4879 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4884 { 4880 {
4885 u32 num_ufs = cfg->fwcfg.num_uf_bufs; 4881 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4886 4882
4887 /* 4883 /*
4888 * dma-able memory for UF posted bufs 4884 * dma-able memory for UF posted bufs
4889 */ 4885 */
4890 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs), 4886 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4891 BFA_DMA_ALIGN_SZ); 4887 BFA_DMA_ALIGN_SZ);
4892 4888
4893 /* 4889 /*
4894 * kernel Virtual memory for UFs and UF buf post msg copies 4890 * kernel Virtual memory for UFs and UF buf post msg copies
4895 */ 4891 */
4896 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs; 4892 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4897 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs; 4893 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4898 } 4894 }
4899 4895
4900 static void 4896 static void
4901 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 4897 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4902 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 4898 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4903 { 4899 {
4904 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 4900 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4905 4901
4906 memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); 4902 memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
4907 ufm->bfa = bfa; 4903 ufm->bfa = bfa;
4908 ufm->num_ufs = cfg->fwcfg.num_uf_bufs; 4904 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
4909 INIT_LIST_HEAD(&ufm->uf_free_q); 4905 INIT_LIST_HEAD(&ufm->uf_free_q);
4910 INIT_LIST_HEAD(&ufm->uf_posted_q); 4906 INIT_LIST_HEAD(&ufm->uf_posted_q);
4911 4907
4912 uf_mem_claim(ufm, meminfo); 4908 uf_mem_claim(ufm, meminfo);
4913 } 4909 }
4914 4910
/* UF module detach hook: nothing to release; vtable placeholder. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4919 4915
4920 static struct bfa_uf_s * 4916 static struct bfa_uf_s *
4921 bfa_uf_get(struct bfa_uf_mod_s *uf_mod) 4917 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4922 { 4918 {
4923 struct bfa_uf_s *uf; 4919 struct bfa_uf_s *uf;
4924 4920
4925 bfa_q_deq(&uf_mod->uf_free_q, &uf); 4921 bfa_q_deq(&uf_mod->uf_free_q, &uf);
4926 return uf; 4922 return uf;
4927 } 4923 }
4928 4924
4929 static void 4925 static void
4930 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf) 4926 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
4931 { 4927 {
4932 list_add_tail(&uf->qe, &uf_mod->uf_free_q); 4928 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
4933 } 4929 }
4934 4930
4935 static bfa_status_t 4931 static bfa_status_t
4936 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) 4932 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
4937 { 4933 {
4938 struct bfi_uf_buf_post_s *uf_post_msg; 4934 struct bfi_uf_buf_post_s *uf_post_msg;
4939 4935
4940 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP); 4936 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
4941 if (!uf_post_msg) 4937 if (!uf_post_msg)
4942 return BFA_STATUS_FAILED; 4938 return BFA_STATUS_FAILED;
4943 4939
4944 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], 4940 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
4945 sizeof(struct bfi_uf_buf_post_s)); 4941 sizeof(struct bfi_uf_buf_post_s));
4946 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); 4942 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
4947 4943
4948 bfa_trc(ufm->bfa, uf->uf_tag); 4944 bfa_trc(ufm->bfa, uf->uf_tag);
4949 4945
4950 list_add_tail(&uf->qe, &ufm->uf_posted_q); 4946 list_add_tail(&uf->qe, &ufm->uf_posted_q);
4951 return BFA_STATUS_OK; 4947 return BFA_STATUS_OK;
4952 } 4948 }
4953 4949
4954 static void 4950 static void
4955 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod) 4951 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4956 { 4952 {
4957 struct bfa_uf_s *uf; 4953 struct bfa_uf_s *uf;
4958 4954
4959 while ((uf = bfa_uf_get(uf_mod)) != NULL) { 4955 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4960 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) 4956 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4961 break; 4957 break;
4962 } 4958 }
4963 } 4959 }
4964 4960
4965 static void 4961 static void
4966 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) 4962 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4967 { 4963 {
4968 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 4964 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4969 u16 uf_tag = m->buf_tag; 4965 u16 uf_tag = m->buf_tag;
4970 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag]; 4966 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4971 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; 4967 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4972 u8 *buf = &uf_buf->d[0]; 4968 u8 *buf = &uf_buf->d[0];
4973 struct fchs_s *fchs; 4969 struct fchs_s *fchs;
4974 4970
4975 m->frm_len = be16_to_cpu(m->frm_len); 4971 m->frm_len = be16_to_cpu(m->frm_len);
4976 m->xfr_len = be16_to_cpu(m->xfr_len); 4972 m->xfr_len = be16_to_cpu(m->xfr_len);
4977 4973
4978 fchs = (struct fchs_s *)uf_buf; 4974 fchs = (struct fchs_s *)uf_buf;
4979 4975
4980 list_del(&uf->qe); /* dequeue from posted queue */ 4976 list_del(&uf->qe); /* dequeue from posted queue */
4981 4977
4982 uf->data_ptr = buf; 4978 uf->data_ptr = buf;
4983 uf->data_len = m->xfr_len; 4979 uf->data_len = m->xfr_len;
4984 4980
4985 WARN_ON(uf->data_len < sizeof(struct fchs_s)); 4981 WARN_ON(uf->data_len < sizeof(struct fchs_s));
4986 4982
4987 if (uf->data_len == sizeof(struct fchs_s)) { 4983 if (uf->data_len == sizeof(struct fchs_s)) {
4988 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, 4984 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4989 uf->data_len, (struct fchs_s *)buf); 4985 uf->data_len, (struct fchs_s *)buf);
4990 } else { 4986 } else {
4991 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s))); 4987 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4992 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF, 4988 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4993 BFA_PL_EID_RX, uf->data_len, 4989 BFA_PL_EID_RX, uf->data_len,
4994 (struct fchs_s *)buf, pld_w0); 4990 (struct fchs_s *)buf, pld_w0);
4995 } 4991 }
4996 4992
4997 if (bfa->fcs) 4993 if (bfa->fcs)
4998 __bfa_cb_uf_recv(uf, BFA_TRUE); 4994 __bfa_cb_uf_recv(uf, BFA_TRUE);
4999 else 4995 else
5000 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); 4996 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5001 } 4997 }
5002 4998
/* UF module stop hook: no per-stop work; vtable placeholder. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5007 5003
5008 static void 5004 static void
5009 bfa_uf_iocdisable(struct bfa_s *bfa) 5005 bfa_uf_iocdisable(struct bfa_s *bfa)
5010 { 5006 {
5011 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5007 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5012 struct bfa_uf_s *uf; 5008 struct bfa_uf_s *uf;
5013 struct list_head *qe, *qen; 5009 struct list_head *qe, *qen;
5014 5010
5015 list_for_each_safe(qe, qen, &ufm->uf_posted_q) { 5011 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5016 uf = (struct bfa_uf_s *) qe; 5012 uf = (struct bfa_uf_s *) qe;
5017 list_del(&uf->qe); 5013 list_del(&uf->qe);
5018 bfa_uf_put(ufm, uf); 5014 bfa_uf_put(ufm, uf);
5019 } 5015 }
5020 } 5016 }
5021 5017
/* UF module start hook: hand all free receive buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5027 5023
5028 /* 5024 /*
5029 * Register handler for all unsolicted recieve frames. 5025 * Register handler for all unsolicted recieve frames.
5030 * 5026 *
5031 * @param[in] bfa BFA instance 5027 * @param[in] bfa BFA instance
5032 * @param[in] ufrecv receive handler function 5028 * @param[in] ufrecv receive handler function
5033 * @param[in] cbarg receive handler arg 5029 * @param[in] cbarg receive handler arg
5034 */ 5030 */
5035 void 5031 void
5036 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg) 5032 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5037 { 5033 {
5038 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5034 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5039 5035
5040 ufm->ufrecv = ufrecv; 5036 ufm->ufrecv = ufrecv;
5041 ufm->cbarg = cbarg; 5037 ufm->cbarg = cbarg;
5042 } 5038 }
5043 5039
5044 /* 5040 /*
5045 * Free an unsolicited frame back to BFA. 5041 * Free an unsolicited frame back to BFA.
5046 * 5042 *
5047 * @param[in] uf unsolicited frame to be freed 5043 * @param[in] uf unsolicited frame to be freed
5048 * 5044 *
5049 * @return None 5045 * @return None
5050 */ 5046 */
5051 void 5047 void
5052 bfa_uf_free(struct bfa_uf_s *uf) 5048 bfa_uf_free(struct bfa_uf_s *uf)
5053 { 5049 {
5054 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf); 5050 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5055 bfa_uf_post_all(BFA_UF_MOD(uf->bfa)); 5051 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5056 } 5052 }
5057 5053
5058 5054
5059 5055
5060 /* 5056 /*
5061 * uf_pub BFA uf module public functions 5057 * uf_pub BFA uf module public functions
5062 */ 5058 */
5063 void 5059 void
5064 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 5060 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5065 { 5061 {
5066 bfa_trc(bfa, msg->mhdr.msg_id); 5062 bfa_trc(bfa, msg->mhdr.msg_id);
5067 5063
5068 switch (msg->mhdr.msg_id) { 5064 switch (msg->mhdr.msg_id) {
5069 case BFI_UF_I2H_FRM_RCVD: 5065 case BFI_UF_I2H_FRM_RCVD:
5070 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg); 5066 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5071 break; 5067 break;
5072 5068
5073 default: 5069 default:
5074 bfa_trc(bfa, msg->mhdr.msg_id); 5070 bfa_trc(bfa, msg->mhdr.msg_id);
5075 WARN_ON(1); 5071 WARN_ON(1);
5076 } 5072 }
5077 } 5073 }
5078 5074
5079 5075
5080 5076
drivers/scsi/bfa/bfad.c
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 /* 18 /*
19 * bfad.c Linux driver PCI interface module. 19 * bfad.c Linux driver PCI interface module.
20 */ 20 */
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/kthread.h> 22 #include <linux/kthread.h>
23 #include <linux/errno.h> 23 #include <linux/errno.h>
24 #include <linux/sched.h> 24 #include <linux/sched.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/fs.h> 26 #include <linux/fs.h>
27 #include <linux/pci.h> 27 #include <linux/pci.h>
28 #include <linux/firmware.h> 28 #include <linux/firmware.h>
29 #include <asm/uaccess.h> 29 #include <asm/uaccess.h>
30 #include <asm/fcntl.h> 30 #include <asm/fcntl.h>
31 31
32 #include "bfad_drv.h" 32 #include "bfad_drv.h"
33 #include "bfad_im.h" 33 #include "bfad_im.h"
34 #include "bfa_fcs.h" 34 #include "bfa_fcs.h"
35 #include "bfa_defs.h" 35 #include "bfa_defs.h"
36 #include "bfa.h" 36 #include "bfa.h"
37 37
38 BFA_TRC_FILE(LDRV, BFAD); 38 BFA_TRC_FILE(LDRV, BFAD);
39 DEFINE_MUTEX(bfad_mutex); 39 DEFINE_MUTEX(bfad_mutex);
40 LIST_HEAD(bfad_list); 40 LIST_HEAD(bfad_list);
41 41
42 static int bfad_inst; 42 static int bfad_inst;
43 static int num_sgpgs_parm; 43 static int num_sgpgs_parm;
44 int supported_fc4s; 44 int supported_fc4s;
45 char *host_name, *os_name, *os_patch; 45 char *host_name, *os_name, *os_patch;
46 int num_rports, num_ios, num_tms; 46 int num_rports, num_ios, num_tms;
47 int num_fcxps, num_ufbufs; 47 int num_fcxps, num_ufbufs;
48 int reqq_size, rspq_size, num_sgpgs; 48 int reqq_size, rspq_size, num_sgpgs;
49 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; 49 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
50 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 50 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
51 int bfa_io_max_sge = BFAD_IO_MAX_SGE; 51 int bfa_io_max_sge = BFAD_IO_MAX_SGE;
52 int bfa_log_level = 3; /* WARNING log level */ 52 int bfa_log_level = 3; /* WARNING log level */
53 int ioc_auto_recover = BFA_TRUE; 53 int ioc_auto_recover = BFA_TRUE;
54 int bfa_linkup_delay = -1; 54 int bfa_linkup_delay = -1;
55 int fdmi_enable = BFA_TRUE; 55 int fdmi_enable = BFA_TRUE;
56 int pcie_max_read_reqsz; 56 int pcie_max_read_reqsz;
57 int bfa_debugfs_enable = 1; 57 int bfa_debugfs_enable = 1;
58 int msix_disable_cb = 0, msix_disable_ct = 0; 58 int msix_disable_cb = 0, msix_disable_ct = 0;
59 59
60 u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; 60 u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
/* Firmware image pointers; filled in when the firmware files are loaded. */
u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;

/* Per-vector MSI-X interrupt names for CT ASIC based adapters. */
static const char *msix_name_ct[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"ctrl" };

/* Per-vector MSI-X interrupt names for CB ASIC based adapters. */
static const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);

/*
 * Module parameters. All are world-readable and root-writable in sysfs;
 * NOTE(review): runtime writes presumably only affect instances probed
 * afterwards — confirm per parameter before relying on live updates.
 */
module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
				"default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
				"default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
				"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
			"[RHEL5, SLES10, ESX40] Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
			"for Brocade-415/425/815/825 cards, default=0, "
			" Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
			"if possible for Brocade-1010/1020/804/1007/902/1741 "
			"cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
				"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
		"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
		" Range[false:0|true:1]");

/* Forward declarations of the bfad driver instance state machine states. */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
152 152
153 /* 153 /*
154 * Beginning state for the driver instance, awaiting the pci_probe event 154 * Beginning state for the driver instance, awaiting the pci_probe event
155 */ 155 */
156 static void 156 static void
157 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) 157 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
158 { 158 {
159 bfa_trc(bfad, event); 159 bfa_trc(bfad, event);
160 160
161 switch (event) { 161 switch (event) {
162 case BFAD_E_CREATE: 162 case BFAD_E_CREATE:
163 bfa_sm_set_state(bfad, bfad_sm_created); 163 bfa_sm_set_state(bfad, bfad_sm_created);
164 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, 164 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
165 "%s", "bfad_worker"); 165 "%s", "bfad_worker");
166 if (IS_ERR(bfad->bfad_tsk)) { 166 if (IS_ERR(bfad->bfad_tsk)) {
167 printk(KERN_INFO "bfad[%d]: Kernel thread " 167 printk(KERN_INFO "bfad[%d]: Kernel thread "
168 "creation failed!\n", bfad->inst_no); 168 "creation failed!\n", bfad->inst_no);
169 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); 169 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
170 } 170 }
171 bfa_sm_send_event(bfad, BFAD_E_INIT); 171 bfa_sm_send_event(bfad, BFAD_E_INIT);
172 break; 172 break;
173 173
174 case BFAD_E_STOP: 174 case BFAD_E_STOP:
175 /* Ignore stop; already in uninit */ 175 /* Ignore stop; already in uninit */
176 break; 176 break;
177 177
178 default: 178 default:
179 bfa_sm_fault(bfad, event); 179 bfa_sm_fault(bfad, event);
180 } 180 }
181 } 181 }
182 182
/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable Interrupt and wait bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		/* Kick off IOC/firmware initialization under the driver lock. */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up interrupt handler for each vectors */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			/* MSI-X install failure is logged but not fatal here. */
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		/* Block until bfa_cb_init() completes &bfad->comp. */
		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			/* Remember the failure so bfa_cb_init() can wake us
			 * for recovery later (see BFAD_HAL_INIT_FAIL there). */
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		/* Worker thread could not be created; fall back to uninit. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
239 239
/*
 * HAL initialization has been kicked off; awaiting its outcome.
 */
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int retval;
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* Init done; the temporary worker thread is no longer needed.
		 * Clear bfad_tsk under the lock so other contexts never see
		 * a stale task pointer. */
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		/* Interrupt setup failed: revert to uninit and stop the
		 * worker thread. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		/* HAL init failed; bfad_sm_failed handles retry or stop. */
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}
276 276
277 static void 277 static void
278 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) 278 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
279 { 279 {
280 int retval; 280 int retval;
281 281
282 bfa_trc(bfad, event); 282 bfa_trc(bfad, event);
283 283
284 switch (event) { 284 switch (event) {
285 case BFAD_E_INIT_SUCCESS: 285 case BFAD_E_INIT_SUCCESS:
286 retval = bfad_start_ops(bfad); 286 retval = bfad_start_ops(bfad);
287 if (retval != BFA_STATUS_OK) 287 if (retval != BFA_STATUS_OK)
288 break; 288 break;
289 bfa_sm_set_state(bfad, bfad_sm_operational); 289 bfa_sm_set_state(bfad, bfad_sm_operational);
290 break; 290 break;
291 291
292 case BFAD_E_STOP: 292 case BFAD_E_STOP:
293 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) 293 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
294 bfad_uncfg_pport(bfad); 294 bfad_uncfg_pport(bfad);
295 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { 295 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
296 bfad_im_probe_undo(bfad); 296 bfad_im_probe_undo(bfad);
297 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 297 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
298 } 298 }
299 bfad_stop(bfad); 299 bfad_stop(bfad);
300 break; 300 break;
301 301
302 case BFAD_E_EXIT_COMP: 302 case BFAD_E_EXIT_COMP:
303 bfa_sm_set_state(bfad, bfad_sm_uninit); 303 bfa_sm_set_state(bfad, bfad_sm_uninit);
304 bfad_remove_intr(bfad); 304 bfad_remove_intr(bfad);
305 del_timer_sync(&bfad->hal_tmo); 305 del_timer_sync(&bfad->hal_tmo);
306 break; 306 break;
307 307
308 default: 308 default:
309 bfa_sm_fault(bfad, event); 309 bfa_sm_fault(bfad, event);
310 } 310 }
311 } 311 }
312 312
313 static void 313 static void
314 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) 314 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
315 { 315 {
316 bfa_trc(bfad, event); 316 bfa_trc(bfad, event);
317 317
318 switch (event) { 318 switch (event) {
319 case BFAD_E_STOP: 319 case BFAD_E_STOP:
320 bfa_sm_set_state(bfad, bfad_sm_fcs_exit); 320 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
321 bfad_fcs_stop(bfad); 321 bfad_fcs_stop(bfad);
322 break; 322 break;
323 323
324 default: 324 default:
325 bfa_sm_fault(bfad, event); 325 bfa_sm_fault(bfad, event);
326 } 326 }
327 } 327 }
328 328
329 static void 329 static void
330 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) 330 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
331 { 331 {
332 bfa_trc(bfad, event); 332 bfa_trc(bfad, event);
333 333
334 switch (event) { 334 switch (event) {
335 case BFAD_E_FCS_EXIT_COMP: 335 case BFAD_E_FCS_EXIT_COMP:
336 bfa_sm_set_state(bfad, bfad_sm_stopping); 336 bfa_sm_set_state(bfad, bfad_sm_stopping);
337 bfad_stop(bfad); 337 bfad_stop(bfad);
338 break; 338 break;
339 339
340 default: 340 default:
341 bfa_sm_fault(bfad, event); 341 bfa_sm_fault(bfad, event);
342 } 342 }
343 } 343 }
344 344
345 static void 345 static void
346 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) 346 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
347 { 347 {
348 bfa_trc(bfad, event); 348 bfa_trc(bfad, event);
349 349
350 switch (event) { 350 switch (event) {
351 case BFAD_E_EXIT_COMP: 351 case BFAD_E_EXIT_COMP:
352 bfa_sm_set_state(bfad, bfad_sm_uninit); 352 bfa_sm_set_state(bfad, bfad_sm_uninit);
353 bfad_remove_intr(bfad); 353 bfad_remove_intr(bfad);
354 del_timer_sync(&bfad->hal_tmo); 354 del_timer_sync(&bfad->hal_tmo);
355 bfad_im_probe_undo(bfad); 355 bfad_im_probe_undo(bfad);
356 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 356 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
357 bfad_uncfg_pport(bfad); 357 bfad_uncfg_pport(bfad);
358 break; 358 break;
359 359
360 default: 360 default:
361 bfa_sm_fault(bfad, event); 361 bfa_sm_fault(bfad, event);
362 break; 362 break;
363 } 363 }
364 } 364 }
365 365
366 /* 366 /*
367 * BFA callbacks 367 * BFA callbacks
368 */ 368 */
369 void 369 void
370 bfad_hcb_comp(void *arg, bfa_status_t status) 370 bfad_hcb_comp(void *arg, bfa_status_t status)
371 { 371 {
372 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; 372 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
373 373
374 fcomp->status = status; 374 fcomp->status = status;
375 complete(&fcomp->comp); 375 complete(&fcomp->comp);
376 } 376 }
377 377
/*
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If BFAD_HAL_INIT_FAIL flag is set:
		 * Wake up the kernel thread to start
		 * the bfad operations after HAL init done
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	/* Unblock the wait_for_completion() in bfad_sm_created(). */
	complete(&bfad->comp);
}
402 402
403 /* 403 /*
404 * BFA_FCS callbacks 404 * BFA_FCS callbacks
405 */ 405 */
406 struct bfad_port_s * 406 struct bfad_port_s *
407 bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, 407 bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
408 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, 408 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
409 struct bfad_vport_s *vp_drv) 409 struct bfad_vport_s *vp_drv)
410 { 410 {
411 bfa_status_t rc; 411 bfa_status_t rc;
412 struct bfad_port_s *port_drv; 412 struct bfad_port_s *port_drv;
413 413
414 if (!vp_drv && !vf_drv) { 414 if (!vp_drv && !vf_drv) {
415 port_drv = &bfad->pport; 415 port_drv = &bfad->pport;
416 port_drv->pvb_type = BFAD_PORT_PHYS_BASE; 416 port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
417 } else if (!vp_drv && vf_drv) { 417 } else if (!vp_drv && vf_drv) {
418 port_drv = &vf_drv->base_port; 418 port_drv = &vf_drv->base_port;
419 port_drv->pvb_type = BFAD_PORT_VF_BASE; 419 port_drv->pvb_type = BFAD_PORT_VF_BASE;
420 } else if (vp_drv && !vf_drv) { 420 } else if (vp_drv && !vf_drv) {
421 port_drv = &vp_drv->drv_port; 421 port_drv = &vp_drv->drv_port;
422 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; 422 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
423 } else { 423 } else {
424 port_drv = &vp_drv->drv_port; 424 port_drv = &vp_drv->drv_port;
425 port_drv->pvb_type = BFAD_PORT_VF_VPORT; 425 port_drv->pvb_type = BFAD_PORT_VF_VPORT;
426 } 426 }
427 427
428 port_drv->fcs_port = port; 428 port_drv->fcs_port = port;
429 port_drv->roles = roles; 429 port_drv->roles = roles;
430 430
431 if (roles & BFA_LPORT_ROLE_FCP_IM) { 431 if (roles & BFA_LPORT_ROLE_FCP_IM) {
432 rc = bfad_im_port_new(bfad, port_drv); 432 rc = bfad_im_port_new(bfad, port_drv);
433 if (rc != BFA_STATUS_OK) { 433 if (rc != BFA_STATUS_OK) {
434 bfad_im_port_delete(bfad, port_drv); 434 bfad_im_port_delete(bfad, port_drv);
435 port_drv = NULL; 435 port_drv = NULL;
436 } 436 }
437 } 437 }
438 438
439 return port_drv; 439 return port_drv;
440 } 440 }
441 441
442 void 442 void
443 bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, 443 bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
444 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) 444 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
445 { 445 {
446 struct bfad_port_s *port_drv; 446 struct bfad_port_s *port_drv;
447 447
448 /* this will be only called from rmmod context */ 448 /* this will be only called from rmmod context */
449 if (vp_drv && !vp_drv->comp_del) { 449 if (vp_drv && !vp_drv->comp_del) {
450 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) : 450 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
451 ((vf_drv) ? (&(vf_drv)->base_port) : 451 ((vf_drv) ? (&(vf_drv)->base_port) :
452 (&(bfad)->pport)); 452 (&(bfad)->pport));
453 bfa_trc(bfad, roles); 453 bfa_trc(bfad, roles);
454 if (roles & BFA_LPORT_ROLE_FCP_IM) 454 if (roles & BFA_LPORT_ROLE_FCP_IM)
455 bfad_im_port_delete(bfad, port_drv); 455 bfad_im_port_delete(bfad, port_drv);
456 } 456 }
457 } 457 }
458 458
459 /* 459 /*
460 * FCS RPORT alloc callback, after successful PLOGI by FCS 460 * FCS RPORT alloc callback, after successful PLOGI by FCS
461 */ 461 */
462 bfa_status_t 462 bfa_status_t
463 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, 463 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
464 struct bfad_rport_s **rport_drv) 464 struct bfad_rport_s **rport_drv)
465 { 465 {
466 bfa_status_t rc = BFA_STATUS_OK; 466 bfa_status_t rc = BFA_STATUS_OK;
467 467
468 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); 468 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
469 if (*rport_drv == NULL) { 469 if (*rport_drv == NULL) {
470 rc = BFA_STATUS_ENOMEM; 470 rc = BFA_STATUS_ENOMEM;
471 goto ext; 471 goto ext;
472 } 472 }
473 473
474 *rport = &(*rport_drv)->fcs_rport; 474 *rport = &(*rport_drv)->fcs_rport;
475 475
476 ext: 476 ext:
477 return rc; 477 return rc;
478 } 478 }
479 479
480 /* 480 /*
481 * FCS PBC VPORT Create 481 * FCS PBC VPORT Create
482 */ 482 */
483 void 483 void
484 bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) 484 bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
485 { 485 {
486 486
487 struct bfa_lport_cfg_s port_cfg = {0}; 487 struct bfa_lport_cfg_s port_cfg = {0};
488 struct bfad_vport_s *vport; 488 struct bfad_vport_s *vport;
489 int rc; 489 int rc;
490 490
491 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 491 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
492 if (!vport) { 492 if (!vport) {
493 bfa_trc(bfad, 0); 493 bfa_trc(bfad, 0);
494 return; 494 return;
495 } 495 }
496 496
497 vport->drv_port.bfad = bfad; 497 vport->drv_port.bfad = bfad;
498 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; 498 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
499 port_cfg.pwwn = pbc_vport.vp_pwwn; 499 port_cfg.pwwn = pbc_vport.vp_pwwn;
500 port_cfg.nwwn = pbc_vport.vp_nwwn; 500 port_cfg.nwwn = pbc_vport.vp_nwwn;
501 port_cfg.preboot_vp = BFA_TRUE; 501 port_cfg.preboot_vp = BFA_TRUE;
502 502
503 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, 503 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
504 &port_cfg, vport); 504 &port_cfg, vport);
505 505
506 if (rc != BFA_STATUS_OK) { 506 if (rc != BFA_STATUS_OK) {
507 bfa_trc(bfad, 0); 507 bfa_trc(bfad, 0);
508 return; 508 return;
509 } 509 }
510 510
511 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); 511 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
512 } 512 }
513 513
514 void 514 void
515 bfad_hal_mem_release(struct bfad_s *bfad) 515 bfad_hal_mem_release(struct bfad_s *bfad)
516 { 516 {
517 int i; 517 int i;
518 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 518 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
519 struct bfa_mem_elem_s *meminfo_elem; 519 struct bfa_mem_elem_s *meminfo_elem;
520 520
521 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 521 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
522 meminfo_elem = &hal_meminfo->meminfo[i]; 522 meminfo_elem = &hal_meminfo->meminfo[i];
523 if (meminfo_elem->kva != NULL) { 523 if (meminfo_elem->kva != NULL) {
524 switch (meminfo_elem->mem_type) { 524 switch (meminfo_elem->mem_type) {
525 case BFA_MEM_TYPE_KVA: 525 case BFA_MEM_TYPE_KVA:
526 vfree(meminfo_elem->kva); 526 vfree(meminfo_elem->kva);
527 break; 527 break;
528 case BFA_MEM_TYPE_DMA: 528 case BFA_MEM_TYPE_DMA:
529 dma_free_coherent(&bfad->pcidev->dev, 529 dma_free_coherent(&bfad->pcidev->dev,
530 meminfo_elem->mem_len, 530 meminfo_elem->mem_len,
531 meminfo_elem->kva, 531 meminfo_elem->kva,
532 (dma_addr_t) meminfo_elem->dma); 532 (dma_addr_t) meminfo_elem->dma);
533 break; 533 break;
534 default: 534 default:
535 WARN_ON(1); 535 WARN_ON(1);
536 break; 536 break;
537 } 537 }
538 } 538 }
539 } 539 }
540 540
541 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); 541 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
542 } 542 }
543 543
544 void 544 void
545 bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) 545 bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
546 { 546 {
547 if (num_rports > 0) 547 if (num_rports > 0)
548 bfa_cfg->fwcfg.num_rports = num_rports; 548 bfa_cfg->fwcfg.num_rports = num_rports;
549 if (num_ios > 0) 549 if (num_ios > 0)
550 bfa_cfg->fwcfg.num_ioim_reqs = num_ios; 550 bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
551 if (num_tms > 0) 551 if (num_tms > 0)
552 bfa_cfg->fwcfg.num_tskim_reqs = num_tms; 552 bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
553 if (num_fcxps > 0) 553 if (num_fcxps > 0)
554 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; 554 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
555 if (num_ufbufs > 0) 555 if (num_ufbufs > 0)
556 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; 556 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
557 if (reqq_size > 0) 557 if (reqq_size > 0)
558 bfa_cfg->drvcfg.num_reqq_elems = reqq_size; 558 bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
559 if (rspq_size > 0) 559 if (rspq_size > 0)
560 bfa_cfg->drvcfg.num_rspq_elems = rspq_size; 560 bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
561 if (num_sgpgs > 0) 561 if (num_sgpgs > 0)
562 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; 562 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
563 563
564 /* 564 /*
565 * populate the hal values back to the driver for sysfs use. 565 * populate the hal values back to the driver for sysfs use.
566 * otherwise, the default values will be shown as 0 in sysfs 566 * otherwise, the default values will be shown as 0 in sysfs
567 */ 567 */
568 num_rports = bfa_cfg->fwcfg.num_rports; 568 num_rports = bfa_cfg->fwcfg.num_rports;
569 num_ios = bfa_cfg->fwcfg.num_ioim_reqs; 569 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
570 num_tms = bfa_cfg->fwcfg.num_tskim_reqs; 570 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
571 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; 571 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
572 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; 572 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
573 reqq_size = bfa_cfg->drvcfg.num_reqq_elems; 573 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
574 rspq_size = bfa_cfg->drvcfg.num_rspq_elems; 574 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
575 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; 575 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
576 } 576 }
577 577
/*
 * Allocate all HAL memory elements described by the IOC configuration.
 * If a DMA allocation fails, num_sgpgs is lowered (presumably halved by
 * nextLowerInt() -- TODO confirm) and the whole allocation is retried;
 * on final failure num_sgpgs is restored before returning ENOMEM.
 */
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	dma_addr_t phys_addr;
	void *kva;
	bfa_status_t rc = BFA_STATUS_OK;
	int retry_count = 0;	/* number of num_sgpgs reductions so far */
	int reset_value = 1;	/* factor to undo those reductions */
	int min_num_sgpgs = 512;	/* floor below which we stop retrying */

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	/* Recompute sizes from (possibly reduced) module parameters. */
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
				meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				/* Drop everything allocated so far; the
				 * retry re-allocates from scratch. */
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with default
				 * num_sgpages try with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO
					"bfad[%d]: memory allocation failed"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO
					"bfad[%d]: trying to allocate memory"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					/* Give up: restore num_sgpgs to its
					 * original value before failing. */
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			/* Success after reductions: restore num_sgpgs so the
			 * sysfs-visible parameter keeps its configured value. */
			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;

		}
	}
ext:
	return rc;
}
664 664
665 /* 665 /*
666 * Create a vport under a vf. 666 * Create a vport under a vf.
667 */ 667 */
668 bfa_status_t 668 bfa_status_t
669 bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 669 bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
670 struct bfa_lport_cfg_s *port_cfg, struct device *dev) 670 struct bfa_lport_cfg_s *port_cfg, struct device *dev)
671 { 671 {
672 struct bfad_vport_s *vport; 672 struct bfad_vport_s *vport;
673 int rc = BFA_STATUS_OK; 673 int rc = BFA_STATUS_OK;
674 unsigned long flags; 674 unsigned long flags;
675 struct completion fcomp; 675 struct completion fcomp;
676 676
677 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 677 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
678 if (!vport) { 678 if (!vport) {
679 rc = BFA_STATUS_ENOMEM; 679 rc = BFA_STATUS_ENOMEM;
680 goto ext; 680 goto ext;
681 } 681 }
682 682
683 vport->drv_port.bfad = bfad; 683 vport->drv_port.bfad = bfad;
684 spin_lock_irqsave(&bfad->bfad_lock, flags); 684 spin_lock_irqsave(&bfad->bfad_lock, flags);
685 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, 685 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
686 port_cfg, vport); 686 port_cfg, vport);
687 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 687 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
688 688
689 if (rc != BFA_STATUS_OK) 689 if (rc != BFA_STATUS_OK)
690 goto ext_free_vport; 690 goto ext_free_vport;
691 691
692 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { 692 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
693 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, 693 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
694 dev); 694 dev);
695 if (rc != BFA_STATUS_OK) 695 if (rc != BFA_STATUS_OK)
696 goto ext_free_fcs_vport; 696 goto ext_free_fcs_vport;
697 } 697 }
698 698
699 spin_lock_irqsave(&bfad->bfad_lock, flags); 699 spin_lock_irqsave(&bfad->bfad_lock, flags);
700 bfa_fcs_vport_start(&vport->fcs_vport); 700 bfa_fcs_vport_start(&vport->fcs_vport);
701 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 701 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
702 702
703 return BFA_STATUS_OK; 703 return BFA_STATUS_OK;
704 704
705 ext_free_fcs_vport: 705 ext_free_fcs_vport:
706 spin_lock_irqsave(&bfad->bfad_lock, flags); 706 spin_lock_irqsave(&bfad->bfad_lock, flags);
707 vport->comp_del = &fcomp; 707 vport->comp_del = &fcomp;
708 init_completion(vport->comp_del); 708 init_completion(vport->comp_del);
709 bfa_fcs_vport_delete(&vport->fcs_vport); 709 bfa_fcs_vport_delete(&vport->fcs_vport);
710 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 710 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
711 wait_for_completion(vport->comp_del); 711 wait_for_completion(vport->comp_del);
712 ext_free_vport: 712 ext_free_vport:
713 kfree(vport); 713 kfree(vport);
714 ext: 714 ext:
715 return rc; 715 return rc;
716 } 716 }
717 717
718 void 718 void
719 bfad_bfa_tmo(unsigned long data) 719 bfad_bfa_tmo(unsigned long data)
720 { 720 {
721 struct bfad_s *bfad = (struct bfad_s *) data; 721 struct bfad_s *bfad = (struct bfad_s *) data;
722 unsigned long flags; 722 unsigned long flags;
723 struct list_head doneq; 723 struct list_head doneq;
724 724
725 spin_lock_irqsave(&bfad->bfad_lock, flags); 725 spin_lock_irqsave(&bfad->bfad_lock, flags);
726 726
727 bfa_timer_beat(&bfad->bfa.timer_mod); 727 bfa_timer_beat(&bfad->bfa.timer_mod);
728 728
729 bfa_comp_deq(&bfad->bfa, &doneq); 729 bfa_comp_deq(&bfad->bfa, &doneq);
730 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 730 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
731 731
732 if (!list_empty(&doneq)) { 732 if (!list_empty(&doneq)) {
733 bfa_comp_process(&bfad->bfa, &doneq); 733 bfa_comp_process(&bfad->bfa, &doneq);
734 spin_lock_irqsave(&bfad->bfad_lock, flags); 734 spin_lock_irqsave(&bfad->bfad_lock, flags);
735 bfa_comp_free(&bfad->bfa, &doneq); 735 bfa_comp_free(&bfad->bfa, &doneq);
736 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 736 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
737 } 737 }
738 738
739 mod_timer(&bfad->hal_tmo, 739 mod_timer(&bfad->hal_tmo,
740 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 740 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
741 } 741 }
742 742
743 void 743 void
744 bfad_init_timer(struct bfad_s *bfad) 744 bfad_init_timer(struct bfad_s *bfad)
745 { 745 {
746 init_timer(&bfad->hal_tmo); 746 init_timer(&bfad->hal_tmo);
747 bfad->hal_tmo.function = bfad_bfa_tmo; 747 bfad->hal_tmo.function = bfad_bfa_tmo;
748 bfad->hal_tmo.data = (unsigned long)bfad; 748 bfad->hal_tmo.data = (unsigned long)bfad;
749 749
750 mod_timer(&bfad->hal_tmo, 750 mod_timer(&bfad->hal_tmo,
751 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); 751 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
752 } 752 }
753 753
754 int 754 int
755 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) 755 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
756 { 756 {
757 int rc = -ENODEV; 757 int rc = -ENODEV;
758 758
759 if (pci_enable_device(pdev)) { 759 if (pci_enable_device(pdev)) {
760 printk(KERN_ERR "pci_enable_device fail %p\n", pdev); 760 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
761 goto out; 761 goto out;
762 } 762 }
763 763
764 if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) 764 if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
765 goto out_disable_device; 765 goto out_disable_device;
766 766
767 pci_set_master(pdev); 767 pci_set_master(pdev);
768 768
769 769
770 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 770 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
771 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 771 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
772 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); 772 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
773 goto out_release_region; 773 goto out_release_region;
774 } 774 }
775 775
776 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 776 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
777 777
778 if (bfad->pci_bar0_kva == NULL) { 778 if (bfad->pci_bar0_kva == NULL) {
779 printk(KERN_ERR "Fail to map bar0\n"); 779 printk(KERN_ERR "Fail to map bar0\n");
780 goto out_release_region; 780 goto out_release_region;
781 } 781 }
782 782
783 bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); 783 bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
784 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); 784 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
785 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; 785 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
786 bfad->hal_pcidev.device_id = pdev->device; 786 bfad->hal_pcidev.device_id = pdev->device;
787 bfad->pci_name = pci_name(pdev); 787 bfad->pci_name = pci_name(pdev);
788 788
789 bfad->pci_attr.vendor_id = pdev->vendor; 789 bfad->pci_attr.vendor_id = pdev->vendor;
790 bfad->pci_attr.device_id = pdev->device; 790 bfad->pci_attr.device_id = pdev->device;
791 bfad->pci_attr.ssid = pdev->subsystem_device; 791 bfad->pci_attr.ssid = pdev->subsystem_device;
792 bfad->pci_attr.ssvid = pdev->subsystem_vendor; 792 bfad->pci_attr.ssvid = pdev->subsystem_vendor;
793 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); 793 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
794 794
795 bfad->pcidev = pdev; 795 bfad->pcidev = pdev;
796 796
797 /* Adjust PCIe Maximum Read Request Size */ 797 /* Adjust PCIe Maximum Read Request Size */
798 if (pcie_max_read_reqsz > 0) { 798 if (pcie_max_read_reqsz > 0) {
799 int pcie_cap_reg; 799 int pcie_cap_reg;
800 u16 pcie_dev_ctl; 800 u16 pcie_dev_ctl;
801 u16 mask = 0xffff; 801 u16 mask = 0xffff;
802 802
803 switch (pcie_max_read_reqsz) { 803 switch (pcie_max_read_reqsz) {
804 case 128: 804 case 128:
805 mask = 0x0; 805 mask = 0x0;
806 break; 806 break;
807 case 256: 807 case 256:
808 mask = 0x1000; 808 mask = 0x1000;
809 break; 809 break;
810 case 512: 810 case 512:
811 mask = 0x2000; 811 mask = 0x2000;
812 break; 812 break;
813 case 1024: 813 case 1024:
814 mask = 0x3000; 814 mask = 0x3000;
815 break; 815 break;
816 case 2048: 816 case 2048:
817 mask = 0x4000; 817 mask = 0x4000;
818 break; 818 break;
819 case 4096: 819 case 4096:
820 mask = 0x5000; 820 mask = 0x5000;
821 break; 821 break;
822 default: 822 default:
823 break; 823 break;
824 } 824 }
825 825
826 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); 826 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
827 if (mask != 0xffff && pcie_cap_reg) { 827 if (mask != 0xffff && pcie_cap_reg) {
828 pcie_cap_reg += 0x08; 828 pcie_cap_reg += 0x08;
829 pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); 829 pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
830 if ((pcie_dev_ctl & 0x7000) != mask) { 830 if ((pcie_dev_ctl & 0x7000) != mask) {
831 printk(KERN_WARNING "BFA[%s]: " 831 printk(KERN_WARNING "BFA[%s]: "
832 "pcie_max_read_request_size is %d, " 832 "pcie_max_read_request_size is %d, "
833 "reset to %d\n", bfad->pci_name, 833 "reset to %d\n", bfad->pci_name,
834 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, 834 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
835 pcie_max_read_reqsz); 835 pcie_max_read_reqsz);
836 836
837 pcie_dev_ctl &= ~0x7000; 837 pcie_dev_ctl &= ~0x7000;
838 pci_write_config_word(pdev, pcie_cap_reg, 838 pci_write_config_word(pdev, pcie_cap_reg,
839 pcie_dev_ctl | mask); 839 pcie_dev_ctl | mask);
840 } 840 }
841 } 841 }
842 } 842 }
843 843
844 return 0; 844 return 0;
845 845
846 out_release_region: 846 out_release_region:
847 pci_release_regions(pdev); 847 pci_release_regions(pdev);
848 out_disable_device: 848 out_disable_device:
849 pci_disable_device(pdev); 849 pci_disable_device(pdev);
850 out: 850 out:
851 return rc; 851 return rc;
852 } 852 }
853 853
854 void 854 void
855 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) 855 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
856 { 856 {
857 pci_iounmap(pdev, bfad->pci_bar0_kva); 857 pci_iounmap(pdev, bfad->pci_bar0_kva);
858 pci_release_regions(pdev); 858 pci_release_regions(pdev);
859 pci_disable_device(pdev); 859 pci_disable_device(pdev);
860 pci_set_drvdata(pdev, NULL); 860 pci_set_drvdata(pdev, NULL);
861 } 861 }
862 862
863 bfa_status_t 863 bfa_status_t
864 bfad_drv_init(struct bfad_s *bfad) 864 bfad_drv_init(struct bfad_s *bfad)
865 { 865 {
866 bfa_status_t rc; 866 bfa_status_t rc;
867 unsigned long flags; 867 unsigned long flags;
868 868
869 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 869 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
870 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; 870 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
871 bfad->cfg_data.io_max_sge = bfa_io_max_sge; 871 bfad->cfg_data.io_max_sge = bfa_io_max_sge;
872 bfad->cfg_data.binding_method = FCP_PWWN_BINDING; 872 bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
873 873
874 rc = bfad_hal_mem_alloc(bfad); 874 rc = bfad_hal_mem_alloc(bfad);
875 if (rc != BFA_STATUS_OK) { 875 if (rc != BFA_STATUS_OK) {
876 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", 876 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
877 bfad->inst_no); 877 bfad->inst_no);
878 printk(KERN_WARNING 878 printk(KERN_WARNING
879 "Not enough memory to attach all Brocade HBA ports, %s", 879 "Not enough memory to attach all Brocade HBA ports, %s",
880 "System may need more memory.\n"); 880 "System may need more memory.\n");
881 goto out_hal_mem_alloc_failure; 881 goto out_hal_mem_alloc_failure;
882 } 882 }
883 883
884 bfad->bfa.trcmod = bfad->trcmod; 884 bfad->bfa.trcmod = bfad->trcmod;
885 bfad->bfa.plog = &bfad->plog_buf; 885 bfad->bfa.plog = &bfad->plog_buf;
886 bfa_plog_init(&bfad->plog_buf); 886 bfa_plog_init(&bfad->plog_buf);
887 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 887 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
888 0, "Driver Attach"); 888 0, "Driver Attach");
889 889
890 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, 890 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
891 &bfad->hal_pcidev); 891 &bfad->hal_pcidev);
892 892
893 /* FCS INIT */ 893 /* FCS INIT */
894 spin_lock_irqsave(&bfad->bfad_lock, flags); 894 spin_lock_irqsave(&bfad->bfad_lock, flags);
895 bfad->bfa_fcs.trcmod = bfad->trcmod; 895 bfad->bfa_fcs.trcmod = bfad->trcmod;
896 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 896 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
897 bfad->bfa_fcs.fdmi_enabled = fdmi_enable; 897 bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
898 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 898 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
899 899
900 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 900 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
901 901
902 return BFA_STATUS_OK; 902 return BFA_STATUS_OK;
903 903
904 out_hal_mem_alloc_failure: 904 out_hal_mem_alloc_failure:
905 return BFA_STATUS_FAILED; 905 return BFA_STATUS_FAILED;
906 } 906 }
907 907
908 void 908 void
909 bfad_drv_uninit(struct bfad_s *bfad) 909 bfad_drv_uninit(struct bfad_s *bfad)
910 { 910 {
911 unsigned long flags; 911 unsigned long flags;
912 912
913 spin_lock_irqsave(&bfad->bfad_lock, flags); 913 spin_lock_irqsave(&bfad->bfad_lock, flags);
914 init_completion(&bfad->comp); 914 init_completion(&bfad->comp);
915 bfa_iocfc_stop(&bfad->bfa); 915 bfa_iocfc_stop(&bfad->bfa);
916 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 916 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
917 wait_for_completion(&bfad->comp); 917 wait_for_completion(&bfad->comp);
918 918
919 del_timer_sync(&bfad->hal_tmo); 919 del_timer_sync(&bfad->hal_tmo);
920 bfa_isr_disable(&bfad->bfa); 920 bfa_isr_disable(&bfad->bfa);
921 bfa_detach(&bfad->bfa); 921 bfa_detach(&bfad->bfa);
922 bfad_remove_intr(bfad); 922 bfad_remove_intr(bfad);
923 bfad_hal_mem_release(bfad); 923 bfad_hal_mem_release(bfad);
924 924
925 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; 925 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
926 } 926 }
927 927
928 void 928 void
929 bfad_drv_start(struct bfad_s *bfad) 929 bfad_drv_start(struct bfad_s *bfad)
930 { 930 {
931 unsigned long flags; 931 unsigned long flags;
932 932
933 spin_lock_irqsave(&bfad->bfad_lock, flags); 933 spin_lock_irqsave(&bfad->bfad_lock, flags);
934 bfa_iocfc_start(&bfad->bfa); 934 bfa_iocfc_start(&bfad->bfa);
935 bfa_fcs_fabric_modstart(&bfad->bfa_fcs); 935 bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
936 bfad->bfad_flags |= BFAD_HAL_START_DONE; 936 bfad->bfad_flags |= BFAD_HAL_START_DONE;
937 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
938 938
939 if (bfad->im) 939 if (bfad->im)
940 flush_workqueue(bfad->im->drv_workq); 940 flush_workqueue(bfad->im->drv_workq);
941 } 941 }
942 942
943 void 943 void
944 bfad_fcs_stop(struct bfad_s *bfad) 944 bfad_fcs_stop(struct bfad_s *bfad)
945 { 945 {
946 unsigned long flags; 946 unsigned long flags;
947 947
948 spin_lock_irqsave(&bfad->bfad_lock, flags); 948 spin_lock_irqsave(&bfad->bfad_lock, flags);
949 init_completion(&bfad->comp); 949 init_completion(&bfad->comp);
950 bfad->pport.flags |= BFAD_PORT_DELETE; 950 bfad->pport.flags |= BFAD_PORT_DELETE;
951 bfa_fcs_exit(&bfad->bfa_fcs); 951 bfa_fcs_exit(&bfad->bfa_fcs);
952 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 952 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
953 wait_for_completion(&bfad->comp); 953 wait_for_completion(&bfad->comp);
954 954
955 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); 955 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
956 } 956 }
957 957
958 void 958 void
959 bfad_stop(struct bfad_s *bfad) 959 bfad_stop(struct bfad_s *bfad)
960 { 960 {
961 unsigned long flags; 961 unsigned long flags;
962 962
963 spin_lock_irqsave(&bfad->bfad_lock, flags); 963 spin_lock_irqsave(&bfad->bfad_lock, flags);
964 init_completion(&bfad->comp); 964 init_completion(&bfad->comp);
965 bfa_iocfc_stop(&bfad->bfa); 965 bfa_iocfc_stop(&bfad->bfa);
966 bfad->bfad_flags &= ~BFAD_HAL_START_DONE; 966 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
967 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 967 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
968 wait_for_completion(&bfad->comp); 968 wait_for_completion(&bfad->comp);
969 969
970 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); 970 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
971 } 971 }
972 972
973 bfa_status_t 973 bfa_status_t
974 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) 974 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
975 { 975 {
976 int rc = BFA_STATUS_OK; 976 int rc = BFA_STATUS_OK;
977 977
978 /* Allocate scsi_host for the physical port */ 978 /* Allocate scsi_host for the physical port */
979 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && 979 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
980 (role & BFA_LPORT_ROLE_FCP_IM)) { 980 (role & BFA_LPORT_ROLE_FCP_IM)) {
981 if (bfad->pport.im_port == NULL) { 981 if (bfad->pport.im_port == NULL) {
982 rc = BFA_STATUS_FAILED; 982 rc = BFA_STATUS_FAILED;
983 goto out; 983 goto out;
984 } 984 }
985 985
986 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, 986 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
987 &bfad->pcidev->dev); 987 &bfad->pcidev->dev);
988 if (rc != BFA_STATUS_OK) 988 if (rc != BFA_STATUS_OK)
989 goto out; 989 goto out;
990 990
991 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; 991 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
992 } 992 }
993 993
994 /* Setup the debugfs node for this scsi_host */ 994 /* Setup the debugfs node for this scsi_host */
995 if (bfa_debugfs_enable) 995 if (bfa_debugfs_enable)
996 bfad_debugfs_init(&bfad->pport); 996 bfad_debugfs_init(&bfad->pport);
997 997
998 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; 998 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
999 999
1000 out: 1000 out:
1001 return rc; 1001 return rc;
1002 } 1002 }
1003 1003
1004 void 1004 void
1005 bfad_uncfg_pport(struct bfad_s *bfad) 1005 bfad_uncfg_pport(struct bfad_s *bfad)
1006 { 1006 {
1007 /* Remove the debugfs node for this scsi_host */ 1007 /* Remove the debugfs node for this scsi_host */
1008 kfree(bfad->regdata); 1008 kfree(bfad->regdata);
1009 bfad_debugfs_exit(&bfad->pport); 1009 bfad_debugfs_exit(&bfad->pport);
1010 1010
1011 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && 1011 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1012 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { 1012 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
1013 bfad_im_scsi_host_free(bfad, bfad->pport.im_port); 1013 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
1014 bfad_im_port_clean(bfad->pport.im_port); 1014 bfad_im_port_clean(bfad->pport.im_port);
1015 kfree(bfad->pport.im_port); 1015 kfree(bfad->pport.im_port);
1016 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; 1016 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
1017 } 1017 }
1018 1018
1019 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; 1019 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
1020 } 1020 }
1021 1021
1022 bfa_status_t 1022 bfa_status_t
1023 bfad_start_ops(struct bfad_s *bfad) { 1023 bfad_start_ops(struct bfad_s *bfad) {
1024 1024
1025 int retval; 1025 int retval;
1026 unsigned long flags; 1026 unsigned long flags;
1027 struct bfad_vport_s *vport, *vport_new; 1027 struct bfad_vport_s *vport, *vport_new;
1028 struct bfa_fcs_driver_info_s driver_info; 1028 struct bfa_fcs_driver_info_s driver_info;
1029 1029
1030 /* Fill the driver_info info to fcs*/ 1030 /* Fill the driver_info info to fcs*/
1031 memset(&driver_info, 0, sizeof(driver_info)); 1031 memset(&driver_info, 0, sizeof(driver_info));
1032 strncpy(driver_info.version, BFAD_DRIVER_VERSION, 1032 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1033 sizeof(driver_info.version) - 1); 1033 sizeof(driver_info.version) - 1);
1034 if (host_name) 1034 if (host_name)
1035 strncpy(driver_info.host_machine_name, host_name, 1035 strncpy(driver_info.host_machine_name, host_name,
1036 sizeof(driver_info.host_machine_name) - 1); 1036 sizeof(driver_info.host_machine_name) - 1);
1037 if (os_name) 1037 if (os_name)
1038 strncpy(driver_info.host_os_name, os_name, 1038 strncpy(driver_info.host_os_name, os_name,
1039 sizeof(driver_info.host_os_name) - 1); 1039 sizeof(driver_info.host_os_name) - 1);
1040 if (os_patch) 1040 if (os_patch)
1041 strncpy(driver_info.host_os_patch, os_patch, 1041 strncpy(driver_info.host_os_patch, os_patch,
1042 sizeof(driver_info.host_os_patch) - 1); 1042 sizeof(driver_info.host_os_patch) - 1);
1043 1043
1044 strncpy(driver_info.os_device_name, bfad->pci_name, 1044 strncpy(driver_info.os_device_name, bfad->pci_name,
1045 sizeof(driver_info.os_device_name - 1)); 1045 sizeof(driver_info.os_device_name - 1));
1046 1046
1047 /* FCS INIT */ 1047 /* FCS INIT */
1048 spin_lock_irqsave(&bfad->bfad_lock, flags); 1048 spin_lock_irqsave(&bfad->bfad_lock, flags);
1049 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 1049 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1050 bfa_fcs_init(&bfad->bfa_fcs); 1050 bfa_fcs_init(&bfad->bfa_fcs);
1051 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1051 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1052 1052
1053 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); 1053 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
1054 if (retval != BFA_STATUS_OK) { 1054 if (retval != BFA_STATUS_OK) {
1055 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) 1055 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1056 bfa_sm_set_state(bfad, bfad_sm_failed); 1056 bfa_sm_set_state(bfad, bfad_sm_failed);
1057 bfad_stop(bfad); 1057 bfad_stop(bfad);
1058 return BFA_STATUS_FAILED; 1058 return BFA_STATUS_FAILED;
1059 } 1059 }
1060 1060
1061 /* BFAD level FC4 IM specific resource allocation */ 1061 /* BFAD level FC4 IM specific resource allocation */
1062 retval = bfad_im_probe(bfad); 1062 retval = bfad_im_probe(bfad);
1063 if (retval != BFA_STATUS_OK) { 1063 if (retval != BFA_STATUS_OK) {
1064 printk(KERN_WARNING "bfad_im_probe failed\n"); 1064 printk(KERN_WARNING "bfad_im_probe failed\n");
1065 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) 1065 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1066 bfa_sm_set_state(bfad, bfad_sm_failed); 1066 bfa_sm_set_state(bfad, bfad_sm_failed);
1067 bfad_im_probe_undo(bfad); 1067 bfad_im_probe_undo(bfad);
1068 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 1068 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1069 bfad_uncfg_pport(bfad); 1069 bfad_uncfg_pport(bfad);
1070 bfad_stop(bfad); 1070 bfad_stop(bfad);
1071 return BFA_STATUS_FAILED; 1071 return BFA_STATUS_FAILED;
1072 } else 1072 } else
1073 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; 1073 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1074 1074
1075 bfad_drv_start(bfad); 1075 bfad_drv_start(bfad);
1076 1076
1077 /* Complete pbc vport create */ 1077 /* Complete pbc vport create */
1078 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, 1078 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1079 list_entry) { 1079 list_entry) {
1080 struct fc_vport_identifiers vid; 1080 struct fc_vport_identifiers vid;
1081 struct fc_vport *fc_vport; 1081 struct fc_vport *fc_vport;
1082 char pwwn_buf[BFA_STRING_32]; 1082 char pwwn_buf[BFA_STRING_32];
1083 1083
1084 memset(&vid, 0, sizeof(vid)); 1084 memset(&vid, 0, sizeof(vid));
1085 vid.roles = FC_PORT_ROLE_FCP_INITIATOR; 1085 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1086 vid.vport_type = FC_PORTTYPE_NPIV; 1086 vid.vport_type = FC_PORTTYPE_NPIV;
1087 vid.disable = false; 1087 vid.disable = false;
1088 vid.node_name = wwn_to_u64((u8 *) 1088 vid.node_name = wwn_to_u64((u8 *)
1089 (&((vport->fcs_vport).lport.port_cfg.nwwn))); 1089 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1090 vid.port_name = wwn_to_u64((u8 *) 1090 vid.port_name = wwn_to_u64((u8 *)
1091 (&((vport->fcs_vport).lport.port_cfg.pwwn))); 1091 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
1092 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); 1092 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1093 if (!fc_vport) { 1093 if (!fc_vport) {
1094 wwn2str(pwwn_buf, vid.port_name); 1094 wwn2str(pwwn_buf, vid.port_name);
1095 printk(KERN_WARNING "bfad%d: failed to create pbc vport" 1095 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1096 " %s\n", bfad->inst_no, pwwn_buf); 1096 " %s\n", bfad->inst_no, pwwn_buf);
1097 } 1097 }
1098 list_del(&vport->list_entry); 1098 list_del(&vport->list_entry);
1099 kfree(vport); 1099 kfree(vport);
1100 } 1100 }
1101 1101
1102 /* 1102 /*
1103 * If bfa_linkup_delay is set to -1 default; try to retrive the 1103 * If bfa_linkup_delay is set to -1 default; try to retrive the
1104 * value using the bfad_get_linkup_delay(); else use the 1104 * value using the bfad_get_linkup_delay(); else use the
1105 * passed in module param value as the bfa_linkup_delay. 1105 * passed in module param value as the bfa_linkup_delay.
1106 */ 1106 */
1107 if (bfa_linkup_delay < 0) { 1107 if (bfa_linkup_delay < 0) {
1108 bfa_linkup_delay = bfad_get_linkup_delay(bfad); 1108 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1109 bfad_rport_online_wait(bfad); 1109 bfad_rport_online_wait(bfad);
1110 bfa_linkup_delay = -1; 1110 bfa_linkup_delay = -1;
1111 } else 1111 } else
1112 bfad_rport_online_wait(bfad); 1112 bfad_rport_online_wait(bfad);
1113 1113
1114 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); 1114 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1115 1115
1116 return BFA_STATUS_OK; 1116 return BFA_STATUS_OK;
1117 } 1117 }
1118 1118
1119 int 1119 int
1120 bfad_worker(void *ptr) 1120 bfad_worker(void *ptr)
1121 { 1121 {
1122 struct bfad_s *bfad; 1122 struct bfad_s *bfad;
1123 unsigned long flags; 1123 unsigned long flags;
1124 1124
1125 bfad = (struct bfad_s *)ptr; 1125 bfad = (struct bfad_s *)ptr;
1126 1126
1127 while (!kthread_should_stop()) { 1127 while (!kthread_should_stop()) {
1128 1128
1129 /* Send event BFAD_E_INIT_SUCCESS */ 1129 /* Send event BFAD_E_INIT_SUCCESS */
1130 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); 1130 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1131 1131
1132 spin_lock_irqsave(&bfad->bfad_lock, flags); 1132 spin_lock_irqsave(&bfad->bfad_lock, flags);
1133 bfad->bfad_tsk = NULL; 1133 bfad->bfad_tsk = NULL;
1134 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1134 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1135 1135
1136 break; 1136 break;
1137 } 1137 }
1138 1138
1139 return 0; 1139 return 0;
1140 } 1140 }
1141 1141
1142 /* 1142 /*
1143 * BFA driver interrupt functions 1143 * BFA driver interrupt functions
1144 */ 1144 */
1145 irqreturn_t 1145 irqreturn_t
1146 bfad_intx(int irq, void *dev_id) 1146 bfad_intx(int irq, void *dev_id)
1147 { 1147 {
1148 struct bfad_s *bfad = dev_id; 1148 struct bfad_s *bfad = dev_id;
1149 struct list_head doneq; 1149 struct list_head doneq;
1150 unsigned long flags; 1150 unsigned long flags;
1151 bfa_boolean_t rc; 1151 bfa_boolean_t rc;
1152 1152
1153 spin_lock_irqsave(&bfad->bfad_lock, flags); 1153 spin_lock_irqsave(&bfad->bfad_lock, flags);
1154 rc = bfa_intx(&bfad->bfa); 1154 rc = bfa_intx(&bfad->bfa);
1155 if (!rc) { 1155 if (!rc) {
1156 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1156 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1157 return IRQ_NONE; 1157 return IRQ_NONE;
1158 } 1158 }
1159 1159
1160 bfa_comp_deq(&bfad->bfa, &doneq); 1160 bfa_comp_deq(&bfad->bfa, &doneq);
1161 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1161 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1162 1162
1163 if (!list_empty(&doneq)) { 1163 if (!list_empty(&doneq)) {
1164 bfa_comp_process(&bfad->bfa, &doneq); 1164 bfa_comp_process(&bfad->bfa, &doneq);
1165 1165
1166 spin_lock_irqsave(&bfad->bfad_lock, flags); 1166 spin_lock_irqsave(&bfad->bfad_lock, flags);
1167 bfa_comp_free(&bfad->bfa, &doneq); 1167 bfa_comp_free(&bfad->bfa, &doneq);
1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1169 bfa_trc_fp(bfad, irq);
1170 } 1169 }
1171 1170
1172 return IRQ_HANDLED; 1171 return IRQ_HANDLED;
1173 1172
1174 } 1173 }
1175 1174
1176 static irqreturn_t 1175 static irqreturn_t
1177 bfad_msix(int irq, void *dev_id) 1176 bfad_msix(int irq, void *dev_id)
1178 { 1177 {
1179 struct bfad_msix_s *vec = dev_id; 1178 struct bfad_msix_s *vec = dev_id;
1180 struct bfad_s *bfad = vec->bfad; 1179 struct bfad_s *bfad = vec->bfad;
1181 struct list_head doneq; 1180 struct list_head doneq;
1182 unsigned long flags; 1181 unsigned long flags;
1183 1182
1184 spin_lock_irqsave(&bfad->bfad_lock, flags); 1183 spin_lock_irqsave(&bfad->bfad_lock, flags);
1185 1184
1186 bfa_msix(&bfad->bfa, vec->msix.entry); 1185 bfa_msix(&bfad->bfa, vec->msix.entry);
1187 bfa_comp_deq(&bfad->bfa, &doneq); 1186 bfa_comp_deq(&bfad->bfa, &doneq);
1188 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1187 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1189 1188
1190 if (!list_empty(&doneq)) { 1189 if (!list_empty(&doneq)) {
1191 bfa_comp_process(&bfad->bfa, &doneq); 1190 bfa_comp_process(&bfad->bfa, &doneq);
1192 1191
1193 spin_lock_irqsave(&bfad->bfad_lock, flags); 1192 spin_lock_irqsave(&bfad->bfad_lock, flags);
1194 bfa_comp_free(&bfad->bfa, &doneq); 1193 bfa_comp_free(&bfad->bfa, &doneq);
1195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1194 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1196 } 1195 }
1197 1196
1198 return IRQ_HANDLED; 1197 return IRQ_HANDLED;
1199 } 1198 }
1200 1199
1201 /* 1200 /*
1202 * Initialize the MSIX entry table. 1201 * Initialize the MSIX entry table.
1203 */ 1202 */
1204 static void 1203 static void
1205 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, 1204 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1206 int mask, int max_bit) 1205 int mask, int max_bit)
1207 { 1206 {
1208 int i; 1207 int i;
1209 int match = 0x00000001; 1208 int match = 0x00000001;
1210 1209
1211 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { 1210 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1212 if (mask & match) { 1211 if (mask & match) {
1213 bfad->msix_tab[bfad->nvec].msix.entry = i; 1212 bfad->msix_tab[bfad->nvec].msix.entry = i;
1214 bfad->msix_tab[bfad->nvec].bfad = bfad; 1213 bfad->msix_tab[bfad->nvec].bfad = bfad;
1215 msix_entries[bfad->nvec].entry = i; 1214 msix_entries[bfad->nvec].entry = i;
1216 bfad->nvec++; 1215 bfad->nvec++;
1217 } 1216 }
1218 1217
1219 match <<= 1; 1218 match <<= 1;
1220 } 1219 }
1221 1220
1222 } 1221 }
1223 1222
1224 int 1223 int
1225 bfad_install_msix_handler(struct bfad_s *bfad) 1224 bfad_install_msix_handler(struct bfad_s *bfad)
1226 { 1225 {
1227 int i, error = 0; 1226 int i, error = 0;
1228 1227
1229 for (i = 0; i < bfad->nvec; i++) { 1228 for (i = 0; i < bfad->nvec; i++) {
1230 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", 1229 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1231 bfad->pci_name, 1230 bfad->pci_name,
1232 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ? 1231 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
1233 msix_name_ct[i] : msix_name_cb[i])); 1232 msix_name_ct[i] : msix_name_cb[i]));
1234 1233
1235 error = request_irq(bfad->msix_tab[i].msix.vector, 1234 error = request_irq(bfad->msix_tab[i].msix.vector,
1236 (irq_handler_t) bfad_msix, 0, 1235 (irq_handler_t) bfad_msix, 0,
1237 bfad->msix_tab[i].name, &bfad->msix_tab[i]); 1236 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1238 bfa_trc(bfad, i); 1237 bfa_trc(bfad, i);
1239 bfa_trc(bfad, bfad->msix_tab[i].msix.vector); 1238 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1240 if (error) { 1239 if (error) {
1241 int j; 1240 int j;
1242 1241
1243 for (j = 0; j < i; j++) 1242 for (j = 0; j < i; j++)
1244 free_irq(bfad->msix_tab[j].msix.vector, 1243 free_irq(bfad->msix_tab[j].msix.vector,
1245 &bfad->msix_tab[j]); 1244 &bfad->msix_tab[j]);
1246 1245
1247 return 1; 1246 return 1;
1248 } 1247 }
1249 } 1248 }
1250 1249
1251 return 0; 1250 return 0;
1252 } 1251 }
1253 1252
1254 /* 1253 /*
1255 * Setup MSIX based interrupt. 1254 * Setup MSIX based interrupt.
1256 */ 1255 */
1257 int 1256 int
1258 bfad_setup_intr(struct bfad_s *bfad) 1257 bfad_setup_intr(struct bfad_s *bfad)
1259 { 1258 {
1260 int error = 0; 1259 int error = 0;
1261 u32 mask = 0, i, num_bit = 0, max_bit = 0; 1260 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1262 struct msix_entry msix_entries[MAX_MSIX_ENTRY]; 1261 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1263 struct pci_dev *pdev = bfad->pcidev; 1262 struct pci_dev *pdev = bfad->pcidev;
1264 1263
1265 /* Call BFA to get the msix map for this PCI function. */ 1264 /* Call BFA to get the msix map for this PCI function. */
1266 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); 1265 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1267 1266
1268 /* Set up the msix entry table */ 1267 /* Set up the msix entry table */
1269 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 1268 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1270 1269
1271 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || 1270 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
1272 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { 1271 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
1273 1272
1274 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); 1273 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1275 if (error) { 1274 if (error) {
1276 /* 1275 /*
1277 * Only error number of vector is available. 1276 * Only error number of vector is available.
1278 * We don't have a mechanism to map multiple 1277 * We don't have a mechanism to map multiple
1279 * interrupts into one vector, so even if we 1278 * interrupts into one vector, so even if we
1280 * can try to request less vectors, we don't 1279 * can try to request less vectors, we don't
1281 * know how to associate interrupt events to 1280 * know how to associate interrupt events to
1282 * vectors. Linux doesn't dupicate vectors 1281 * vectors. Linux doesn't dupicate vectors
1283 * in the MSIX table for this case. 1282 * in the MSIX table for this case.
1284 */ 1283 */
1285 1284
1286 printk(KERN_WARNING "bfad%d: " 1285 printk(KERN_WARNING "bfad%d: "
1287 "pci_enable_msix failed (%d)," 1286 "pci_enable_msix failed (%d),"
1288 " use line based.\n", bfad->inst_no, error); 1287 " use line based.\n", bfad->inst_no, error);
1289 1288
1290 goto line_based; 1289 goto line_based;
1291 } 1290 }
1292 1291
1293 /* Save the vectors */ 1292 /* Save the vectors */
1294 for (i = 0; i < bfad->nvec; i++) { 1293 for (i = 0; i < bfad->nvec; i++) {
1295 bfa_trc(bfad, msix_entries[i].vector); 1294 bfa_trc(bfad, msix_entries[i].vector);
1296 bfad->msix_tab[i].msix.vector = msix_entries[i].vector; 1295 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1297 } 1296 }
1298 1297
1299 bfa_msix_init(&bfad->bfa, bfad->nvec); 1298 bfa_msix_init(&bfad->bfa, bfad->nvec);
1300 1299
1301 bfad->bfad_flags |= BFAD_MSIX_ON; 1300 bfad->bfad_flags |= BFAD_MSIX_ON;
1302 1301
1303 return error; 1302 return error;
1304 } 1303 }
1305 1304
1306 line_based: 1305 line_based:
1307 error = 0; 1306 error = 0;
1308 if (request_irq 1307 if (request_irq
1309 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS, 1308 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1310 BFAD_DRIVER_NAME, bfad) != 0) { 1309 BFAD_DRIVER_NAME, bfad) != 0) {
1311 /* Enable interrupt handler failed */ 1310 /* Enable interrupt handler failed */
1312 return 1; 1311 return 1;
1313 } 1312 }
1314 1313
1315 return error; 1314 return error;
1316 } 1315 }
1317 1316
1318 void 1317 void
1319 bfad_remove_intr(struct bfad_s *bfad) 1318 bfad_remove_intr(struct bfad_s *bfad)
1320 { 1319 {
1321 int i; 1320 int i;
1322 1321
1323 if (bfad->bfad_flags & BFAD_MSIX_ON) { 1322 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1324 for (i = 0; i < bfad->nvec; i++) 1323 for (i = 0; i < bfad->nvec; i++)
1325 free_irq(bfad->msix_tab[i].msix.vector, 1324 free_irq(bfad->msix_tab[i].msix.vector,
1326 &bfad->msix_tab[i]); 1325 &bfad->msix_tab[i]);
1327 1326
1328 pci_disable_msix(bfad->pcidev); 1327 pci_disable_msix(bfad->pcidev);
1329 bfad->bfad_flags &= ~BFAD_MSIX_ON; 1328 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1330 } else { 1329 } else {
1331 free_irq(bfad->pcidev->irq, bfad); 1330 free_irq(bfad->pcidev->irq, bfad);
1332 } 1331 }
1333 } 1332 }
1334 1333
1335 /* 1334 /*
1336 * PCI probe entry. 1335 * PCI probe entry.
1337 */ 1336 */
1338 int 1337 int
1339 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1338 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1340 { 1339 {
1341 struct bfad_s *bfad; 1340 struct bfad_s *bfad;
1342 int error = -ENODEV, retval; 1341 int error = -ENODEV, retval;
1343 1342
1344 /* For single port cards - only claim function 0 */ 1343 /* For single port cards - only claim function 0 */
1345 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && 1344 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1346 (PCI_FUNC(pdev->devfn) != 0)) 1345 (PCI_FUNC(pdev->devfn) != 0))
1347 return -ENODEV; 1346 return -ENODEV;
1348 1347
1349 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); 1348 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1350 if (!bfad) { 1349 if (!bfad) {
1351 error = -ENOMEM; 1350 error = -ENOMEM;
1352 goto out; 1351 goto out;
1353 } 1352 }
1354 1353
1355 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); 1354 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1356 if (!bfad->trcmod) { 1355 if (!bfad->trcmod) {
1357 printk(KERN_WARNING "Error alloc trace buffer!\n"); 1356 printk(KERN_WARNING "Error alloc trace buffer!\n");
1358 error = -ENOMEM; 1357 error = -ENOMEM;
1359 goto out_alloc_trace_failure; 1358 goto out_alloc_trace_failure;
1360 } 1359 }
1361 1360
1362 /* TRACE INIT */ 1361 /* TRACE INIT */
1363 bfa_trc_init(bfad->trcmod); 1362 bfa_trc_init(bfad->trcmod);
1364 bfa_trc(bfad, bfad_inst); 1363 bfa_trc(bfad, bfad_inst);
1365 1364
1366 if (!(bfad_load_fwimg(pdev))) { 1365 if (!(bfad_load_fwimg(pdev))) {
1367 kfree(bfad->trcmod); 1366 kfree(bfad->trcmod);
1368 goto out_alloc_trace_failure; 1367 goto out_alloc_trace_failure;
1369 } 1368 }
1370 1369
1371 retval = bfad_pci_init(pdev, bfad); 1370 retval = bfad_pci_init(pdev, bfad);
1372 if (retval) { 1371 if (retval) {
1373 printk(KERN_WARNING "bfad_pci_init failure!\n"); 1372 printk(KERN_WARNING "bfad_pci_init failure!\n");
1374 error = retval; 1373 error = retval;
1375 goto out_pci_init_failure; 1374 goto out_pci_init_failure;
1376 } 1375 }
1377 1376
1378 mutex_lock(&bfad_mutex); 1377 mutex_lock(&bfad_mutex);
1379 bfad->inst_no = bfad_inst++; 1378 bfad->inst_no = bfad_inst++;
1380 list_add_tail(&bfad->list_entry, &bfad_list); 1379 list_add_tail(&bfad->list_entry, &bfad_list);
1381 mutex_unlock(&bfad_mutex); 1380 mutex_unlock(&bfad_mutex);
1382 1381
1383 /* Initializing the state machine: State set to uninit */ 1382 /* Initializing the state machine: State set to uninit */
1384 bfa_sm_set_state(bfad, bfad_sm_uninit); 1383 bfa_sm_set_state(bfad, bfad_sm_uninit);
1385 1384
1386 spin_lock_init(&bfad->bfad_lock); 1385 spin_lock_init(&bfad->bfad_lock);
1387 pci_set_drvdata(pdev, bfad); 1386 pci_set_drvdata(pdev, bfad);
1388 1387
1389 bfad->ref_count = 0; 1388 bfad->ref_count = 0;
1390 bfad->pport.bfad = bfad; 1389 bfad->pport.bfad = bfad;
1391 INIT_LIST_HEAD(&bfad->pbc_vport_list); 1390 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1392 1391
1393 retval = bfad_drv_init(bfad); 1392 retval = bfad_drv_init(bfad);
1394 if (retval != BFA_STATUS_OK) 1393 if (retval != BFA_STATUS_OK)
1395 goto out_drv_init_failure; 1394 goto out_drv_init_failure;
1396 1395
1397 bfa_sm_send_event(bfad, BFAD_E_CREATE); 1396 bfa_sm_send_event(bfad, BFAD_E_CREATE);
1398 1397
1399 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) 1398 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1400 goto out_bfad_sm_failure; 1399 goto out_bfad_sm_failure;
1401 1400
1402 return 0; 1401 return 0;
1403 1402
1404 out_bfad_sm_failure: 1403 out_bfad_sm_failure:
1405 bfa_detach(&bfad->bfa); 1404 bfa_detach(&bfad->bfa);
1406 bfad_hal_mem_release(bfad); 1405 bfad_hal_mem_release(bfad);
1407 out_drv_init_failure: 1406 out_drv_init_failure:
1408 mutex_lock(&bfad_mutex); 1407 mutex_lock(&bfad_mutex);
1409 bfad_inst--; 1408 bfad_inst--;
1410 list_del(&bfad->list_entry); 1409 list_del(&bfad->list_entry);
1411 mutex_unlock(&bfad_mutex); 1410 mutex_unlock(&bfad_mutex);
1412 bfad_pci_uninit(pdev, bfad); 1411 bfad_pci_uninit(pdev, bfad);
1413 out_pci_init_failure: 1412 out_pci_init_failure:
1414 kfree(bfad->trcmod); 1413 kfree(bfad->trcmod);
1415 out_alloc_trace_failure: 1414 out_alloc_trace_failure:
1416 kfree(bfad); 1415 kfree(bfad);
1417 out: 1416 out:
1418 return error; 1417 return error;
1419 } 1418 }
1420 1419
1421 /* 1420 /*
1422 * PCI remove entry. 1421 * PCI remove entry.
1423 */ 1422 */
1424 void 1423 void
1425 bfad_pci_remove(struct pci_dev *pdev) 1424 bfad_pci_remove(struct pci_dev *pdev)
1426 { 1425 {
1427 struct bfad_s *bfad = pci_get_drvdata(pdev); 1426 struct bfad_s *bfad = pci_get_drvdata(pdev);
1428 unsigned long flags; 1427 unsigned long flags;
1429 1428
1430 bfa_trc(bfad, bfad->inst_no); 1429 bfa_trc(bfad, bfad->inst_no);
1431 1430
1432 spin_lock_irqsave(&bfad->bfad_lock, flags); 1431 spin_lock_irqsave(&bfad->bfad_lock, flags);
1433 if (bfad->bfad_tsk != NULL) { 1432 if (bfad->bfad_tsk != NULL) {
1434 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1433 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1435 kthread_stop(bfad->bfad_tsk); 1434 kthread_stop(bfad->bfad_tsk);
1436 } else { 1435 } else {
1437 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1436 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1438 } 1437 }
1439 1438
1440 /* Send Event BFAD_E_STOP */ 1439 /* Send Event BFAD_E_STOP */
1441 bfa_sm_send_event(bfad, BFAD_E_STOP); 1440 bfa_sm_send_event(bfad, BFAD_E_STOP);
1442 1441
1443 /* Driver detach and dealloc mem */ 1442 /* Driver detach and dealloc mem */
1444 spin_lock_irqsave(&bfad->bfad_lock, flags); 1443 spin_lock_irqsave(&bfad->bfad_lock, flags);
1445 bfa_detach(&bfad->bfa); 1444 bfa_detach(&bfad->bfa);
1446 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1445 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1447 bfad_hal_mem_release(bfad); 1446 bfad_hal_mem_release(bfad);
1448 1447
1449 /* Cleaning the BFAD instance */ 1448 /* Cleaning the BFAD instance */
1450 mutex_lock(&bfad_mutex); 1449 mutex_lock(&bfad_mutex);
1451 bfad_inst--; 1450 bfad_inst--;
1452 list_del(&bfad->list_entry); 1451 list_del(&bfad->list_entry);
1453 mutex_unlock(&bfad_mutex); 1452 mutex_unlock(&bfad_mutex);
1454 bfad_pci_uninit(pdev, bfad); 1453 bfad_pci_uninit(pdev, bfad);
1455 1454
1456 kfree(bfad->trcmod); 1455 kfree(bfad->trcmod);
1457 kfree(bfad); 1456 kfree(bfad);
1458 } 1457 }
1459 1458
1460 struct pci_device_id bfad_id_table[] = { 1459 struct pci_device_id bfad_id_table[] = {
1461 { 1460 {
1462 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1461 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1463 .device = BFA_PCI_DEVICE_ID_FC_8G2P, 1462 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1464 .subvendor = PCI_ANY_ID, 1463 .subvendor = PCI_ANY_ID,
1465 .subdevice = PCI_ANY_ID, 1464 .subdevice = PCI_ANY_ID,
1466 }, 1465 },
1467 { 1466 {
1468 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1467 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1469 .device = BFA_PCI_DEVICE_ID_FC_8G1P, 1468 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1470 .subvendor = PCI_ANY_ID, 1469 .subvendor = PCI_ANY_ID,
1471 .subdevice = PCI_ANY_ID, 1470 .subdevice = PCI_ANY_ID,
1472 }, 1471 },
1473 { 1472 {
1474 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1473 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1475 .device = BFA_PCI_DEVICE_ID_CT, 1474 .device = BFA_PCI_DEVICE_ID_CT,
1476 .subvendor = PCI_ANY_ID, 1475 .subvendor = PCI_ANY_ID,
1477 .subdevice = PCI_ANY_ID, 1476 .subdevice = PCI_ANY_ID,
1478 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1477 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1479 .class_mask = ~0, 1478 .class_mask = ~0,
1480 }, 1479 },
1481 { 1480 {
1482 .vendor = BFA_PCI_VENDOR_ID_BROCADE, 1481 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1483 .device = BFA_PCI_DEVICE_ID_CT_FC, 1482 .device = BFA_PCI_DEVICE_ID_CT_FC,
1484 .subvendor = PCI_ANY_ID, 1483 .subvendor = PCI_ANY_ID,
1485 .subdevice = PCI_ANY_ID, 1484 .subdevice = PCI_ANY_ID,
1486 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1485 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1487 .class_mask = ~0, 1486 .class_mask = ~0,
1488 }, 1487 },
1489 1488
1490 {0, 0}, 1489 {0, 0},
1491 }; 1490 };
1492 1491
1493 MODULE_DEVICE_TABLE(pci, bfad_id_table); 1492 MODULE_DEVICE_TABLE(pci, bfad_id_table);
1494 1493
1495 static struct pci_driver bfad_pci_driver = { 1494 static struct pci_driver bfad_pci_driver = {
1496 .name = BFAD_DRIVER_NAME, 1495 .name = BFAD_DRIVER_NAME,
1497 .id_table = bfad_id_table, 1496 .id_table = bfad_id_table,
1498 .probe = bfad_pci_probe, 1497 .probe = bfad_pci_probe,
1499 .remove = __devexit_p(bfad_pci_remove), 1498 .remove = __devexit_p(bfad_pci_remove),
1500 }; 1499 };
1501 1500
1502 /* 1501 /*
1503 * Driver module init. 1502 * Driver module init.
1504 */ 1503 */
1505 static int __init 1504 static int __init
1506 bfad_init(void) 1505 bfad_init(void)
1507 { 1506 {
1508 int error = 0; 1507 int error = 0;
1509 1508
1510 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", 1509 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1511 BFAD_DRIVER_VERSION); 1510 BFAD_DRIVER_VERSION);
1512 1511
1513 if (num_sgpgs > 0) 1512 if (num_sgpgs > 0)
1514 num_sgpgs_parm = num_sgpgs; 1513 num_sgpgs_parm = num_sgpgs;
1515 1514
1516 error = bfad_im_module_init(); 1515 error = bfad_im_module_init();
1517 if (error) { 1516 if (error) {
1518 error = -ENOMEM; 1517 error = -ENOMEM;
1519 printk(KERN_WARNING "bfad_im_module_init failure\n"); 1518 printk(KERN_WARNING "bfad_im_module_init failure\n");
1520 goto ext; 1519 goto ext;
1521 } 1520 }
1522 1521
1523 if (strcmp(FCPI_NAME, " fcpim") == 0) 1522 if (strcmp(FCPI_NAME, " fcpim") == 0)
1524 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; 1523 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1525 1524
1526 bfa_auto_recover = ioc_auto_recover; 1525 bfa_auto_recover = ioc_auto_recover;
1527 bfa_fcs_rport_set_del_timeout(rport_del_timeout); 1526 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1528 1527
1529 error = pci_register_driver(&bfad_pci_driver); 1528 error = pci_register_driver(&bfad_pci_driver);
1530 if (error) { 1529 if (error) {
1531 printk(KERN_WARNING "pci_register_driver failure\n"); 1530 printk(KERN_WARNING "pci_register_driver failure\n");
1532 goto ext; 1531 goto ext;
1533 } 1532 }
1534 1533
1535 return 0; 1534 return 0;
1536 1535
1537 ext: 1536 ext:
1538 bfad_im_module_exit(); 1537 bfad_im_module_exit();
1539 return error; 1538 return error;
1540 } 1539 }
1541 1540
1542 /* 1541 /*
1543 * Driver module exit. 1542 * Driver module exit.
1544 */ 1543 */
1545 static void __exit 1544 static void __exit
1546 bfad_exit(void) 1545 bfad_exit(void)
1547 { 1546 {
1548 pci_unregister_driver(&bfad_pci_driver); 1547 pci_unregister_driver(&bfad_pci_driver);
1549 bfad_im_module_exit(); 1548 bfad_im_module_exit();
1550 bfad_free_fwimg(); 1549 bfad_free_fwimg();
1551 } 1550 }
1552 1551
1553 /* Firmware handling */ 1552 /* Firmware handling */
1554 u32 * 1553 u32 *
1555 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 1554 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1556 u32 *bfi_image_size, char *fw_name) 1555 u32 *bfi_image_size, char *fw_name)
1557 { 1556 {
1558 const struct firmware *fw; 1557 const struct firmware *fw;
1559 1558
1560 if (request_firmware(&fw, fw_name, &pdev->dev)) { 1559 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1561 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); 1560 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1562 goto error; 1561 goto error;
1563 } 1562 }
1564 1563
1565 *bfi_image = vmalloc(fw->size); 1564 *bfi_image = vmalloc(fw->size);
1566 if (NULL == *bfi_image) { 1565 if (NULL == *bfi_image) {
1567 printk(KERN_ALERT "Fail to allocate buffer for fw image " 1566 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1568 "size=%x!\n", (u32) fw->size); 1567 "size=%x!\n", (u32) fw->size);
1569 goto error; 1568 goto error;
1570 } 1569 }
1571 1570
1572 memcpy(*bfi_image, fw->data, fw->size); 1571 memcpy(*bfi_image, fw->data, fw->size);
1573 *bfi_image_size = fw->size/sizeof(u32); 1572 *bfi_image_size = fw->size/sizeof(u32);
1574 1573
1575 return *bfi_image; 1574 return *bfi_image;
1576 1575
1577 error: 1576 error:
1578 return NULL; 1577 return NULL;
1579 } 1578 }
1580 1579
1581 u32 * 1580 u32 *
1582 bfad_get_firmware_buf(struct pci_dev *pdev) 1581 bfad_get_firmware_buf(struct pci_dev *pdev)
1583 { 1582 {
1584 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { 1583 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1585 if (bfi_image_ct_fc_size == 0) 1584 if (bfi_image_ct_fc_size == 0)
1586 bfad_read_firmware(pdev, &bfi_image_ct_fc, 1585 bfad_read_firmware(pdev, &bfi_image_ct_fc,
1587 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); 1586 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
1588 return bfi_image_ct_fc; 1587 return bfi_image_ct_fc;
1589 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { 1588 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
1590 if (bfi_image_ct_cna_size == 0) 1589 if (bfi_image_ct_cna_size == 0)
1591 bfad_read_firmware(pdev, &bfi_image_ct_cna, 1590 bfad_read_firmware(pdev, &bfi_image_ct_cna,
1592 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); 1591 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
1593 return bfi_image_ct_cna; 1592 return bfi_image_ct_cna;
1594 } else { 1593 } else {
1595 if (bfi_image_cb_fc_size == 0) 1594 if (bfi_image_cb_fc_size == 0)
1596 bfad_read_firmware(pdev, &bfi_image_cb_fc, 1595 bfad_read_firmware(pdev, &bfi_image_cb_fc,
1597 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); 1596 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
1598 return bfi_image_cb_fc; 1597 return bfi_image_cb_fc;
1599 } 1598 }
1600 } 1599 }
1601 1600
1602 module_init(bfad_init); 1601 module_init(bfad_init);
1603 module_exit(bfad_exit); 1602 module_exit(bfad_exit);
1604 MODULE_LICENSE("GPL"); 1603 MODULE_LICENSE("GPL");
1605 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); 1604 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1606 MODULE_AUTHOR("Brocade Communications Systems, Inc."); 1605 MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1607 MODULE_VERSION(BFAD_DRIVER_VERSION); 1606 MODULE_VERSION(BFAD_DRIVER_VERSION);
1608 1607
drivers/scsi/bfa/bfad_im.c
1 /* 1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved 3 * All rights reserved
4 * www.brocade.com 4 * www.brocade.com
5 * 5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as 9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation 10 * published by the Free Software Foundation
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18 /* 18 /*
19 * bfad_im.c Linux driver IM module. 19 * bfad_im.c Linux driver IM module.
20 */ 20 */
21 21
22 #include "bfad_drv.h" 22 #include "bfad_drv.h"
23 #include "bfad_im.h" 23 #include "bfad_im.h"
24 #include "bfa_fcs.h" 24 #include "bfa_fcs.h"
25 25
26 BFA_TRC_FILE(LDRV, IM); 26 BFA_TRC_FILE(LDRV, IM);
27 27
28 DEFINE_IDR(bfad_im_port_index); 28 DEFINE_IDR(bfad_im_port_index);
29 struct scsi_transport_template *bfad_im_scsi_transport_template; 29 struct scsi_transport_template *bfad_im_scsi_transport_template;
30 struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 30 struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
31 static void bfad_im_itnim_work_handler(struct work_struct *work); 31 static void bfad_im_itnim_work_handler(struct work_struct *work);
32 static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); 32 static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
33 static int bfad_im_slave_alloc(struct scsi_device *sdev); 33 static int bfad_im_slave_alloc(struct scsi_device *sdev);
34 static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, 34 static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
35 struct bfad_itnim_s *itnim); 35 struct bfad_itnim_s *itnim);
36 36
37 void 37 void
38 bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, 38 bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
39 enum bfi_ioim_status io_status, u8 scsi_status, 39 enum bfi_ioim_status io_status, u8 scsi_status,
40 int sns_len, u8 *sns_info, s32 residue) 40 int sns_len, u8 *sns_info, s32 residue)
41 { 41 {
42 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 42 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
43 struct bfad_s *bfad = drv; 43 struct bfad_s *bfad = drv;
44 struct bfad_itnim_data_s *itnim_data; 44 struct bfad_itnim_data_s *itnim_data;
45 struct bfad_itnim_s *itnim; 45 struct bfad_itnim_s *itnim;
46 u8 host_status = DID_OK; 46 u8 host_status = DID_OK;
47 47
48 switch (io_status) { 48 switch (io_status) {
49 case BFI_IOIM_STS_OK: 49 case BFI_IOIM_STS_OK:
50 bfa_trc(bfad, scsi_status); 50 bfa_trc(bfad, scsi_status);
51 scsi_set_resid(cmnd, 0); 51 scsi_set_resid(cmnd, 0);
52 52
53 if (sns_len > 0) { 53 if (sns_len > 0) {
54 bfa_trc(bfad, sns_len); 54 bfa_trc(bfad, sns_len);
55 if (sns_len > SCSI_SENSE_BUFFERSIZE) 55 if (sns_len > SCSI_SENSE_BUFFERSIZE)
56 sns_len = SCSI_SENSE_BUFFERSIZE; 56 sns_len = SCSI_SENSE_BUFFERSIZE;
57 memcpy(cmnd->sense_buffer, sns_info, sns_len); 57 memcpy(cmnd->sense_buffer, sns_info, sns_len);
58 } 58 }
59 59
60 if (residue > 0) { 60 if (residue > 0) {
61 bfa_trc(bfad, residue); 61 bfa_trc(bfad, residue);
62 scsi_set_resid(cmnd, residue); 62 scsi_set_resid(cmnd, residue);
63 if (!sns_len && (scsi_status == SAM_STAT_GOOD) && 63 if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
64 (scsi_bufflen(cmnd) - residue) < 64 (scsi_bufflen(cmnd) - residue) <
65 cmnd->underflow) { 65 cmnd->underflow) {
66 bfa_trc(bfad, 0); 66 bfa_trc(bfad, 0);
67 host_status = DID_ERROR; 67 host_status = DID_ERROR;
68 } 68 }
69 } 69 }
70 cmnd->result = ScsiResult(host_status, scsi_status); 70 cmnd->result = ScsiResult(host_status, scsi_status);
71 71
72 break; 72 break;
73 73
74 case BFI_IOIM_STS_ABORTED: 74 case BFI_IOIM_STS_ABORTED:
75 case BFI_IOIM_STS_TIMEDOUT: 75 case BFI_IOIM_STS_TIMEDOUT:
76 case BFI_IOIM_STS_PATHTOV: 76 case BFI_IOIM_STS_PATHTOV:
77 default: 77 default:
78 host_status = DID_ERROR; 78 host_status = DID_ERROR;
79 cmnd->result = ScsiResult(host_status, 0); 79 cmnd->result = ScsiResult(host_status, 0);
80 } 80 }
81 81
82 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ 82 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
83 if (cmnd->device->host != NULL) 83 if (cmnd->device->host != NULL)
84 scsi_dma_unmap(cmnd); 84 scsi_dma_unmap(cmnd);
85 85
86 cmnd->host_scribble = NULL; 86 cmnd->host_scribble = NULL;
87 bfa_trc(bfad, cmnd->result); 87 bfa_trc(bfad, cmnd->result);
88 88
89 itnim_data = cmnd->device->hostdata; 89 itnim_data = cmnd->device->hostdata;
90 if (itnim_data) { 90 if (itnim_data) {
91 itnim = itnim_data->itnim; 91 itnim = itnim_data->itnim;
92 if (!cmnd->result && itnim && 92 if (!cmnd->result && itnim &&
93 (bfa_lun_queue_depth > cmnd->device->queue_depth)) { 93 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
94 /* Queue depth adjustment for good status completion */ 94 /* Queue depth adjustment for good status completion */
95 bfad_ramp_up_qdepth(itnim, cmnd->device); 95 bfad_ramp_up_qdepth(itnim, cmnd->device);
96 } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { 96 } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
97 /* qfull handling */ 97 /* qfull handling */
98 bfad_handle_qfull(itnim, cmnd->device); 98 bfad_handle_qfull(itnim, cmnd->device);
99 } 99 }
100 } 100 }
101 101
102 cmnd->scsi_done(cmnd); 102 cmnd->scsi_done(cmnd);
103 } 103 }
104 104
105 void 105 void
106 bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) 106 bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
107 { 107 {
108 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 108 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
109 struct bfad_itnim_data_s *itnim_data; 109 struct bfad_itnim_data_s *itnim_data;
110 struct bfad_itnim_s *itnim; 110 struct bfad_itnim_s *itnim;
111 111
112 cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD); 112 cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
113 113
114 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ 114 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
115 if (cmnd->device->host != NULL) 115 if (cmnd->device->host != NULL)
116 scsi_dma_unmap(cmnd); 116 scsi_dma_unmap(cmnd);
117 117
118 cmnd->host_scribble = NULL; 118 cmnd->host_scribble = NULL;
119 119
120 /* Queue depth adjustment */ 120 /* Queue depth adjustment */
121 if (bfa_lun_queue_depth > cmnd->device->queue_depth) { 121 if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
122 itnim_data = cmnd->device->hostdata; 122 itnim_data = cmnd->device->hostdata;
123 if (itnim_data) { 123 if (itnim_data) {
124 itnim = itnim_data->itnim; 124 itnim = itnim_data->itnim;
125 if (itnim) 125 if (itnim)
126 bfad_ramp_up_qdepth(itnim, cmnd->device); 126 bfad_ramp_up_qdepth(itnim, cmnd->device);
127 } 127 }
128 } 128 }
129 129
130 cmnd->scsi_done(cmnd); 130 cmnd->scsi_done(cmnd);
131 } 131 }
132 132
133 void 133 void
134 bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio) 134 bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
135 { 135 {
136 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; 136 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
137 struct bfad_s *bfad = drv; 137 struct bfad_s *bfad = drv;
138 138
139 cmnd->result = ScsiResult(DID_ERROR, 0); 139 cmnd->result = ScsiResult(DID_ERROR, 0);
140 140
141 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ 141 /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
142 if (cmnd->device->host != NULL) 142 if (cmnd->device->host != NULL)
143 scsi_dma_unmap(cmnd); 143 scsi_dma_unmap(cmnd);
144 144
145 bfa_trc(bfad, cmnd->result); 145 bfa_trc(bfad, cmnd->result);
146 cmnd->host_scribble = NULL; 146 cmnd->host_scribble = NULL;
147 } 147 }
148 148
149 void 149 void
150 bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, 150 bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
151 enum bfi_tskim_status tsk_status) 151 enum bfi_tskim_status tsk_status)
152 { 152 {
153 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk; 153 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
154 wait_queue_head_t *wq; 154 wait_queue_head_t *wq;
155 155
156 cmnd->SCp.Status |= tsk_status << 1; 156 cmnd->SCp.Status |= tsk_status << 1;
157 set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status); 157 set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
158 wq = (wait_queue_head_t *) cmnd->SCp.ptr; 158 wq = (wait_queue_head_t *) cmnd->SCp.ptr;
159 cmnd->SCp.ptr = NULL; 159 cmnd->SCp.ptr = NULL;
160 160
161 if (wq) 161 if (wq)
162 wake_up(wq); 162 wake_up(wq);
163 } 163 }
164 164
165 /* 165 /*
166 * Scsi_Host_template SCSI host template 166 * Scsi_Host_template SCSI host template
167 */ 167 */
168 /* 168 /*
169 * Scsi_Host template entry, returns BFAD PCI info. 169 * Scsi_Host template entry, returns BFAD PCI info.
170 */ 170 */
171 static const char * 171 static const char *
172 bfad_im_info(struct Scsi_Host *shost) 172 bfad_im_info(struct Scsi_Host *shost)
173 { 173 {
174 static char bfa_buf[256]; 174 static char bfa_buf[256];
175 struct bfad_im_port_s *im_port = 175 struct bfad_im_port_s *im_port =
176 (struct bfad_im_port_s *) shost->hostdata[0]; 176 (struct bfad_im_port_s *) shost->hostdata[0];
177 struct bfad_s *bfad = im_port->bfad; 177 struct bfad_s *bfad = im_port->bfad;
178 struct bfa_s *bfa = &bfad->bfa; 178 struct bfa_s *bfa = &bfad->bfa;
179 struct bfa_ioc_s *ioc = &bfa->ioc; 179 struct bfa_ioc_s *ioc = &bfa->ioc;
180 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 180 char model[BFA_ADAPTER_MODEL_NAME_LEN];
181 181
182 bfa_get_adapter_model(bfa, model); 182 bfa_get_adapter_model(bfa, model);
183 183
184 memset(bfa_buf, 0, sizeof(bfa_buf)); 184 memset(bfa_buf, 0, sizeof(bfa_buf));
185 if (ioc->ctdev && !ioc->fcmode) 185 if (ioc->ctdev && !ioc->fcmode)
186 snprintf(bfa_buf, sizeof(bfa_buf), 186 snprintf(bfa_buf, sizeof(bfa_buf),
187 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", 187 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
188 model, bfad->pci_name, BFAD_DRIVER_VERSION); 188 model, bfad->pci_name, BFAD_DRIVER_VERSION);
189 else 189 else
190 snprintf(bfa_buf, sizeof(bfa_buf), 190 snprintf(bfa_buf, sizeof(bfa_buf),
191 "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s", 191 "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
192 model, bfad->pci_name, BFAD_DRIVER_VERSION); 192 model, bfad->pci_name, BFAD_DRIVER_VERSION);
193 193
194 return bfa_buf; 194 return bfa_buf;
195 } 195 }
196 196
197 /* 197 /*
198 * Scsi_Host template entry, aborts the specified SCSI command. 198 * Scsi_Host template entry, aborts the specified SCSI command.
199 * 199 *
200 * Returns: SUCCESS or FAILED. 200 * Returns: SUCCESS or FAILED.
201 */ 201 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	u32 timeout;
	int rc = FAILED;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
	if (!hal_io) {
		/* IO has been completed, return success */
		rc = SUCCESS;
		goto out;
	}
	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
		/* host_scribble no longer refers to this command */
		rc = FAILED;
		goto out;
	}

	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: abort cmnd %p iotag %x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	(void) bfa_ioim_abort(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Need to wait until the command get aborted */
	timeout = 10;
	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		/* exponential backoff on the sleep, doubling until >= 4*HZ */
		if (timeout < 4 * HZ)
			timeout *= 2;
	}

	cmnd->scsi_done(cmnd);
	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: complete abort 0x%p iotag 0x%x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	return SUCCESS;
out:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return rc;
}
252 252
/*
 * Allocate a task-management IO and issue an FCP target reset to @itnim.
 *
 * Fire-and-forget: does not wait for completion. The caller is expected
 * to wait on cmnd->SCp (bfad_im_reset_bus_handler() sets SCp.ptr to a
 * wait queue before calling this).
 *
 * Returns BFA_STATUS_OK on successful start, BFA_STATUS_FAILED if no
 * tskim could be allocated.
 */
static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
			struct bfad_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_itnim_s *bfa_itnim;
	bfa_status_t rc = BFA_STATUS_OK;
	struct scsi_lun scsilun;

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, fail to allocate tskim\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL to avoid aborting a task command if
	 * happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	/* target reset applies to the whole target, so use LUN 0 */
	memset(&scsilun, 0, sizeof(scsilun));
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
	return rc;
}
283 283
284 /* 284 /*
285 * Scsi_Host template entry, resets a LUN and abort its all commands. 285 * Scsi_Host template entry, resets a LUN and abort its all commands.
286 * 286 *
287 * Returns: SUCCESS or FAILED. 287 * Returns: SUCCESS or FAILED.
288 * 288 *
289 */ 289 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_tskim_s *tskim;
	struct bfad_itnim_s *itnim;
	struct bfa_itnim_s *bfa_itnim;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	int rc = SUCCESS;
	unsigned long flags;
	enum bfi_tskim_status task_status;
	struct scsi_lun scsilun;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = itnim_data->itnim;
	if (!itnim) {
		/* remote port is gone; nothing to reset */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset, fail to allocate tskim");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL to avoid aborting a task command
	 * if happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.ptr = (char *)&wq;	/* completion path wakes us via this */
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	int_to_scsilun(cmnd->device->lun, &scsilun);
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* IO_DONE_BIT is presumably set by the TM completion path -- verify */
	wait_event(wq, test_bit(IO_DONE_BIT,
			(unsigned long *)&cmnd->SCp.Status));

	/* bit 0 is the done flag; the TM status lives in the upper bits */
	task_status = cmnd->SCp.Status >> 1;
	if (task_status != BFI_TSKIM_STS_OK) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset failure, status: %d\n", task_status);
		rc = FAILED;
	}

out:
	return rc;
}
350 350
351 /* 351 /*
352 * Scsi_Host template entry, resets the bus and abort all commands. 352 * Scsi_Host template entry, resets the bus and abort all commands.
353 */ 353 */
static int
bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_s *itnim;
	unsigned long flags;
	u32 i, rc, err_cnt = 0;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* issue a target reset to every known target on this port */
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		itnim = bfad_get_itnim(im_port, i);
		if (itnim) {
			cmnd->SCp.ptr = (char *)&wq;
			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
			if (rc != BFA_STATUS_OK) {
				err_cnt++;
				continue;
			}

			/* wait target reset to complete */
			/* must drop the lock while sleeping, then retake it */
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			wait_event(wq, test_bit(IO_DONE_BIT,
					(unsigned long *)&cmnd->SCp.Status));
			spin_lock_irqsave(&bfad->bfad_lock, flags);

			/* bit 0 is the done flag; TM status in upper bits */
			task_status = cmnd->SCp.Status >> 1;
			if (task_status != BFI_TSKIM_STS_OK) {
				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
					"target reset failure,"
					" status: %d\n", task_status);
				err_cnt++;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* FAILED if any individual target reset failed */
	if (err_cnt)
		return FAILED;

	return SUCCESS;
}
400 400
401 /* 401 /*
402 * Scsi_Host template entry slave_destroy. 402 * Scsi_Host template entry slave_destroy.
403 */ 403 */
404 static void 404 static void
405 bfad_im_slave_destroy(struct scsi_device *sdev) 405 bfad_im_slave_destroy(struct scsi_device *sdev)
406 { 406 {
407 sdev->hostdata = NULL; 407 sdev->hostdata = NULL;
408 return; 408 return;
409 } 409 }
410 410
411 /* 411 /*
412 * BFA FCS itnim callbacks 412 * BFA FCS itnim callbacks
413 */ 413 */
414 414
415 /* 415 /*
416 * BFA FCS itnim alloc callback, after successful PRLI 416 * BFA FCS itnim alloc callback, after successful PRLI
417 * Context: Interrupt 417 * Context: Interrupt
418 */ 418 */
void
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
		struct bfad_itnim_s **itnim_drv)
{
	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
	if (*itnim_drv == NULL)
		/* NOTE(review): *itnim is left untouched on allocation
		 * failure -- callers presumably check *itnim_drv; verify. */
		return;

	(*itnim_drv)->im = bfad->im;
	*itnim = &(*itnim_drv)->fcs_itnim;
	(*itnim_drv)->state = ITNIM_STATE_NONE;

	/*
	 * Initialize the itnim_work
	 */
	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
437 437
438 /* 438 /*
439 * BFA FCS itnim free callback. 439 * BFA FCS itnim free callback.
440 * Context: Interrupt. bfad_lock is held 440 * Context: Interrupt. bfad_lock is held
441 */ 441 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];
	struct bfad_im_s *im = itnim_drv->im;

	/* online to free state transition should not happen */
	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);

	itnim_drv->queue_work = 1;
	/* offline request is not yet done, use the same request to free */
	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
		itnim_drv->queue_work = 0;

	itnim_drv->state = ITNIM_STATE_FREE;
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->im_port = port->im_port;
	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
	wwn2str(wwpn_str, wwpn);
	fcid2str(fcid_str, fcid);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
		port->im_port->shost->host_no,
		fcid_str, wwpn_str);

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
475 475
476 /* 476 /*
477 * BFA FCS itnim online callback. 477 * BFA FCS itnim online callback.
478 * Context: Interrupt. bfad_lock is held 478 * Context: Interrupt. bfad_lock is held
479 */ 479 */
480 void 480 void
481 bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) 481 bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
482 { 482 {
483 struct bfad_port_s *port; 483 struct bfad_port_s *port;
484 struct bfad_im_s *im = itnim_drv->im; 484 struct bfad_im_s *im = itnim_drv->im;
485 485
486 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); 486 itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
487 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); 487 port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
488 itnim_drv->state = ITNIM_STATE_ONLINE; 488 itnim_drv->state = ITNIM_STATE_ONLINE;
489 itnim_drv->queue_work = 1; 489 itnim_drv->queue_work = 1;
490 itnim_drv->im_port = port->im_port; 490 itnim_drv->im_port = port->im_port;
491 491
492 /* ITNIM processing */ 492 /* ITNIM processing */
493 if (itnim_drv->queue_work) 493 if (itnim_drv->queue_work)
494 queue_work(im->drv_workq, &itnim_drv->itnim_work); 494 queue_work(im->drv_workq, &itnim_drv->itnim_work);
495 } 495 }
496 496
497 /* 497 /*
498 * BFA FCS itnim offline callback. 498 * BFA FCS itnim offline callback.
499 * Context: Interrupt. bfad_lock is held 499 * Context: Interrupt. bfad_lock is held
500 */ 500 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_s *bfad;
	struct bfad_im_s *im = itnim_drv->im;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	/* port is being deleted: mark offline, no need to queue work */
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
	    (port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
523 523
524 /* 524 /*
525 * Allocate a Scsi_Host for a port. 525 * Allocate a Scsi_Host for a port.
526 */ 526 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
			struct device *dev)
{
	int error = 1;

	/* allocate a unique index for this port under bfad_mutex */
	mutex_lock(&bfad_mutex);
	if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_pre_get failure\n");
		goto out;
	}

	error = idr_get_new(&bfad_im_port_index, im_port,
			 &im_port->idr_id);
	if (error) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_get_new failure\n");
		goto out;
	}

	mutex_unlock(&bfad_mutex);

	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	/* stash the im_port pointer in the host's private data */
	im_port->shost->hostdata[0] = (unsigned long)im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	/* physical base port and vports use different FC transports */
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		im_port->shost->transportt = bfad_im_scsi_transport_template;
	else
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;
	}

	/* setup host fixed attribute if the lk supports */
	bfad_fc_host_init(im_port);

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
out:
	return error;
}
590 590
591 void 591 void
592 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 592 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
593 { 593 {
594 bfa_trc(bfad, bfad->inst_no); 594 bfa_trc(bfad, bfad->inst_no);
595 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", 595 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
596 im_port->shost->host_no); 596 im_port->shost->host_no);
597 597
598 fc_remove_host(im_port->shost); 598 fc_remove_host(im_port->shost);
599 599
600 scsi_remove_host(im_port->shost); 600 scsi_remove_host(im_port->shost);
601 scsi_host_put(im_port->shost); 601 scsi_host_put(im_port->shost);
602 602
603 mutex_lock(&bfad_mutex); 603 mutex_lock(&bfad_mutex);
604 idr_remove(&bfad_im_port_index, im_port->idr_id); 604 idr_remove(&bfad_im_port_index, im_port->idr_id);
605 mutex_unlock(&bfad_mutex); 605 mutex_unlock(&bfad_mutex);
606 } 606 }
607 607
608 static void 608 static void
609 bfad_im_port_delete_handler(struct work_struct *work) 609 bfad_im_port_delete_handler(struct work_struct *work)
610 { 610 {
611 struct bfad_im_port_s *im_port = 611 struct bfad_im_port_s *im_port =
612 container_of(work, struct bfad_im_port_s, port_delete_work); 612 container_of(work, struct bfad_im_port_s, port_delete_work);
613 613
614 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { 614 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
615 im_port->flags |= BFAD_PORT_DELETE; 615 im_port->flags |= BFAD_PORT_DELETE;
616 fc_vport_terminate(im_port->fc_vport); 616 fc_vport_terminate(im_port->fc_vport);
617 } 617 }
618 } 618 }
619 619
620 bfa_status_t 620 bfa_status_t
621 bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port) 621 bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
622 { 622 {
623 int rc = BFA_STATUS_OK; 623 int rc = BFA_STATUS_OK;
624 struct bfad_im_port_s *im_port; 624 struct bfad_im_port_s *im_port;
625 625
626 im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC); 626 im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
627 if (im_port == NULL) { 627 if (im_port == NULL) {
628 rc = BFA_STATUS_ENOMEM; 628 rc = BFA_STATUS_ENOMEM;
629 goto ext; 629 goto ext;
630 } 630 }
631 port->im_port = im_port; 631 port->im_port = im_port;
632 im_port->port = port; 632 im_port->port = port;
633 im_port->bfad = bfad; 633 im_port->bfad = bfad;
634 634
635 INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler); 635 INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
636 INIT_LIST_HEAD(&im_port->itnim_mapped_list); 636 INIT_LIST_HEAD(&im_port->itnim_mapped_list);
637 INIT_LIST_HEAD(&im_port->binding_list); 637 INIT_LIST_HEAD(&im_port->binding_list);
638 638
639 ext: 639 ext:
640 return rc; 640 return rc;
641 } 641 }
642 642
643 void 643 void
644 bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) 644 bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
645 { 645 {
646 struct bfad_im_port_s *im_port = port->im_port; 646 struct bfad_im_port_s *im_port = port->im_port;
647 647
648 queue_work(bfad->im->drv_workq, 648 queue_work(bfad->im->drv_workq,
649 &im_port->port_delete_work); 649 &im_port->port_delete_work);
650 } 650 }
651 651
void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad = im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* free every FCP binding entry still queued on this port */
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
				list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	WARN_ON(!list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
671 671
672 bfa_status_t 672 bfa_status_t
673 bfad_im_probe(struct bfad_s *bfad) 673 bfad_im_probe(struct bfad_s *bfad)
674 { 674 {
675 struct bfad_im_s *im; 675 struct bfad_im_s *im;
676 bfa_status_t rc = BFA_STATUS_OK; 676 bfa_status_t rc = BFA_STATUS_OK;
677 677
678 im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL); 678 im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
679 if (im == NULL) { 679 if (im == NULL) {
680 rc = BFA_STATUS_ENOMEM; 680 rc = BFA_STATUS_ENOMEM;
681 goto ext; 681 goto ext;
682 } 682 }
683 683
684 bfad->im = im; 684 bfad->im = im;
685 im->bfad = bfad; 685 im->bfad = bfad;
686 686
687 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { 687 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
688 kfree(im); 688 kfree(im);
689 rc = BFA_STATUS_FAILED; 689 rc = BFA_STATUS_FAILED;
690 } 690 }
691 691
692 ext: 692 ext:
693 return rc; 693 return rc;
694 } 694 }
695 695
696 void 696 void
697 bfad_im_probe_undo(struct bfad_s *bfad) 697 bfad_im_probe_undo(struct bfad_s *bfad)
698 { 698 {
699 if (bfad->im) { 699 if (bfad->im) {
700 bfad_destroy_workq(bfad->im); 700 bfad_destroy_workq(bfad->im);
701 kfree(bfad->im); 701 kfree(bfad->im);
702 bfad->im = NULL; 702 bfad->im = NULL;
703 } 703 }
704 } 704 }
705 705
struct Scsi_Host *
bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	/* base port and vports use different host templates */
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	/*
	 * NOTE(review): this writes into the shared, file-scope template
	 * before cloning it -- all hosts allocated afterwards inherit the
	 * last value written; confirm that is intended.
	 */
	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	/* hostdata[0] holds the im_port pointer (one unsigned long) */
	return scsi_host_alloc(sht, sizeof(unsigned long));
}
720 720
721 void 721 void
722 bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 722 bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
723 { 723 {
724 if (!(im_port->flags & BFAD_PORT_DELETE)) 724 if (!(im_port->flags & BFAD_PORT_DELETE))
725 flush_workqueue(bfad->im->drv_workq); 725 flush_workqueue(bfad->im->drv_workq);
726 bfad_im_scsi_host_free(im_port->bfad, im_port); 726 bfad_im_scsi_host_free(im_port->bfad, im_port);
727 bfad_im_port_clean(im_port); 727 bfad_im_port_clean(im_port);
728 kfree(im_port); 728 kfree(im_port);
729 } 729 }
730 730
731 void 731 void
732 bfad_destroy_workq(struct bfad_im_s *im) 732 bfad_destroy_workq(struct bfad_im_s *im)
733 { 733 {
734 if (im && im->drv_workq) { 734 if (im && im->drv_workq) {
735 flush_workqueue(im->drv_workq); 735 flush_workqueue(im->drv_workq);
736 destroy_workqueue(im->drv_workq); 736 destroy_workqueue(im->drv_workq);
737 im->drv_workq = NULL; 737 im->drv_workq = NULL;
738 } 738 }
739 } 739 }
740 740
741 bfa_status_t 741 bfa_status_t
742 bfad_thread_workq(struct bfad_s *bfad) 742 bfad_thread_workq(struct bfad_s *bfad)
743 { 743 {
744 struct bfad_im_s *im = bfad->im; 744 struct bfad_im_s *im = bfad->im;
745 745
746 bfa_trc(bfad, 0); 746 bfa_trc(bfad, 0);
747 snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", 747 snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
748 bfad->inst_no); 748 bfad->inst_no);
749 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); 749 im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
750 if (!im->drv_workq) 750 if (!im->drv_workq)
751 return BFA_STATUS_FAILED; 751 return BFA_STATUS_FAILED;
752 752
753 return BFA_STATUS_OK; 753 return BFA_STATUS_OK;
754 } 754 }
755 755
756 /* 756 /*
757 * Scsi_Host template entry. 757 * Scsi_Host template entry.
758 * 758 *
759 * Description: 759 * Description:
760 * OS entry point to adjust the queue_depths on a per-device basis. 760 * OS entry point to adjust the queue_depths on a per-device basis.
761 * Called once per device during the bus scan. 761 * Called once per device during the bus scan.
762 * Return non-zero if fails. 762 * Return non-zero if fails.
763 */ 763 */
764 static int 764 static int
765 bfad_im_slave_configure(struct scsi_device *sdev) 765 bfad_im_slave_configure(struct scsi_device *sdev)
766 { 766 {
767 if (sdev->tagged_supported) 767 if (sdev->tagged_supported)
768 scsi_activate_tcq(sdev, bfa_lun_queue_depth); 768 scsi_activate_tcq(sdev, bfa_lun_queue_depth);
769 else 769 else
770 scsi_deactivate_tcq(sdev, bfa_lun_queue_depth); 770 scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);
771 771
772 return 0; 772 return 0;
773 } 773 }
774 774
/* SCSI host template for the physical (base) port. */
struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	/* sg_tablesize is overridden per-adapter in bfad_scsi_host_alloc() */
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = 0xFFFF,
};
795 795
/*
 * SCSI host template for virtual ports (vports); differs from the base
 * template only in the sysfs attribute set (bfad_im_vport_attrs).
 */
struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	/* sg_tablesize is overridden per-adapter in bfad_scsi_host_alloc() */
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = 0xFFFF,
};
816 816
817 bfa_status_t 817 bfa_status_t
818 bfad_im_module_init(void) 818 bfad_im_module_init(void)
819 { 819 {
820 bfad_im_scsi_transport_template = 820 bfad_im_scsi_transport_template =
821 fc_attach_transport(&bfad_im_fc_function_template); 821 fc_attach_transport(&bfad_im_fc_function_template);
822 if (!bfad_im_scsi_transport_template) 822 if (!bfad_im_scsi_transport_template)
823 return BFA_STATUS_ENOMEM; 823 return BFA_STATUS_ENOMEM;
824 824
825 bfad_im_scsi_vport_transport_template = 825 bfad_im_scsi_vport_transport_template =
826 fc_attach_transport(&bfad_im_vport_fc_function_template); 826 fc_attach_transport(&bfad_im_vport_fc_function_template);
827 if (!bfad_im_scsi_vport_transport_template) { 827 if (!bfad_im_scsi_vport_transport_template) {
828 fc_release_transport(bfad_im_scsi_transport_template); 828 fc_release_transport(bfad_im_scsi_transport_template);
829 return BFA_STATUS_ENOMEM; 829 return BFA_STATUS_ENOMEM;
830 } 830 }
831 831
832 return BFA_STATUS_OK; 832 return BFA_STATUS_OK;
833 } 833 }
834 834
835 void 835 void
836 bfad_im_module_exit(void) 836 bfad_im_module_exit(void)
837 { 837 {
838 if (bfad_im_scsi_transport_template) 838 if (bfad_im_scsi_transport_template)
839 fc_release_transport(bfad_im_scsi_transport_template); 839 fc_release_transport(bfad_im_scsi_transport_template);
840 840
841 if (bfad_im_scsi_vport_transport_template) 841 if (bfad_im_scsi_vport_transport_template)
842 fc_release_transport(bfad_im_scsi_vport_transport_template); 842 fc_release_transport(bfad_im_scsi_vport_transport_template);
843 } 843 }
844 844
845 void 845 void
846 bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 846 bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
847 { 847 {
848 struct scsi_device *tmp_sdev; 848 struct scsi_device *tmp_sdev;
849 849
850 if (((jiffies - itnim->last_ramp_up_time) > 850 if (((jiffies - itnim->last_ramp_up_time) >
851 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) && 851 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
852 ((jiffies - itnim->last_queue_full_time) > 852 ((jiffies - itnim->last_queue_full_time) >
853 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) { 853 BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
854 shost_for_each_device(tmp_sdev, sdev->host) { 854 shost_for_each_device(tmp_sdev, sdev->host) {
855 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { 855 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
856 if (tmp_sdev->id != sdev->id) 856 if (tmp_sdev->id != sdev->id)
857 continue; 857 continue;
858 if (tmp_sdev->ordered_tags) 858 if (tmp_sdev->ordered_tags)
859 scsi_adjust_queue_depth(tmp_sdev, 859 scsi_adjust_queue_depth(tmp_sdev,
860 MSG_ORDERED_TAG, 860 MSG_ORDERED_TAG,
861 tmp_sdev->queue_depth + 1); 861 tmp_sdev->queue_depth + 1);
862 else 862 else
863 scsi_adjust_queue_depth(tmp_sdev, 863 scsi_adjust_queue_depth(tmp_sdev,
864 MSG_SIMPLE_TAG, 864 MSG_SIMPLE_TAG,
865 tmp_sdev->queue_depth + 1); 865 tmp_sdev->queue_depth + 1);
866 866
867 itnim->last_ramp_up_time = jiffies; 867 itnim->last_ramp_up_time = jiffies;
868 } 868 }
869 } 869 }
870 } 870 }
871 } 871 }
872 872
873 void 873 void
874 bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) 874 bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
875 { 875 {
876 struct scsi_device *tmp_sdev; 876 struct scsi_device *tmp_sdev;
877 877
878 itnim->last_queue_full_time = jiffies; 878 itnim->last_queue_full_time = jiffies;
879 879
880 shost_for_each_device(tmp_sdev, sdev->host) { 880 shost_for_each_device(tmp_sdev, sdev->host) {
881 if (tmp_sdev->id != sdev->id) 881 if (tmp_sdev->id != sdev->id)
882 continue; 882 continue;
883 scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); 883 scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
884 } 884 }
885 } 885 }
886 886
887 struct bfad_itnim_s * 887 struct bfad_itnim_s *
888 bfad_get_itnim(struct bfad_im_port_s *im_port, int id) 888 bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
889 { 889 {
890 struct bfad_itnim_s *itnim = NULL; 890 struct bfad_itnim_s *itnim = NULL;
891 891
892 /* Search the mapped list for this target ID */ 892 /* Search the mapped list for this target ID */
893 list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { 893 list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
894 if (id == itnim->scsi_tgt_id) 894 if (id == itnim->scsi_tgt_id)
895 return itnim; 895 return itnim;
896 } 896 }
897 897
898 return NULL; 898 return NULL;
899 } 899 }
900 900
901 /* 901 /*
902 * Scsi_Host template entry slave_alloc 902 * Scsi_Host template entry slave_alloc
903 */ 903 */
904 static int 904 static int
905 bfad_im_slave_alloc(struct scsi_device *sdev) 905 bfad_im_slave_alloc(struct scsi_device *sdev)
906 { 906 {
907 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 907 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
908 908
909 if (!rport || fc_remote_port_chkready(rport)) 909 if (!rport || fc_remote_port_chkready(rport))
910 return -ENXIO; 910 return -ENXIO;
911 911
912 sdev->hostdata = rport->dd_data; 912 sdev->hostdata = rport->dd_data;
913 913
914 return 0; 914 return 0;
915 } 915 }
916 916
917 static u32 917 static u32
918 bfad_im_supported_speeds(struct bfa_s *bfa) 918 bfad_im_supported_speeds(struct bfa_s *bfa)
919 { 919 {
920 struct bfa_ioc_attr_s *ioc_attr; 920 struct bfa_ioc_attr_s *ioc_attr;
921 u32 supported_speed = 0; 921 u32 supported_speed = 0;
922 922
923 ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL); 923 ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
924 if (!ioc_attr) 924 if (!ioc_attr)
925 return 0; 925 return 0;
926 926
927 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 927 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
928 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 928 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
929 if (ioc_attr->adapter_attr.is_mezz) { 929 if (ioc_attr->adapter_attr.is_mezz) {
930 supported_speed |= FC_PORTSPEED_8GBIT | 930 supported_speed |= FC_PORTSPEED_8GBIT |
931 FC_PORTSPEED_4GBIT | 931 FC_PORTSPEED_4GBIT |
932 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 932 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
933 } else { 933 } else {
934 supported_speed |= FC_PORTSPEED_8GBIT | 934 supported_speed |= FC_PORTSPEED_8GBIT |
935 FC_PORTSPEED_4GBIT | 935 FC_PORTSPEED_4GBIT |
936 FC_PORTSPEED_2GBIT; 936 FC_PORTSPEED_2GBIT;
937 } 937 }
938 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { 938 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
939 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | 939 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
940 FC_PORTSPEED_1GBIT; 940 FC_PORTSPEED_1GBIT;
941 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { 941 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
942 supported_speed |= FC_PORTSPEED_10GBIT; 942 supported_speed |= FC_PORTSPEED_10GBIT;
943 } 943 }
944 kfree(ioc_attr); 944 kfree(ioc_attr);
945 return supported_speed; 945 return supported_speed;
946 } 946 }
947 947
948 void 948 void
949 bfad_fc_host_init(struct bfad_im_port_s *im_port) 949 bfad_fc_host_init(struct bfad_im_port_s *im_port)
950 { 950 {
951 struct Scsi_Host *host = im_port->shost; 951 struct Scsi_Host *host = im_port->shost;
952 struct bfad_s *bfad = im_port->bfad; 952 struct bfad_s *bfad = im_port->bfad;
953 struct bfad_port_s *port = im_port->port; 953 struct bfad_port_s *port = im_port->port;
954 char symname[BFA_SYMNAME_MAXLEN]; 954 char symname[BFA_SYMNAME_MAXLEN];
955 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); 955 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
956 956
957 fc_host_node_name(host) = 957 fc_host_node_name(host) =
958 cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port))); 958 cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
959 fc_host_port_name(host) = 959 fc_host_port_name(host) =
960 cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port))); 960 cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
961 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); 961 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
962 962
963 fc_host_supported_classes(host) = FC_COS_CLASS3; 963 fc_host_supported_classes(host) = FC_COS_CLASS3;
964 964
965 memset(fc_host_supported_fc4s(host), 0, 965 memset(fc_host_supported_fc4s(host), 0,
966 sizeof(fc_host_supported_fc4s(host))); 966 sizeof(fc_host_supported_fc4s(host)));
967 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) 967 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
968 /* For FCP type 0x08 */ 968 /* For FCP type 0x08 */
969 fc_host_supported_fc4s(host)[2] = 1; 969 fc_host_supported_fc4s(host)[2] = 1;
970 /* For fibre channel services type 0x20 */ 970 /* For fibre channel services type 0x20 */
971 fc_host_supported_fc4s(host)[7] = 1; 971 fc_host_supported_fc4s(host)[7] = 1;
972 972
973 strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, 973 strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
974 BFA_SYMNAME_MAXLEN); 974 BFA_SYMNAME_MAXLEN);
975 sprintf(fc_host_symbolic_name(host), "%s", symname); 975 sprintf(fc_host_symbolic_name(host), "%s", symname);
976 976
977 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 977 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
978 fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; 978 fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
979 } 979 }
980 980
/*
 * Register the itnim's remote port with the FC transport and link the
 * transport rport back to the driver's itnim.
 *
 * NOTE(review): call order matters here — fc_rport->dd_data is only
 * valid after fc_remote_port_add() succeeds, and the role change must
 * follow the add.  Do not reorder.
 */
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	/* Identify the remote port by WWNN/WWPN/FCID from the FCS itnim. */
	rport_ids.node_name =
		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	/* Point the rport's private data back at our itnim. */
	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	/* The nexus is an FCP target from this initiator's point of view. */
	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	/* Cache the SCSI target id assigned by the midlayer, if valid. */
	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	return;
}
1020 1020
1021 /* 1021 /*
1022 * Work queue handler using FC transport service 1022 * Work queue handler using FC transport service
1023 * Context: kernel 1023 * Context: kernel
1024 */ 1024 */
1025 static void 1025 static void
1026 bfad_im_itnim_work_handler(struct work_struct *work) 1026 bfad_im_itnim_work_handler(struct work_struct *work)
1027 { 1027 {
1028 struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s, 1028 struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
1029 itnim_work); 1029 itnim_work);
1030 struct bfad_im_s *im = itnim->im; 1030 struct bfad_im_s *im = itnim->im;
1031 struct bfad_s *bfad = im->bfad; 1031 struct bfad_s *bfad = im->bfad;
1032 struct bfad_im_port_s *im_port; 1032 struct bfad_im_port_s *im_port;
1033 unsigned long flags; 1033 unsigned long flags;
1034 struct fc_rport *fc_rport; 1034 struct fc_rport *fc_rport;
1035 wwn_t wwpn; 1035 wwn_t wwpn;
1036 u32 fcid; 1036 u32 fcid;
1037 char wwpn_str[32], fcid_str[16]; 1037 char wwpn_str[32], fcid_str[16];
1038 1038
1039 spin_lock_irqsave(&bfad->bfad_lock, flags); 1039 spin_lock_irqsave(&bfad->bfad_lock, flags);
1040 im_port = itnim->im_port; 1040 im_port = itnim->im_port;
1041 bfa_trc(bfad, itnim->state); 1041 bfa_trc(bfad, itnim->state);
1042 switch (itnim->state) { 1042 switch (itnim->state) {
1043 case ITNIM_STATE_ONLINE: 1043 case ITNIM_STATE_ONLINE:
1044 if (!itnim->fc_rport) { 1044 if (!itnim->fc_rport) {
1045 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1045 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1046 bfad_im_fc_rport_add(im_port, itnim); 1046 bfad_im_fc_rport_add(im_port, itnim);
1047 spin_lock_irqsave(&bfad->bfad_lock, flags); 1047 spin_lock_irqsave(&bfad->bfad_lock, flags);
1048 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); 1048 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
1049 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); 1049 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
1050 wwn2str(wwpn_str, wwpn); 1050 wwn2str(wwpn_str, wwpn);
1051 fcid2str(fcid_str, fcid); 1051 fcid2str(fcid_str, fcid);
1052 list_add_tail(&itnim->list_entry, 1052 list_add_tail(&itnim->list_entry,
1053 &im_port->itnim_mapped_list); 1053 &im_port->itnim_mapped_list);
1054 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1054 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
1055 "ITNIM ONLINE Target: %d:0:%d " 1055 "ITNIM ONLINE Target: %d:0:%d "
1056 "FCID: %s WWPN: %s\n", 1056 "FCID: %s WWPN: %s\n",
1057 im_port->shost->host_no, 1057 im_port->shost->host_no,
1058 itnim->scsi_tgt_id, 1058 itnim->scsi_tgt_id,
1059 fcid_str, wwpn_str); 1059 fcid_str, wwpn_str);
1060 } else { 1060 } else {
1061 printk(KERN_WARNING 1061 printk(KERN_WARNING
1062 "%s: itnim %llx is already in online state\n", 1062 "%s: itnim %llx is already in online state\n",
1063 __func__, 1063 __func__,
1064 bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); 1064 bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
1065 } 1065 }
1066 1066
1067 break; 1067 break;
1068 case ITNIM_STATE_OFFLINE_PENDING: 1068 case ITNIM_STATE_OFFLINE_PENDING:
1069 itnim->state = ITNIM_STATE_OFFLINE; 1069 itnim->state = ITNIM_STATE_OFFLINE;
1070 if (itnim->fc_rport) { 1070 if (itnim->fc_rport) {
1071 fc_rport = itnim->fc_rport; 1071 fc_rport = itnim->fc_rport;
1072 ((struct bfad_itnim_data_s *) 1072 ((struct bfad_itnim_data_s *)
1073 fc_rport->dd_data)->itnim = NULL; 1073 fc_rport->dd_data)->itnim = NULL;
1074 itnim->fc_rport = NULL; 1074 itnim->fc_rport = NULL;
1075 if (!(im_port->port->flags & BFAD_PORT_DELETE)) { 1075 if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
1076 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1076 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1077 fc_rport->dev_loss_tmo = 1077 fc_rport->dev_loss_tmo =
1078 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; 1078 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1079 fc_remote_port_delete(fc_rport); 1079 fc_remote_port_delete(fc_rport);
1080 spin_lock_irqsave(&bfad->bfad_lock, flags); 1080 spin_lock_irqsave(&bfad->bfad_lock, flags);
1081 } 1081 }
1082 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); 1082 wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
1083 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); 1083 fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
1084 wwn2str(wwpn_str, wwpn); 1084 wwn2str(wwpn_str, wwpn);
1085 fcid2str(fcid_str, fcid); 1085 fcid2str(fcid_str, fcid);
1086 list_del(&itnim->list_entry); 1086 list_del(&itnim->list_entry);
1087 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1087 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
1088 "ITNIM OFFLINE Target: %d:0:%d " 1088 "ITNIM OFFLINE Target: %d:0:%d "
1089 "FCID: %s WWPN: %s\n", 1089 "FCID: %s WWPN: %s\n",
1090 im_port->shost->host_no, 1090 im_port->shost->host_no,
1091 itnim->scsi_tgt_id, 1091 itnim->scsi_tgt_id,
1092 fcid_str, wwpn_str); 1092 fcid_str, wwpn_str);
1093 } 1093 }
1094 break; 1094 break;
1095 case ITNIM_STATE_FREE: 1095 case ITNIM_STATE_FREE:
1096 if (itnim->fc_rport) { 1096 if (itnim->fc_rport) {
1097 fc_rport = itnim->fc_rport; 1097 fc_rport = itnim->fc_rport;
1098 ((struct bfad_itnim_data_s *) 1098 ((struct bfad_itnim_data_s *)
1099 fc_rport->dd_data)->itnim = NULL; 1099 fc_rport->dd_data)->itnim = NULL;
1100 itnim->fc_rport = NULL; 1100 itnim->fc_rport = NULL;
1101 if (!(im_port->port->flags & BFAD_PORT_DELETE)) { 1101 if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
1102 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1102 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1103 fc_rport->dev_loss_tmo = 1103 fc_rport->dev_loss_tmo =
1104 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; 1104 bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
1105 fc_remote_port_delete(fc_rport); 1105 fc_remote_port_delete(fc_rport);
1106 spin_lock_irqsave(&bfad->bfad_lock, flags); 1106 spin_lock_irqsave(&bfad->bfad_lock, flags);
1107 } 1107 }
1108 list_del(&itnim->list_entry); 1108 list_del(&itnim->list_entry);
1109 } 1109 }
1110 1110
1111 kfree(itnim); 1111 kfree(itnim);
1112 break; 1112 break;
1113 default: 1113 default:
1114 WARN_ON(1); 1114 WARN_ON(1);
1115 break; 1115 break;
1116 } 1116 }
1117 1117
1118 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1118 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1119 } 1119 }
1120 1120
1121 /* 1121 /*
1122 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1122 * Scsi_Host template entry, queue a SCSI command to the BFAD.
1123 */ 1123 */
1124 static int 1124 static int
1125 bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 1125 bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1126 { 1126 {
1127 struct bfad_im_port_s *im_port = 1127 struct bfad_im_port_s *im_port =
1128 (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; 1128 (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
1129 struct bfad_s *bfad = im_port->bfad; 1129 struct bfad_s *bfad = im_port->bfad;
1130 struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; 1130 struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
1131 struct bfad_itnim_s *itnim; 1131 struct bfad_itnim_s *itnim;
1132 struct bfa_ioim_s *hal_io; 1132 struct bfa_ioim_s *hal_io;
1133 unsigned long flags; 1133 unsigned long flags;
1134 int rc; 1134 int rc;
1135 int sg_cnt = 0; 1135 int sg_cnt = 0;
1136 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1136 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1137 1137
1138 rc = fc_remote_port_chkready(rport); 1138 rc = fc_remote_port_chkready(rport);
1139 if (rc) { 1139 if (rc) {
1140 cmnd->result = rc; 1140 cmnd->result = rc;
1141 done(cmnd); 1141 done(cmnd);
1142 return 0; 1142 return 0;
1143 } 1143 }
1144 1144
1145 sg_cnt = scsi_dma_map(cmnd); 1145 sg_cnt = scsi_dma_map(cmnd);
1146 if (sg_cnt < 0) 1146 if (sg_cnt < 0)
1147 return SCSI_MLQUEUE_HOST_BUSY; 1147 return SCSI_MLQUEUE_HOST_BUSY;
1148 1148
1149 cmnd->scsi_done = done; 1149 cmnd->scsi_done = done;
1150 1150
1151 spin_lock_irqsave(&bfad->bfad_lock, flags); 1151 spin_lock_irqsave(&bfad->bfad_lock, flags);
1152 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) { 1152 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
1153 printk(KERN_WARNING 1153 printk(KERN_WARNING
1154 "bfad%d, queuecommand %p %x failed, BFA stopped\n", 1154 "bfad%d, queuecommand %p %x failed, BFA stopped\n",
1155 bfad->inst_no, cmnd, cmnd->cmnd[0]); 1155 bfad->inst_no, cmnd, cmnd->cmnd[0]);
1156 cmnd->result = ScsiResult(DID_NO_CONNECT, 0); 1156 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
1157 goto out_fail_cmd; 1157 goto out_fail_cmd;
1158 } 1158 }
1159 1159
1160 1160
1161 itnim = itnim_data->itnim; 1161 itnim = itnim_data->itnim;
1162 if (!itnim) { 1162 if (!itnim) {
1163 cmnd->result = ScsiResult(DID_IMM_RETRY, 0); 1163 cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
1164 goto out_fail_cmd; 1164 goto out_fail_cmd;
1165 } 1165 }
1166 1166
1167 hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd, 1167 hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
1168 itnim->bfa_itnim, sg_cnt); 1168 itnim->bfa_itnim, sg_cnt);
1169 if (!hal_io) { 1169 if (!hal_io) {
1170 printk(KERN_WARNING "hal_io failure\n"); 1170 printk(KERN_WARNING "hal_io failure\n");
1171 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1171 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1172 scsi_dma_unmap(cmnd); 1172 scsi_dma_unmap(cmnd);
1173 return SCSI_MLQUEUE_HOST_BUSY; 1173 return SCSI_MLQUEUE_HOST_BUSY;
1174 } 1174 }
1175 1175
1176 cmnd->host_scribble = (char *)hal_io; 1176 cmnd->host_scribble = (char *)hal_io;
1177 bfa_trc_fp(bfad, hal_io->iotag);
1178 bfa_ioim_start(hal_io); 1177 bfa_ioim_start(hal_io);
1179 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1178 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1180 1179
1181 return 0; 1180 return 0;
1182 1181
1183 out_fail_cmd: 1182 out_fail_cmd:
1184 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1183 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1185 scsi_dma_unmap(cmnd); 1184 scsi_dma_unmap(cmnd);
1186 if (done) 1185 if (done)
1187 done(cmnd); 1186 done(cmnd);
1188 1187
1189 return 0; 1188 return 0;
1190 } 1189 }
1191 1190
/* Generate the host-lock wrapper around bfad_im_queuecommand_lck(). */
static DEF_SCSI_QCMD(bfad_im_queuecommand)
1193 1192
1194 void 1193 void
1195 bfad_rport_online_wait(struct bfad_s *bfad) 1194 bfad_rport_online_wait(struct bfad_s *bfad)
1196 { 1195 {
1197 int i; 1196 int i;
1198 int rport_delay = 10; 1197 int rport_delay = 10;
1199 1198
1200 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) 1199 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
1201 && i < bfa_linkup_delay; i++) { 1200 && i < bfa_linkup_delay; i++) {
1202 set_current_state(TASK_UNINTERRUPTIBLE); 1201 set_current_state(TASK_UNINTERRUPTIBLE);
1203 schedule_timeout(HZ); 1202 schedule_timeout(HZ);
1204 } 1203 }
1205 1204
1206 if (bfad->bfad_flags & BFAD_PORT_ONLINE) { 1205 if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
1207 rport_delay = rport_delay < bfa_linkup_delay ? 1206 rport_delay = rport_delay < bfa_linkup_delay ?
1208 rport_delay : bfa_linkup_delay; 1207 rport_delay : bfa_linkup_delay;
1209 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) 1208 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
1210 && i < rport_delay; i++) { 1209 && i < rport_delay; i++) {
1211 set_current_state(TASK_UNINTERRUPTIBLE); 1210 set_current_state(TASK_UNINTERRUPTIBLE);
1212 schedule_timeout(HZ); 1211 schedule_timeout(HZ);
1213 } 1212 }
1214 1213
1215 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { 1214 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
1216 set_current_state(TASK_UNINTERRUPTIBLE); 1215 set_current_state(TASK_UNINTERRUPTIBLE);
1217 schedule_timeout(rport_delay * HZ); 1216 schedule_timeout(rport_delay * HZ);
1218 } 1217 }
1219 } 1218 }
1220 } 1219 }
1221 1220
1222 int 1221 int
1223 bfad_get_linkup_delay(struct bfad_s *bfad) 1222 bfad_get_linkup_delay(struct bfad_s *bfad)
1224 { 1223 {
1225 u8 nwwns = 0; 1224 u8 nwwns = 0;
1226 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; 1225 wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
1227 int linkup_delay; 1226 int linkup_delay;
1228 1227
1229 /* 1228 /*
1230 * Querying for the boot target port wwns 1229 * Querying for the boot target port wwns
1231 * -- read from boot information in flash. 1230 * -- read from boot information in flash.
1232 * If nwwns > 0 => boot over SAN and set linkup_delay = 30 1231 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
1233 * else => local boot machine set linkup_delay = 0 1232 * else => local boot machine set linkup_delay = 0
1234 */ 1233 */
1235 1234
1236 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); 1235 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
1237 1236
1238 if (nwwns > 0) 1237 if (nwwns > 0)
1239 /* If Boot over SAN set linkup_delay = 30sec */ 1238 /* If Boot over SAN set linkup_delay = 30sec */
1240 linkup_delay = 30; 1239 linkup_delay = 30;
1241 else 1240 else
1242 /* If local boot; no linkup_delay */ 1241 /* If local boot; no linkup_delay */
1243 linkup_delay = 0; 1242 linkup_delay = 0;
1244 1243
1245 return linkup_delay; 1244 return linkup_delay;
1246 } 1245 }
1247 1246