Commit 58b48576966ed0afd3f63ef17480ec12748a7119

Authored by Andrew Vasquez
Committed by James Bottomley
1 parent 3553d343e7

[SCSI] qla2xxx: Correct inadvertent loop state transitions during port-update handling.

Transitioning to the LOOP_UPDATE loop-state could cause the driver
to miss normal link/target processing.  LOOP_UPDATE is a crufty
artifact left over from a time when the driver performed its own
internal command-queuing.  Safely remove this state.

Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Showing 2 changed files with 0 additions and 4 deletions
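Note: the four deleted lines fall beyond the truncated context reproduced below, so no "-" hunks are visible in this excerpt. Illustratively (a sketch of the kind of change, not the verbatim hunks), each deletion is a single loop-state transition of the form:

-	atomic_set(&vha->loop_state, LOOP_UPDATE);	/* removed; placement illustrative */

With these assignments gone, port-update handling leaves the current loop state intact, so normal link/target processing is no longer skipped.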

drivers/scsi/qla2xxx/qla_init.c
1 /* 1 /*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 #include "qla_def.h" 7 #include "qla_def.h"
8 #include "qla_gbl.h" 8 #include "qla_gbl.h"
9 9
10 #include <linux/delay.h> 10 #include <linux/delay.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <linux/vmalloc.h> 12 #include <linux/vmalloc.h>
13 13
14 #include "qla_devtbl.h" 14 #include "qla_devtbl.h"
15 15
16 #ifdef CONFIG_SPARC 16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h> 17 #include <asm/prom.h>
18 #endif 18 #endif
19 19
20 /* 20 /*
21 * QLogic ISP2x00 Hardware Support Function Prototypes. 21 * QLogic ISP2x00 Hardware Support Function Prototypes.
22 */ 22 */
23 static int qla2x00_isp_firmware(scsi_qla_host_t *); 23 static int qla2x00_isp_firmware(scsi_qla_host_t *);
24 static int qla2x00_setup_chip(scsi_qla_host_t *); 24 static int qla2x00_setup_chip(scsi_qla_host_t *);
25 static int qla2x00_init_rings(scsi_qla_host_t *); 25 static int qla2x00_init_rings(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *); 26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *); 27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *); 28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *); 29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *); 30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); 31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32 static int qla2x00_device_resync(scsi_qla_host_t *); 32 static int qla2x00_device_resync(scsi_qla_host_t *);
33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, 33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34 uint16_t *); 34 uint16_t *);
35 35
36 static int qla2x00_restart_isp(scsi_qla_host_t *); 36 static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 38 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
39 static int qla84xx_init_chip(scsi_qla_host_t *); 39 static int qla84xx_init_chip(scsi_qla_host_t *);
40 static int qla25xx_init_queues(struct qla_hw_data *); 40 static int qla25xx_init_queues(struct qla_hw_data *);
41 41
42 /* SRB Extensions ---------------------------------------------------------- */ 42 /* SRB Extensions ---------------------------------------------------------- */
43 43
44 static void 44 static void
45 qla2x00_ctx_sp_timeout(unsigned long __data) 45 qla2x00_ctx_sp_timeout(unsigned long __data)
46 { 46 {
47 srb_t *sp = (srb_t *)__data; 47 srb_t *sp = (srb_t *)__data;
48 struct srb_ctx *ctx; 48 struct srb_ctx *ctx;
49 struct srb_iocb *iocb; 49 struct srb_iocb *iocb;
50 fc_port_t *fcport = sp->fcport; 50 fc_port_t *fcport = sp->fcport;
51 struct qla_hw_data *ha = fcport->vha->hw; 51 struct qla_hw_data *ha = fcport->vha->hw;
52 struct req_que *req; 52 struct req_que *req;
53 unsigned long flags; 53 unsigned long flags;
54 54
55 spin_lock_irqsave(&ha->hardware_lock, flags); 55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 req = ha->req_q_map[0]; 56 req = ha->req_q_map[0];
57 req->outstanding_cmds[sp->handle] = NULL; 57 req->outstanding_cmds[sp->handle] = NULL;
58 ctx = sp->ctx; 58 ctx = sp->ctx;
59 iocb = ctx->u.iocb_cmd; 59 iocb = ctx->u.iocb_cmd;
60 iocb->timeout(sp); 60 iocb->timeout(sp);
61 iocb->free(sp); 61 iocb->free(sp);
62 spin_unlock_irqrestore(&ha->hardware_lock, flags); 62 spin_unlock_irqrestore(&ha->hardware_lock, flags);
63 } 63 }
64 64
65 static void 65 static void
66 qla2x00_ctx_sp_free(srb_t *sp) 66 qla2x00_ctx_sp_free(srb_t *sp)
67 { 67 {
68 struct srb_ctx *ctx = sp->ctx; 68 struct srb_ctx *ctx = sp->ctx;
69 struct srb_iocb *iocb = ctx->u.iocb_cmd; 69 struct srb_iocb *iocb = ctx->u.iocb_cmd;
70 struct scsi_qla_host *vha = sp->fcport->vha; 70 struct scsi_qla_host *vha = sp->fcport->vha;
71 71
72 del_timer(&iocb->timer); 72 del_timer(&iocb->timer);
73 kfree(iocb); 73 kfree(iocb);
74 kfree(ctx); 74 kfree(ctx);
75 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); 75 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
76 76
77 QLA_VHA_MARK_NOT_BUSY(vha); 77 QLA_VHA_MARK_NOT_BUSY(vha);
78 } 78 }
79 79
80 inline srb_t * 80 inline srb_t *
81 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, 81 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
82 unsigned long tmo) 82 unsigned long tmo)
83 { 83 {
84 srb_t *sp = NULL; 84 srb_t *sp = NULL;
85 struct qla_hw_data *ha = vha->hw; 85 struct qla_hw_data *ha = vha->hw;
86 struct srb_ctx *ctx; 86 struct srb_ctx *ctx;
87 struct srb_iocb *iocb; 87 struct srb_iocb *iocb;
88 uint8_t bail; 88 uint8_t bail;
89 89
90 QLA_VHA_MARK_BUSY(vha, bail); 90 QLA_VHA_MARK_BUSY(vha, bail);
91 if (bail) 91 if (bail)
92 return NULL; 92 return NULL;
93 93
94 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 94 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
95 if (!sp) 95 if (!sp)
96 goto done; 96 goto done;
97 ctx = kzalloc(size, GFP_KERNEL); 97 ctx = kzalloc(size, GFP_KERNEL);
98 if (!ctx) { 98 if (!ctx) {
99 mempool_free(sp, ha->srb_mempool); 99 mempool_free(sp, ha->srb_mempool);
100 sp = NULL; 100 sp = NULL;
101 goto done; 101 goto done;
102 } 102 }
103 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL); 103 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
104 if (!iocb) { 104 if (!iocb) {
105 mempool_free(sp, ha->srb_mempool); 105 mempool_free(sp, ha->srb_mempool);
106 sp = NULL; 106 sp = NULL;
107 kfree(ctx); 107 kfree(ctx);
108 goto done; 108 goto done;
109 } 109 }
110 110
111 memset(sp, 0, sizeof(*sp)); 111 memset(sp, 0, sizeof(*sp));
112 sp->fcport = fcport; 112 sp->fcport = fcport;
113 sp->ctx = ctx; 113 sp->ctx = ctx;
114 ctx->u.iocb_cmd = iocb; 114 ctx->u.iocb_cmd = iocb;
115 iocb->free = qla2x00_ctx_sp_free; 115 iocb->free = qla2x00_ctx_sp_free;
116 116
117 init_timer(&iocb->timer); 117 init_timer(&iocb->timer);
118 if (!tmo) 118 if (!tmo)
119 goto done; 119 goto done;
120 iocb->timer.expires = jiffies + tmo * HZ; 120 iocb->timer.expires = jiffies + tmo * HZ;
121 iocb->timer.data = (unsigned long)sp; 121 iocb->timer.data = (unsigned long)sp;
122 iocb->timer.function = qla2x00_ctx_sp_timeout; 122 iocb->timer.function = qla2x00_ctx_sp_timeout;
123 add_timer(&iocb->timer); 123 add_timer(&iocb->timer);
124 done: 124 done:
125 if (!sp) 125 if (!sp)
126 QLA_VHA_MARK_NOT_BUSY(vha); 126 QLA_VHA_MARK_NOT_BUSY(vha);
127 return sp; 127 return sp;
128 } 128 }
129 129
130 /* Asynchronous Login/Logout Routines -------------------------------------- */ 130 /* Asynchronous Login/Logout Routines -------------------------------------- */
131 131
132 static inline unsigned long 132 static inline unsigned long
133 qla2x00_get_async_timeout(struct scsi_qla_host *vha) 133 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
134 { 134 {
135 unsigned long tmo; 135 unsigned long tmo;
136 struct qla_hw_data *ha = vha->hw; 136 struct qla_hw_data *ha = vha->hw;
137 137
138 /* Firmware should use switch negotiated r_a_tov for timeout. */ 138 /* Firmware should use switch negotiated r_a_tov for timeout. */
139 tmo = ha->r_a_tov / 10 * 2; 139 tmo = ha->r_a_tov / 10 * 2;
140 if (!IS_FWI2_CAPABLE(ha)) { 140 if (!IS_FWI2_CAPABLE(ha)) {
141 /* 141 /*
142 * Except for earlier ISPs where the timeout is seeded from the 142 * Except for earlier ISPs where the timeout is seeded from the
143 * initialization control block. 143 * initialization control block.
144 */ 144 */
145 tmo = ha->login_timeout; 145 tmo = ha->login_timeout;
146 } 146 }
147 return tmo; 147 return tmo;
148 } 148 }
149 149
150 static void 150 static void
151 qla2x00_async_iocb_timeout(srb_t *sp) 151 qla2x00_async_iocb_timeout(srb_t *sp)
152 { 152 {
153 fc_port_t *fcport = sp->fcport; 153 fc_port_t *fcport = sp->fcport;
154 struct srb_ctx *ctx = sp->ctx; 154 struct srb_ctx *ctx = sp->ctx;
155 155
156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "Async-%s timeout - portid=%02x%02x%02x.\n", 157 "Async-%s timeout - portid=%02x%02x%02x.\n",
158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area, 158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
159 fcport->d_id.b.al_pa); 159 fcport->d_id.b.al_pa);
160 160
161 fcport->flags &= ~FCF_ASYNC_SENT; 161 fcport->flags &= ~FCF_ASYNC_SENT;
162 if (ctx->type == SRB_LOGIN_CMD) { 162 if (ctx->type == SRB_LOGIN_CMD) {
163 struct srb_iocb *lio = ctx->u.iocb_cmd; 163 struct srb_iocb *lio = ctx->u.iocb_cmd;
164 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); 164 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
165 /* Retry as needed. */ 165 /* Retry as needed. */
166 lio->u.logio.data[0] = MBS_COMMAND_ERROR; 166 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
167 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 167 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
168 QLA_LOGIO_LOGIN_RETRIED : 0; 168 QLA_LOGIO_LOGIN_RETRIED : 0;
169 qla2x00_post_async_login_done_work(fcport->vha, fcport, 169 qla2x00_post_async_login_done_work(fcport->vha, fcport,
170 lio->u.logio.data); 170 lio->u.logio.data);
171 } 171 }
172 } 172 }
173 173
174 static void 174 static void
175 qla2x00_async_login_ctx_done(srb_t *sp) 175 qla2x00_async_login_ctx_done(srb_t *sp)
176 { 176 {
177 struct srb_ctx *ctx = sp->ctx; 177 struct srb_ctx *ctx = sp->ctx;
178 struct srb_iocb *lio = ctx->u.iocb_cmd; 178 struct srb_iocb *lio = ctx->u.iocb_cmd;
179 179
180 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, 180 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
181 lio->u.logio.data); 181 lio->u.logio.data);
182 lio->free(sp); 182 lio->free(sp);
183 } 183 }
184 184
185 int 185 int
186 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, 186 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
187 uint16_t *data) 187 uint16_t *data)
188 { 188 {
189 srb_t *sp; 189 srb_t *sp;
190 struct srb_ctx *ctx; 190 struct srb_ctx *ctx;
191 struct srb_iocb *lio; 191 struct srb_iocb *lio;
192 int rval; 192 int rval;
193 193
194 rval = QLA_FUNCTION_FAILED; 194 rval = QLA_FUNCTION_FAILED;
195 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 195 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
196 qla2x00_get_async_timeout(vha) + 2); 196 qla2x00_get_async_timeout(vha) + 2);
197 if (!sp) 197 if (!sp)
198 goto done; 198 goto done;
199 199
200 ctx = sp->ctx; 200 ctx = sp->ctx;
201 ctx->type = SRB_LOGIN_CMD; 201 ctx->type = SRB_LOGIN_CMD;
202 ctx->name = "login"; 202 ctx->name = "login";
203 lio = ctx->u.iocb_cmd; 203 lio = ctx->u.iocb_cmd;
204 lio->timeout = qla2x00_async_iocb_timeout; 204 lio->timeout = qla2x00_async_iocb_timeout;
205 lio->done = qla2x00_async_login_ctx_done; 205 lio->done = qla2x00_async_login_ctx_done;
206 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; 206 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
207 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 207 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
208 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 208 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
209 rval = qla2x00_start_sp(sp); 209 rval = qla2x00_start_sp(sp);
210 if (rval != QLA_SUCCESS) 210 if (rval != QLA_SUCCESS)
211 goto done_free_sp; 211 goto done_free_sp;
212 212
213 ql_dbg(ql_dbg_disc, vha, 0x2072, 213 ql_dbg(ql_dbg_disc, vha, 0x2072,
214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n", 214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
216 fcport->d_id.b.al_pa, fcport->login_retry); 216 fcport->d_id.b.al_pa, fcport->login_retry);
217 return rval; 217 return rval;
218 218
219 done_free_sp: 219 done_free_sp:
220 lio->free(sp); 220 lio->free(sp);
221 done: 221 done:
222 return rval; 222 return rval;
223 } 223 }
224 224
225 static void 225 static void
226 qla2x00_async_logout_ctx_done(srb_t *sp) 226 qla2x00_async_logout_ctx_done(srb_t *sp)
227 { 227 {
228 struct srb_ctx *ctx = sp->ctx; 228 struct srb_ctx *ctx = sp->ctx;
229 struct srb_iocb *lio = ctx->u.iocb_cmd; 229 struct srb_iocb *lio = ctx->u.iocb_cmd;
230 230
231 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, 231 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
232 lio->u.logio.data); 232 lio->u.logio.data);
233 lio->free(sp); 233 lio->free(sp);
234 } 234 }
235 235
236 int 236 int
237 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 237 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
238 { 238 {
239 srb_t *sp; 239 srb_t *sp;
240 struct srb_ctx *ctx; 240 struct srb_ctx *ctx;
241 struct srb_iocb *lio; 241 struct srb_iocb *lio;
242 int rval; 242 int rval;
243 243
244 rval = QLA_FUNCTION_FAILED; 244 rval = QLA_FUNCTION_FAILED;
245 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 245 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
246 qla2x00_get_async_timeout(vha) + 2); 246 qla2x00_get_async_timeout(vha) + 2);
247 if (!sp) 247 if (!sp)
248 goto done; 248 goto done;
249 249
250 ctx = sp->ctx; 250 ctx = sp->ctx;
251 ctx->type = SRB_LOGOUT_CMD; 251 ctx->type = SRB_LOGOUT_CMD;
252 ctx->name = "logout"; 252 ctx->name = "logout";
253 lio = ctx->u.iocb_cmd; 253 lio = ctx->u.iocb_cmd;
254 lio->timeout = qla2x00_async_iocb_timeout; 254 lio->timeout = qla2x00_async_iocb_timeout;
255 lio->done = qla2x00_async_logout_ctx_done; 255 lio->done = qla2x00_async_logout_ctx_done;
256 rval = qla2x00_start_sp(sp); 256 rval = qla2x00_start_sp(sp);
257 if (rval != QLA_SUCCESS) 257 if (rval != QLA_SUCCESS)
258 goto done_free_sp; 258 goto done_free_sp;
259 259
260 ql_dbg(ql_dbg_disc, vha, 0x2070, 260 ql_dbg(ql_dbg_disc, vha, 0x2070,
261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
263 fcport->d_id.b.al_pa); 263 fcport->d_id.b.al_pa);
264 return rval; 264 return rval;
265 265
266 done_free_sp: 266 done_free_sp:
267 lio->free(sp); 267 lio->free(sp);
268 done: 268 done:
269 return rval; 269 return rval;
270 } 270 }
271 271
272 static void 272 static void
273 qla2x00_async_adisc_ctx_done(srb_t *sp) 273 qla2x00_async_adisc_ctx_done(srb_t *sp)
274 { 274 {
275 struct srb_ctx *ctx = sp->ctx; 275 struct srb_ctx *ctx = sp->ctx;
276 struct srb_iocb *lio = ctx->u.iocb_cmd; 276 struct srb_iocb *lio = ctx->u.iocb_cmd;
277 277
278 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, 278 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
279 lio->u.logio.data); 279 lio->u.logio.data);
280 lio->free(sp); 280 lio->free(sp);
281 } 281 }
282 282
283 int 283 int
284 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, 284 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
285 uint16_t *data) 285 uint16_t *data)
286 { 286 {
287 srb_t *sp; 287 srb_t *sp;
288 struct srb_ctx *ctx; 288 struct srb_ctx *ctx;
289 struct srb_iocb *lio; 289 struct srb_iocb *lio;
290 int rval; 290 int rval;
291 291
292 rval = QLA_FUNCTION_FAILED; 292 rval = QLA_FUNCTION_FAILED;
293 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 293 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
294 qla2x00_get_async_timeout(vha) + 2); 294 qla2x00_get_async_timeout(vha) + 2);
295 if (!sp) 295 if (!sp)
296 goto done; 296 goto done;
297 297
298 ctx = sp->ctx; 298 ctx = sp->ctx;
299 ctx->type = SRB_ADISC_CMD; 299 ctx->type = SRB_ADISC_CMD;
300 ctx->name = "adisc"; 300 ctx->name = "adisc";
301 lio = ctx->u.iocb_cmd; 301 lio = ctx->u.iocb_cmd;
302 lio->timeout = qla2x00_async_iocb_timeout; 302 lio->timeout = qla2x00_async_iocb_timeout;
303 lio->done = qla2x00_async_adisc_ctx_done; 303 lio->done = qla2x00_async_adisc_ctx_done;
304 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 304 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
305 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 305 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
306 rval = qla2x00_start_sp(sp); 306 rval = qla2x00_start_sp(sp);
307 if (rval != QLA_SUCCESS) 307 if (rval != QLA_SUCCESS)
308 goto done_free_sp; 308 goto done_free_sp;
309 309
310 ql_dbg(ql_dbg_disc, vha, 0x206f, 310 ql_dbg(ql_dbg_disc, vha, 0x206f,
311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n", 311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
313 fcport->d_id.b.al_pa); 313 fcport->d_id.b.al_pa);
314 return rval; 314 return rval;
315 315
316 done_free_sp: 316 done_free_sp:
317 lio->free(sp); 317 lio->free(sp);
318 done: 318 done:
319 return rval; 319 return rval;
320 } 320 }
321 321
322 static void 322 static void
323 qla2x00_async_tm_cmd_ctx_done(srb_t *sp) 323 qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
324 { 324 {
325 struct srb_ctx *ctx = sp->ctx; 325 struct srb_ctx *ctx = sp->ctx;
326 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd; 326 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
327 327
328 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb); 328 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
329 iocb->free(sp); 329 iocb->free(sp);
330 } 330 }
331 331
332 int 332 int
333 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, 333 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
334 uint32_t tag) 334 uint32_t tag)
335 { 335 {
336 struct scsi_qla_host *vha = fcport->vha; 336 struct scsi_qla_host *vha = fcport->vha;
337 srb_t *sp; 337 srb_t *sp;
338 struct srb_ctx *ctx; 338 struct srb_ctx *ctx;
339 struct srb_iocb *tcf; 339 struct srb_iocb *tcf;
340 int rval; 340 int rval;
341 341
342 rval = QLA_FUNCTION_FAILED; 342 rval = QLA_FUNCTION_FAILED;
343 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 343 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
344 qla2x00_get_async_timeout(vha) + 2); 344 qla2x00_get_async_timeout(vha) + 2);
345 if (!sp) 345 if (!sp)
346 goto done; 346 goto done;
347 347
348 ctx = sp->ctx; 348 ctx = sp->ctx;
349 ctx->type = SRB_TM_CMD; 349 ctx->type = SRB_TM_CMD;
350 ctx->name = "tmf"; 350 ctx->name = "tmf";
351 tcf = ctx->u.iocb_cmd; 351 tcf = ctx->u.iocb_cmd;
352 tcf->u.tmf.flags = flags; 352 tcf->u.tmf.flags = flags;
353 tcf->u.tmf.lun = lun; 353 tcf->u.tmf.lun = lun;
354 tcf->u.tmf.data = tag; 354 tcf->u.tmf.data = tag;
355 tcf->timeout = qla2x00_async_iocb_timeout; 355 tcf->timeout = qla2x00_async_iocb_timeout;
356 tcf->done = qla2x00_async_tm_cmd_ctx_done; 356 tcf->done = qla2x00_async_tm_cmd_ctx_done;
357 357
358 rval = qla2x00_start_sp(sp); 358 rval = qla2x00_start_sp(sp);
359 if (rval != QLA_SUCCESS) 359 if (rval != QLA_SUCCESS)
360 goto done_free_sp; 360 goto done_free_sp;
361 361
362 ql_dbg(ql_dbg_taskm, vha, 0x802f, 362 ql_dbg(ql_dbg_taskm, vha, 0x802f,
363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n", 363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
365 fcport->d_id.b.al_pa); 365 fcport->d_id.b.al_pa);
366 return rval; 366 return rval;
367 367
368 done_free_sp: 368 done_free_sp:
369 tcf->free(sp); 369 tcf->free(sp);
370 done: 370 done:
371 return rval; 371 return rval;
372 } 372 }
373 373
374 void 374 void
375 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, 375 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
376 uint16_t *data) 376 uint16_t *data)
377 { 377 {
378 int rval; 378 int rval;
379 379
380 switch (data[0]) { 380 switch (data[0]) {
381 case MBS_COMMAND_COMPLETE: 381 case MBS_COMMAND_COMPLETE:
382 /* 382 /*
383 * Driver must validate login state - If PRLI not complete, 383 * Driver must validate login state - If PRLI not complete,
384 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI 384 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
385 * requests. 385 * requests.
386 */ 386 */
387 rval = qla2x00_get_port_database(vha, fcport, 0); 387 rval = qla2x00_get_port_database(vha, fcport, 0);
388 if (rval != QLA_SUCCESS) { 388 if (rval != QLA_SUCCESS) {
389 qla2x00_post_async_logout_work(vha, fcport, NULL); 389 qla2x00_post_async_logout_work(vha, fcport, NULL);
390 qla2x00_post_async_login_work(vha, fcport, NULL); 390 qla2x00_post_async_login_work(vha, fcport, NULL);
391 break; 391 break;
392 } 392 }
393 if (fcport->flags & FCF_FCP2_DEVICE) { 393 if (fcport->flags & FCF_FCP2_DEVICE) {
394 qla2x00_post_async_adisc_work(vha, fcport, data); 394 qla2x00_post_async_adisc_work(vha, fcport, data);
395 break; 395 break;
396 } 396 }
397 qla2x00_update_fcport(vha, fcport); 397 qla2x00_update_fcport(vha, fcport);
398 break; 398 break;
399 case MBS_COMMAND_ERROR: 399 case MBS_COMMAND_ERROR:
400 fcport->flags &= ~FCF_ASYNC_SENT; 400 fcport->flags &= ~FCF_ASYNC_SENT;
401 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 401 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
402 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 402 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
403 else 403 else
404 qla2x00_mark_device_lost(vha, fcport, 1, 0); 404 qla2x00_mark_device_lost(vha, fcport, 1, 0);
405 break; 405 break;
406 case MBS_PORT_ID_USED: 406 case MBS_PORT_ID_USED:
407 fcport->loop_id = data[1]; 407 fcport->loop_id = data[1];
408 qla2x00_post_async_logout_work(vha, fcport, NULL); 408 qla2x00_post_async_logout_work(vha, fcport, NULL);
409 qla2x00_post_async_login_work(vha, fcport, NULL); 409 qla2x00_post_async_login_work(vha, fcport, NULL);
410 break; 410 break;
411 case MBS_LOOP_ID_USED: 411 case MBS_LOOP_ID_USED:
412 fcport->loop_id++; 412 fcport->loop_id++;
413 rval = qla2x00_find_new_loop_id(vha, fcport); 413 rval = qla2x00_find_new_loop_id(vha, fcport);
414 if (rval != QLA_SUCCESS) { 414 if (rval != QLA_SUCCESS) {
415 fcport->flags &= ~FCF_ASYNC_SENT; 415 fcport->flags &= ~FCF_ASYNC_SENT;
416 qla2x00_mark_device_lost(vha, fcport, 1, 0); 416 qla2x00_mark_device_lost(vha, fcport, 1, 0);
417 break; 417 break;
418 } 418 }
419 qla2x00_post_async_login_work(vha, fcport, NULL); 419 qla2x00_post_async_login_work(vha, fcport, NULL);
420 break; 420 break;
421 } 421 }
422 return; 422 return;
423 } 423 }
424 424
425 void 425 void
426 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 426 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
427 uint16_t *data) 427 uint16_t *data)
428 { 428 {
429 qla2x00_mark_device_lost(vha, fcport, 1, 0); 429 qla2x00_mark_device_lost(vha, fcport, 1, 0);
430 return; 430 return;
431 } 431 }
432 432
433 void 433 void
434 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, 434 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
435 uint16_t *data) 435 uint16_t *data)
436 { 436 {
437 if (data[0] == MBS_COMMAND_COMPLETE) { 437 if (data[0] == MBS_COMMAND_COMPLETE) {
438 qla2x00_update_fcport(vha, fcport); 438 qla2x00_update_fcport(vha, fcport);
439 439
440 return; 440 return;
441 } 441 }
442 442
443 /* Retry login. */ 443 /* Retry login. */
444 fcport->flags &= ~FCF_ASYNC_SENT; 444 fcport->flags &= ~FCF_ASYNC_SENT;
445 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 445 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
446 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 446 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
447 else 447 else
448 qla2x00_mark_device_lost(vha, fcport, 1, 0); 448 qla2x00_mark_device_lost(vha, fcport, 1, 0);
449 449
450 return; 450 return;
451 } 451 }
452 452
453 void 453 void
454 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport, 454 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
455 struct srb_iocb *iocb) 455 struct srb_iocb *iocb)
456 { 456 {
457 int rval; 457 int rval;
458 uint32_t flags; 458 uint32_t flags;
459 uint16_t lun; 459 uint16_t lun;
460 460
461 flags = iocb->u.tmf.flags; 461 flags = iocb->u.tmf.flags;
462 lun = (uint16_t)iocb->u.tmf.lun; 462 lun = (uint16_t)iocb->u.tmf.lun;
463 463
464 /* Issue Marker IOCB */ 464 /* Issue Marker IOCB */
465 rval = qla2x00_marker(vha, vha->hw->req_q_map[0], 465 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
466 vha->hw->rsp_q_map[0], fcport->loop_id, lun, 466 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
468 468
469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
470 ql_dbg(ql_dbg_taskm, vha, 0x8030, 470 ql_dbg(ql_dbg_taskm, vha, 0x8030,
471 "TM IOCB failed (%x).\n", rval); 471 "TM IOCB failed (%x).\n", rval);
472 } 472 }
473 473
474 return; 474 return;
475 } 475 }
476 476
477 /****************************************************************************/ 477 /****************************************************************************/
478 /* QLogic ISP2x00 Hardware Support Functions. */ 478 /* QLogic ISP2x00 Hardware Support Functions. */
479 /****************************************************************************/ 479 /****************************************************************************/
480 480
481 /* 481 /*
482 * qla2x00_initialize_adapter 482 * qla2x00_initialize_adapter
483 * Initialize board. 483 * Initialize board.
484 * 484 *
485 * Input: 485 * Input:
486 * ha = adapter block pointer. 486 * ha = adapter block pointer.
487 * 487 *
488 * Returns: 488 * Returns:
489 * 0 = success 489 * 0 = success
490 */ 490 */
491 int 491 int
492 qla2x00_initialize_adapter(scsi_qla_host_t *vha) 492 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
493 { 493 {
494 int rval; 494 int rval;
495 struct qla_hw_data *ha = vha->hw; 495 struct qla_hw_data *ha = vha->hw;
496 struct req_que *req = ha->req_q_map[0]; 496 struct req_que *req = ha->req_q_map[0];
497 497
498 /* Clear adapter flags. */ 498 /* Clear adapter flags. */
499 vha->flags.online = 0; 499 vha->flags.online = 0;
500 ha->flags.chip_reset_done = 0; 500 ha->flags.chip_reset_done = 0;
501 vha->flags.reset_active = 0; 501 vha->flags.reset_active = 0;
502 ha->flags.pci_channel_io_perm_failure = 0; 502 ha->flags.pci_channel_io_perm_failure = 0;
503 ha->flags.eeh_busy = 0; 503 ha->flags.eeh_busy = 0;
504 ha->flags.thermal_supported = 1; 504 ha->flags.thermal_supported = 1;
505 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 505 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
506 atomic_set(&vha->loop_state, LOOP_DOWN); 506 atomic_set(&vha->loop_state, LOOP_DOWN);
507 vha->device_flags = DFLG_NO_CABLE; 507 vha->device_flags = DFLG_NO_CABLE;
508 vha->dpc_flags = 0; 508 vha->dpc_flags = 0;
509 vha->flags.management_server_logged_in = 0; 509 vha->flags.management_server_logged_in = 0;
510 vha->marker_needed = 0; 510 vha->marker_needed = 0;
511 ha->isp_abort_cnt = 0; 511 ha->isp_abort_cnt = 0;
512 ha->beacon_blink_led = 0; 512 ha->beacon_blink_led = 0;
513 513
514 set_bit(0, ha->req_qid_map); 514 set_bit(0, ha->req_qid_map);
515 set_bit(0, ha->rsp_qid_map); 515 set_bit(0, ha->rsp_qid_map);
516 516
517 ql_log(ql_log_info, vha, 0x0040, 517 ql_log(ql_log_info, vha, 0x0040,
518 "Configuring PCI space...\n"); 518 "Configuring PCI space...\n");
519 rval = ha->isp_ops->pci_config(vha); 519 rval = ha->isp_ops->pci_config(vha);
520 if (rval) { 520 if (rval) {
521 ql_log(ql_log_warn, vha, 0x0044, 521 ql_log(ql_log_warn, vha, 0x0044,
522 "Unable to configure PCI space.\n"); 522 "Unable to configure PCI space.\n");
523 return (rval); 523 return (rval);
524 } 524 }
525 525
526 ha->isp_ops->reset_chip(vha); 526 ha->isp_ops->reset_chip(vha);
527 527
528 rval = qla2xxx_get_flash_info(vha); 528 rval = qla2xxx_get_flash_info(vha);
529 if (rval) { 529 if (rval) {
530 ql_log(ql_log_fatal, vha, 0x004f, 530 ql_log(ql_log_fatal, vha, 0x004f,
531 "Unable to validate FLASH data.\n"); 531 "Unable to validate FLASH data.\n");
532 return (rval); 532 return (rval);
533 } 533 }
534 534
535 ha->isp_ops->get_flash_version(vha, req->ring); 535 ha->isp_ops->get_flash_version(vha, req->ring);
536 ql_log(ql_log_info, vha, 0x0061, 536 ql_log(ql_log_info, vha, 0x0061,
537 "Configure NVRAM parameters...\n"); 537 "Configure NVRAM parameters...\n");
538 538
539 ha->isp_ops->nvram_config(vha); 539 ha->isp_ops->nvram_config(vha);
540 540
541 if (ha->flags.disable_serdes) { 541 if (ha->flags.disable_serdes) {
542 /* Mask HBA via NVRAM settings? */ 542 /* Mask HBA via NVRAM settings? */
543 ql_log(ql_log_info, vha, 0x0077, 543 ql_log(ql_log_info, vha, 0x0077,
544 "Masking HBA WWPN " 544 "Masking HBA WWPN "
545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
546 vha->port_name[0], vha->port_name[1], 546 vha->port_name[0], vha->port_name[1],
547 vha->port_name[2], vha->port_name[3], 547 vha->port_name[2], vha->port_name[3],
548 vha->port_name[4], vha->port_name[5], 548 vha->port_name[4], vha->port_name[5],
549 vha->port_name[6], vha->port_name[7]); 549 vha->port_name[6], vha->port_name[7]);
550 return QLA_FUNCTION_FAILED; 550 return QLA_FUNCTION_FAILED;
551 } 551 }
552 552
553 ql_log(ql_log_info, vha, 0x0078, 553 ql_log(ql_log_info, vha, 0x0078,
554 "Verifying loaded RISC code...\n"); 554 "Verifying loaded RISC code...\n");
555 555
556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
557 rval = ha->isp_ops->chip_diag(vha); 557 rval = ha->isp_ops->chip_diag(vha);
558 if (rval) 558 if (rval)
559 return (rval); 559 return (rval);
560 rval = qla2x00_setup_chip(vha); 560 rval = qla2x00_setup_chip(vha);
561 if (rval) 561 if (rval)
562 return (rval); 562 return (rval);
563 } 563 }
564 564
565 if (IS_QLA84XX(ha)) { 565 if (IS_QLA84XX(ha)) {
566 ha->cs84xx = qla84xx_get_chip(vha); 566 ha->cs84xx = qla84xx_get_chip(vha);
567 if (!ha->cs84xx) { 567 if (!ha->cs84xx) {
568 ql_log(ql_log_warn, vha, 0x00d0, 568 ql_log(ql_log_warn, vha, 0x00d0,
569 "Unable to configure ISP84XX.\n"); 569 "Unable to configure ISP84XX.\n");
570 return QLA_FUNCTION_FAILED; 570 return QLA_FUNCTION_FAILED;
571 } 571 }
572 } 572 }
573 rval = qla2x00_init_rings(vha); 573 rval = qla2x00_init_rings(vha);
574 ha->flags.chip_reset_done = 1; 574 ha->flags.chip_reset_done = 1;
575 575
576 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 576 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
578 rval = qla84xx_init_chip(vha); 578 rval = qla84xx_init_chip(vha);
579 if (rval != QLA_SUCCESS) { 579 if (rval != QLA_SUCCESS) {
580 ql_log(ql_log_warn, vha, 0x00d4, 580 ql_log(ql_log_warn, vha, 0x00d4,
581 "Unable to initialize ISP84XX.\n"); 581 "Unable to initialize ISP84XX.\n");
582 qla84xx_put_chip(vha); 582 qla84xx_put_chip(vha);
583 } 583 }
584 } 584 }
585 585
586 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 586 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
587 qla24xx_read_fcp_prio_cfg(vha); 587 qla24xx_read_fcp_prio_cfg(vha);
588 588
589 return (rval); 589 return (rval);
590 } 590 }
591 591
592 /** 592 /**
593 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers. 593 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
594 * @ha: HA context 594 * @ha: HA context
595 * 595 *
596 * Returns 0 on success. 596 * Returns 0 on success.
597 */ 597 */
598 int 598 int
599 qla2100_pci_config(scsi_qla_host_t *vha) 599 qla2100_pci_config(scsi_qla_host_t *vha)
600 { 600 {
601 uint16_t w; 601 uint16_t w;
602 unsigned long flags; 602 unsigned long flags;
603 struct qla_hw_data *ha = vha->hw; 603 struct qla_hw_data *ha = vha->hw;
604 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 604 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
605 605
606 pci_set_master(ha->pdev); 606 pci_set_master(ha->pdev);
607 pci_try_set_mwi(ha->pdev); 607 pci_try_set_mwi(ha->pdev);
608 608
609 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 609 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
610 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 610 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
611 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 611 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
612 612
613 pci_disable_rom(ha->pdev); 613 pci_disable_rom(ha->pdev);
614 614
615 /* Get PCI bus information. */ 615 /* Get PCI bus information. */
616 spin_lock_irqsave(&ha->hardware_lock, flags); 616 spin_lock_irqsave(&ha->hardware_lock, flags);
617 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); 617 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
618 spin_unlock_irqrestore(&ha->hardware_lock, flags); 618 spin_unlock_irqrestore(&ha->hardware_lock, flags);
619 619
620 return QLA_SUCCESS; 620 return QLA_SUCCESS;
621 } 621 }
622 622
623 /** 623 /**
624 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers. 624 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
625 * @ha: HA context 625 * @ha: HA context
626 * 626 *
627 * Returns 0 on success. 627 * Returns 0 on success.
628 */ 628 */
629 int 629 int
630 qla2300_pci_config(scsi_qla_host_t *vha) 630 qla2300_pci_config(scsi_qla_host_t *vha)
631 { 631 {
632 uint16_t w; 632 uint16_t w;
633 unsigned long flags = 0; 633 unsigned long flags = 0;
634 uint32_t cnt; 634 uint32_t cnt;
635 struct qla_hw_data *ha = vha->hw; 635 struct qla_hw_data *ha = vha->hw;
636 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 636 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
637 637
638 pci_set_master(ha->pdev); 638 pci_set_master(ha->pdev);
639 pci_try_set_mwi(ha->pdev); 639 pci_try_set_mwi(ha->pdev);
640 640
641 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 641 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
642 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 642 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
643 643
644 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 644 if (IS_QLA2322(ha) || IS_QLA6322(ha))
645 w &= ~PCI_COMMAND_INTX_DISABLE; 645 w &= ~PCI_COMMAND_INTX_DISABLE;
646 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 646 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
647 647
648 /* 648 /*
649 * If this is a 2300 card and not 2312, reset the 649 * If this is a 2300 card and not 2312, reset the
650 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately, 650 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
651 * the 2310 also reports itself as a 2300 so we need to get the 651 * the 2310 also reports itself as a 2300 so we need to get the
652 * fb revision level -- a 6 indicates it really is a 2300 and 652 * fb revision level -- a 6 indicates it really is a 2300 and
653 * not a 2310. 653 * not a 2310.
654 */ 654 */
655 if (IS_QLA2300(ha)) { 655 if (IS_QLA2300(ha)) {
656 spin_lock_irqsave(&ha->hardware_lock, flags); 656 spin_lock_irqsave(&ha->hardware_lock, flags);
657 657
658 /* Pause RISC. */ 658 /* Pause RISC. */
659 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 659 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
660 for (cnt = 0; cnt < 30000; cnt++) { 660 for (cnt = 0; cnt < 30000; cnt++) {
661 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0) 661 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
662 break; 662 break;
663 663
664 udelay(10); 664 udelay(10);
665 } 665 }
666 666
667 /* Select FPM registers. */ 667 /* Select FPM registers. */
668 WRT_REG_WORD(&reg->ctrl_status, 0x20); 668 WRT_REG_WORD(&reg->ctrl_status, 0x20);
669 RD_REG_WORD(&reg->ctrl_status); 669 RD_REG_WORD(&reg->ctrl_status);
670 670
671 /* Get the fb rev level */ 671 /* Get the fb rev level */
672 ha->fb_rev = RD_FB_CMD_REG(ha, reg); 672 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
673 673
674 if (ha->fb_rev == FPM_2300) 674 if (ha->fb_rev == FPM_2300)
675 pci_clear_mwi(ha->pdev); 675 pci_clear_mwi(ha->pdev);
676 676
677 /* Deselect FPM registers. */ 677 /* Deselect FPM registers. */
678 WRT_REG_WORD(&reg->ctrl_status, 0x0); 678 WRT_REG_WORD(&reg->ctrl_status, 0x0);
679 RD_REG_WORD(&reg->ctrl_status); 679 RD_REG_WORD(&reg->ctrl_status);
680 680
681 /* Release RISC module. */ 681 /* Release RISC module. */
682 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); 682 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
683 for (cnt = 0; cnt < 30000; cnt++) { 683 for (cnt = 0; cnt < 30000; cnt++) {
684 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0) 684 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
685 break; 685 break;
686 686
687 udelay(10); 687 udelay(10);
688 } 688 }
689 689
690 spin_unlock_irqrestore(&ha->hardware_lock, flags); 690 spin_unlock_irqrestore(&ha->hardware_lock, flags);
691 } 691 }
692 692
693 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 693 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
694 694
695 pci_disable_rom(ha->pdev); 695 pci_disable_rom(ha->pdev);
696 696
697 /* Get PCI bus information. */ 697 /* Get PCI bus information. */
698 spin_lock_irqsave(&ha->hardware_lock, flags); 698 spin_lock_irqsave(&ha->hardware_lock, flags);
699 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status); 699 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 700 spin_unlock_irqrestore(&ha->hardware_lock, flags);
701 701
702 return QLA_SUCCESS; 702 return QLA_SUCCESS;
703 } 703 }
704 704
705 /** 705 /**
706 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers. 706 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
707 * @ha: HA context 707 * @ha: HA context
708 * 708 *
709 * Returns 0 on success. 709 * Returns 0 on success.
710 */ 710 */
711 int 711 int
712 qla24xx_pci_config(scsi_qla_host_t *vha) 712 qla24xx_pci_config(scsi_qla_host_t *vha)
713 { 713 {
714 uint16_t w; 714 uint16_t w;
715 unsigned long flags = 0; 715 unsigned long flags = 0;
716 struct qla_hw_data *ha = vha->hw; 716 struct qla_hw_data *ha = vha->hw;
717 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 717 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
718 718
719 pci_set_master(ha->pdev); 719 pci_set_master(ha->pdev);
720 pci_try_set_mwi(ha->pdev); 720 pci_try_set_mwi(ha->pdev);
721 721
722 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 722 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
723 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 723 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
724 w &= ~PCI_COMMAND_INTX_DISABLE; 724 w &= ~PCI_COMMAND_INTX_DISABLE;
725 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 725 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
726 726
727 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); 727 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
728 728
729 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ 729 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
730 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) 730 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
731 pcix_set_mmrbc(ha->pdev, 2048); 731 pcix_set_mmrbc(ha->pdev, 2048);
732 732
733 /* PCIe -- adjust Maximum Read Request Size (2048). */ 733 /* PCIe -- adjust Maximum Read Request Size (2048). */
734 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 734 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
735 pcie_set_readrq(ha->pdev, 2048); 735 pcie_set_readrq(ha->pdev, 2048);
736 736
737 pci_disable_rom(ha->pdev); 737 pci_disable_rom(ha->pdev);
738 738
739 ha->chip_revision = ha->pdev->revision; 739 ha->chip_revision = ha->pdev->revision;
740 740
741 /* Get PCI bus information. */ 741 /* Get PCI bus information. */
742 spin_lock_irqsave(&ha->hardware_lock, flags); 742 spin_lock_irqsave(&ha->hardware_lock, flags);
743 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status); 743 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
744 spin_unlock_irqrestore(&ha->hardware_lock, flags); 744 spin_unlock_irqrestore(&ha->hardware_lock, flags);
745 745
746 return QLA_SUCCESS; 746 return QLA_SUCCESS;
747 } 747 }
748 748
749 /** 749 /**
750 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers. 750 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
751 * @ha: HA context 751 * @ha: HA context
752 * 752 *
753 * Returns 0 on success. 753 * Returns 0 on success.
754 */ 754 */
755 int 755 int
756 qla25xx_pci_config(scsi_qla_host_t *vha) 756 qla25xx_pci_config(scsi_qla_host_t *vha)
757 { 757 {
758 uint16_t w; 758 uint16_t w;
759 struct qla_hw_data *ha = vha->hw; 759 struct qla_hw_data *ha = vha->hw;
760 760
761 pci_set_master(ha->pdev); 761 pci_set_master(ha->pdev);
762 pci_try_set_mwi(ha->pdev); 762 pci_try_set_mwi(ha->pdev);
763 763
764 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 764 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
765 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 765 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
766 w &= ~PCI_COMMAND_INTX_DISABLE; 766 w &= ~PCI_COMMAND_INTX_DISABLE;
767 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 767 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
768 768
769 /* PCIe -- adjust Maximum Read Request Size (2048). */ 769 /* PCIe -- adjust Maximum Read Request Size (2048). */
770 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 770 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
771 pcie_set_readrq(ha->pdev, 2048); 771 pcie_set_readrq(ha->pdev, 2048);
772 772
773 pci_disable_rom(ha->pdev); 773 pci_disable_rom(ha->pdev);
774 774
775 ha->chip_revision = ha->pdev->revision; 775 ha->chip_revision = ha->pdev->revision;
776 776
777 return QLA_SUCCESS; 777 return QLA_SUCCESS;
778 } 778 }
779 779
780 /** 780 /**
781 * qla2x00_isp_firmware() - Choose firmware image. 781 * qla2x00_isp_firmware() - Choose firmware image.
782 * @ha: HA context 782 * @ha: HA context
783 * 783 *
784 * Returns 0 on success. 784 * Returns 0 on success.
785 */ 785 */
786 static int 786 static int
787 qla2x00_isp_firmware(scsi_qla_host_t *vha) 787 qla2x00_isp_firmware(scsi_qla_host_t *vha)
788 { 788 {
789 int rval; 789 int rval;
790 uint16_t loop_id, topo, sw_cap; 790 uint16_t loop_id, topo, sw_cap;
791 uint8_t domain, area, al_pa; 791 uint8_t domain, area, al_pa;
792 struct qla_hw_data *ha = vha->hw; 792 struct qla_hw_data *ha = vha->hw;
793 793
794 /* Assume loading risc code */ 794 /* Assume loading risc code */
795 rval = QLA_FUNCTION_FAILED; 795 rval = QLA_FUNCTION_FAILED;
796 796
797 if (ha->flags.disable_risc_code_load) { 797 if (ha->flags.disable_risc_code_load) {
798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n"); 798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
799 799
800 /* Verify checksum of loaded RISC code. */ 800 /* Verify checksum of loaded RISC code. */
801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
802 if (rval == QLA_SUCCESS) { 802 if (rval == QLA_SUCCESS) {
803 /* And, verify we are not in ROM code. */ 803 /* And, verify we are not in ROM code. */
804 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, 804 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
805 &area, &domain, &topo, &sw_cap); 805 &area, &domain, &topo, &sw_cap);
806 } 806 }
807 } 807 }
808 808
809 if (rval) 809 if (rval)
810 ql_dbg(ql_dbg_init, vha, 0x007a, 810 ql_dbg(ql_dbg_init, vha, 0x007a,
811 "**** Load RISC code ****.\n"); 811 "**** Load RISC code ****.\n");
812 812
813 return (rval); 813 return (rval);
814 } 814 }
815 815
816 /** 816 /**
817 * qla2x00_reset_chip() - Reset ISP chip. 817 * qla2x00_reset_chip() - Reset ISP chip.
818 * @ha: HA context 818 * @ha: HA context
819 * 819 *
820 * Returns 0 on success. 820 * Returns 0 on success.
821 */ 821 */
822 void 822 void
823 qla2x00_reset_chip(scsi_qla_host_t *vha) 823 qla2x00_reset_chip(scsi_qla_host_t *vha)
824 { 824 {
825 unsigned long flags = 0; 825 unsigned long flags = 0;
826 struct qla_hw_data *ha = vha->hw; 826 struct qla_hw_data *ha = vha->hw;
827 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 827 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
828 uint32_t cnt; 828 uint32_t cnt;
829 uint16_t cmd; 829 uint16_t cmd;
830 830
831 if (unlikely(pci_channel_offline(ha->pdev))) 831 if (unlikely(pci_channel_offline(ha->pdev)))
832 return; 832 return;
833 833
834 ha->isp_ops->disable_intrs(ha); 834 ha->isp_ops->disable_intrs(ha);
835 835
836 spin_lock_irqsave(&ha->hardware_lock, flags); 836 spin_lock_irqsave(&ha->hardware_lock, flags);
837 837
838 /* Turn off master enable */ 838 /* Turn off master enable */
839 cmd = 0; 839 cmd = 0;
840 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); 840 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
841 cmd &= ~PCI_COMMAND_MASTER; 841 cmd &= ~PCI_COMMAND_MASTER;
842 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 842 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
843 843
844 if (!IS_QLA2100(ha)) { 844 if (!IS_QLA2100(ha)) {
845 /* Pause RISC. */ 845 /* Pause RISC. */
846 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 846 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
847 if (IS_QLA2200(ha) || IS_QLA2300(ha)) { 847 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
848 for (cnt = 0; cnt < 30000; cnt++) { 848 for (cnt = 0; cnt < 30000; cnt++) {
849 if ((RD_REG_WORD(&reg->hccr) & 849 if ((RD_REG_WORD(&reg->hccr) &
850 HCCR_RISC_PAUSE) != 0) 850 HCCR_RISC_PAUSE) != 0)
851 break; 851 break;
852 udelay(100); 852 udelay(100);
853 } 853 }
854 } else { 854 } else {
855 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 855 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
856 udelay(10); 856 udelay(10);
857 } 857 }
858 858
859 /* Select FPM registers. */ 859 /* Select FPM registers. */
860 WRT_REG_WORD(&reg->ctrl_status, 0x20); 860 WRT_REG_WORD(&reg->ctrl_status, 0x20);
861 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 861 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
862 862
863 /* FPM Soft Reset. */ 863 /* FPM Soft Reset. */
864 WRT_REG_WORD(&reg->fpm_diag_config, 0x100); 864 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
865 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */ 865 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
866 866
867 /* Toggle Fpm Reset. */ 867 /* Toggle Fpm Reset. */
868 if (!IS_QLA2200(ha)) { 868 if (!IS_QLA2200(ha)) {
869 WRT_REG_WORD(&reg->fpm_diag_config, 0x0); 869 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
870 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */ 870 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
871 } 871 }
872 872
873 /* Select frame buffer registers. */ 873 /* Select frame buffer registers. */
874 WRT_REG_WORD(&reg->ctrl_status, 0x10); 874 WRT_REG_WORD(&reg->ctrl_status, 0x10);
875 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 875 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
876 876
877 /* Reset frame buffer FIFOs. */ 877 /* Reset frame buffer FIFOs. */
878 if (IS_QLA2200(ha)) { 878 if (IS_QLA2200(ha)) {
879 WRT_FB_CMD_REG(ha, reg, 0xa000); 879 WRT_FB_CMD_REG(ha, reg, 0xa000);
880 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ 880 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
881 } else { 881 } else {
882 WRT_FB_CMD_REG(ha, reg, 0x00fc); 882 WRT_FB_CMD_REG(ha, reg, 0x00fc);
883 883
884 /* Read back fb_cmd until zero or 3 seconds max */ 884 /* Read back fb_cmd until zero or 3 seconds max */
885 for (cnt = 0; cnt < 3000; cnt++) { 885 for (cnt = 0; cnt < 3000; cnt++) {
886 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) 886 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
887 break; 887 break;
888 udelay(100); 888 udelay(100);
889 } 889 }
890 } 890 }
891 891
892 /* Select RISC module registers. */ 892 /* Select RISC module registers. */
893 WRT_REG_WORD(&reg->ctrl_status, 0); 893 WRT_REG_WORD(&reg->ctrl_status, 0);
894 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 894 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
895 895
896 /* Reset RISC processor. */ 896 /* Reset RISC processor. */
897 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 897 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
898 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 898 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
899 899
900 /* Release RISC processor. */ 900 /* Release RISC processor. */
901 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); 901 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
902 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 902 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
903 } 903 }
904 904
905 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 905 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
906 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT); 906 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
907 907
908 /* Reset ISP chip. */ 908 /* Reset ISP chip. */
909 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 909 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
910 910
911 /* Wait for RISC to recover from reset. */ 911 /* Wait for RISC to recover from reset. */
912 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 912 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
913 /* 913 /*
914 * It is necessary to for a delay here since the card doesn't 914 * It is necessary to for a delay here since the card doesn't
915 * respond to PCI reads during a reset. On some architectures 915 * respond to PCI reads during a reset. On some architectures
916 * this will result in an MCA. 916 * this will result in an MCA.
917 */ 917 */
918 udelay(20); 918 udelay(20);
919 for (cnt = 30000; cnt; cnt--) { 919 for (cnt = 30000; cnt; cnt--) {
920 if ((RD_REG_WORD(&reg->ctrl_status) & 920 if ((RD_REG_WORD(&reg->ctrl_status) &
921 CSR_ISP_SOFT_RESET) == 0) 921 CSR_ISP_SOFT_RESET) == 0)
922 break; 922 break;
923 udelay(100); 923 udelay(100);
924 } 924 }
925 } else 925 } else
926 udelay(10); 926 udelay(10);
927 927
928 /* Reset RISC processor. */ 928 /* Reset RISC processor. */
929 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 929 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
930 930
931 WRT_REG_WORD(&reg->semaphore, 0); 931 WRT_REG_WORD(&reg->semaphore, 0);
932 932
933 /* Release RISC processor. */ 933 /* Release RISC processor. */
934 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); 934 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
935 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 935 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
936 936
937 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 937 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
938 for (cnt = 0; cnt < 30000; cnt++) { 938 for (cnt = 0; cnt < 30000; cnt++) {
939 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) 939 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
940 break; 940 break;
941 941
942 udelay(100); 942 udelay(100);
943 } 943 }
944 } else 944 } else
945 udelay(100); 945 udelay(100);
946 946
947 /* Turn on master enable */ 947 /* Turn on master enable */
948 cmd |= PCI_COMMAND_MASTER; 948 cmd |= PCI_COMMAND_MASTER;
949 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 949 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
950 950
951 /* Disable RISC pause on FPM parity error. */ 951 /* Disable RISC pause on FPM parity error. */
952 if (!IS_QLA2100(ha)) { 952 if (!IS_QLA2100(ha)) {
953 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE); 953 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
954 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 954 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
955 } 955 }
956 956
957 spin_unlock_irqrestore(&ha->hardware_lock, flags); 957 spin_unlock_irqrestore(&ha->hardware_lock, flags);
958 } 958 }
959 959
960 /** 960 /**
961 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. 961 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
962 * 962 *
963 * Returns 0 on success. 963 * Returns 0 on success.
964 */ 964 */
965 int 965 int
966 qla81xx_reset_mpi(scsi_qla_host_t *vha) 966 qla81xx_reset_mpi(scsi_qla_host_t *vha)
967 { 967 {
968 uint16_t mb[4] = {0x1010, 0, 1, 0}; 968 uint16_t mb[4] = {0x1010, 0, 1, 0};
969 969
970 return qla81xx_write_mpi_register(vha, mb); 970 return qla81xx_write_mpi_register(vha, mb);
971 } 971 }
972 972
973 /** 973 /**
974 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 974 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
975 * @ha: HA context 975 * @ha: HA context
976 * 976 *
977 * Returns 0 on success. 977 * Returns 0 on success.
978 */ 978 */
979 static inline void 979 static inline void
980 qla24xx_reset_risc(scsi_qla_host_t *vha) 980 qla24xx_reset_risc(scsi_qla_host_t *vha)
981 { 981 {
982 unsigned long flags = 0; 982 unsigned long flags = 0;
983 struct qla_hw_data *ha = vha->hw; 983 struct qla_hw_data *ha = vha->hw;
984 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 984 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
985 uint32_t cnt, d2; 985 uint32_t cnt, d2;
986 uint16_t wd; 986 uint16_t wd;
987 static int abts_cnt; /* ISP abort retry counts */ 987 static int abts_cnt; /* ISP abort retry counts */
988 988
989 spin_lock_irqsave(&ha->hardware_lock, flags); 989 spin_lock_irqsave(&ha->hardware_lock, flags);
990 990
991 /* Reset RISC. */ 991 /* Reset RISC. */
992 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 992 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
993 for (cnt = 0; cnt < 30000; cnt++) { 993 for (cnt = 0; cnt < 30000; cnt++) {
994 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) 994 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
995 break; 995 break;
996 996
997 udelay(10); 997 udelay(10);
998 } 998 }
999 999
1000 WRT_REG_DWORD(&reg->ctrl_status, 1000 WRT_REG_DWORD(&reg->ctrl_status,
1001 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 1001 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
1002 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 1002 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1003 1003
1004 udelay(100); 1004 udelay(100);
1005 /* Wait for firmware to complete NVRAM accesses. */ 1005 /* Wait for firmware to complete NVRAM accesses. */
1006 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1006 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1007 for (cnt = 10000 ; cnt && d2; cnt--) { 1007 for (cnt = 10000 ; cnt && d2; cnt--) {
1008 udelay(5); 1008 udelay(5);
1009 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1009 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1010 barrier(); 1010 barrier();
1011 } 1011 }
1012 1012
1013 /* Wait for soft-reset to complete. */ 1013 /* Wait for soft-reset to complete. */
1014 d2 = RD_REG_DWORD(&reg->ctrl_status); 1014 d2 = RD_REG_DWORD(&reg->ctrl_status);
1015 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) { 1015 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
1016 udelay(5); 1016 udelay(5);
1017 d2 = RD_REG_DWORD(&reg->ctrl_status); 1017 d2 = RD_REG_DWORD(&reg->ctrl_status);
1018 barrier(); 1018 barrier();
1019 } 1019 }
1020 1020
1021 /* If required, do an MPI FW reset now */ 1021 /* If required, do an MPI FW reset now */
1022 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { 1022 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1023 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { 1023 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1024 if (++abts_cnt < 5) { 1024 if (++abts_cnt < 5) {
1025 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1025 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1026 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); 1026 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1027 } else { 1027 } else {
1028 /* 1028 /*
1029 * We exhausted the ISP abort retries. We have to 1029 * We exhausted the ISP abort retries. We have to
1030 * set the board offline. 1030 * set the board offline.
1031 */ 1031 */
1032 abts_cnt = 0; 1032 abts_cnt = 0;
1033 vha->flags.online = 0; 1033 vha->flags.online = 0;
1034 } 1034 }
1035 } 1035 }
1036 } 1036 }
1037 1037
    WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
    RD_REG_DWORD(&reg->hccr);

    WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
    RD_REG_DWORD(&reg->hccr);

    WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
    RD_REG_DWORD(&reg->hccr);

    d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
    for (cnt = 6000000; cnt && d2; cnt--) {
        udelay(5);
        d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
        barrier();
    }

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (IS_NOPOLLING_TYPE(ha))
        ha->isp_ops->enable_intrs(ha);
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (pci_channel_offline(ha->pdev) &&
        ha->flags.pci_channel_io_perm_failure) {
        return;
    }

    ha->isp_ops->disable_intrs(ha);

    /* Perform RISC reset. */
    qla24xx_reset_risc(vha);
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    unsigned long flags = 0;
    uint16_t data;
    uint32_t cnt;
    uint16_t mb[5];
    struct req_que *req = ha->req_q_map[0];

    /* Assume a failed state */
    rval = QLA_FUNCTION_FAILED;

    ql_dbg(ql_dbg_init, vha, 0x007b,
        "Testing device at %lx.\n", (u_long)&reg->flash_address);

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Reset ISP chip. */
    WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

    /*
     * We need to have a delay here since the card will not respond while
     * in reset, causing an MCA on some architectures.
     */
    udelay(20);
    data = qla2x00_debounce_register(&reg->ctrl_status);
    for (cnt = 6000000; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
        udelay(5);
        data = RD_REG_WORD(&reg->ctrl_status);
        barrier();
    }

    if (!cnt)
        goto chip_diag_failed;

    ql_dbg(ql_dbg_init, vha, 0x007c,
        "Reset register cleared by chip reset.\n");

    /* Reset RISC processor. */
    WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
    WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

    /* Workaround for QLA2312 PCI parity error */
    if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
        data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
        for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
            udelay(5);
            data = RD_MAILBOX_REG(ha, reg, 0);
            barrier();
        }
    } else
        udelay(10);

    if (!cnt)
        goto chip_diag_failed;

    /* Check product ID of chip */
    ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

    mb[1] = RD_MAILBOX_REG(ha, reg, 1);
    mb[2] = RD_MAILBOX_REG(ha, reg, 2);
    mb[3] = RD_MAILBOX_REG(ha, reg, 3);
    mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
    if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
        mb[3] != PROD_ID_3) {
        ql_log(ql_log_warn, vha, 0x0062,
            "Wrong product ID = 0x%x,0x%x,0x%x.\n",
            mb[1], mb[2], mb[3]);

        goto chip_diag_failed;
    }
    ha->product_id[0] = mb[1];
    ha->product_id[1] = mb[2];
    ha->product_id[2] = mb[3];
    ha->product_id[3] = mb[4];

    /* Adjust fw RISC transfer size */
    if (req->length > 1024)
        ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
    else
        ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
            req->length;

    if (IS_QLA2200(ha) &&
        RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
        /* Limit firmware transfer size with a 2200A */
        ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

        ha->device_type |= DT_ISP2200A;
        ha->fw_transfer_size = 128;
    }

    /* Wrap Incoming Mailboxes Test. */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

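    /*
     * The register test issues a mailbox command and so cannot run
     * under hardware_lock; the lock is re-taken below so the shared
     * chip_diag_failed exit path can unlock unconditionally.
     */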
    ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
    rval = qla2x00_mbx_reg_test(vha);
    if (rval)
        ql_log(ql_log_warn, vha, 0x0080,
            "Failed mailbox send register test.\n");
    else
        /* Flag a successful rval */
        rval = QLA_SUCCESS;
    spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
    if (rval)
        ql_log(ql_log_info, vha, 0x0081,
            "Chip diagnostics **** FAILED ****.\n");

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];

    if (IS_QLA82XX(ha))
        return QLA_SUCCESS;

    ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

    rval = qla2x00_mbx_reg_test(vha);
    if (rval) {
        ql_log(ql_log_warn, vha, 0x0082,
            "Failed mailbox send register test.\n");
    } else {
        /* Flag a successful rval */
        rval = QLA_SUCCESS;
    }

    return rval;
}

void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
    int rval;
    uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
        eft_size, fce_size, mq_size;
    dma_addr_t tc_dma;
    void *tc;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    if (ha->fw_dump) {
        ql_dbg(ql_dbg_init, vha, 0x00bd,
            "Firmware dump already allocated.\n");
        return;
    }

    ha->fw_dumped = 0;
    fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
    if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
        fixed_size = sizeof(struct qla2100_fw_dump);
    } else if (IS_QLA23XX(ha)) {
        fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
        mem_size = (ha->fw_memory_size - 0x11000 + 1) *
            sizeof(uint16_t);
    } else if (IS_FWI2_CAPABLE(ha)) {
        if (IS_QLA81XX(ha))
            fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
        else if (IS_QLA25XX(ha))
            fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
        else
            fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
        mem_size = (ha->fw_memory_size - 0x100000 + 1) *
            sizeof(uint32_t);
        if (ha->mqenable)
            mq_size = sizeof(struct qla2xxx_mq_chain);
        /* Allocate memory for Fibre Channel Event Buffer. */
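        /* FCE tracing is only supported on ISP25xx/ISP81xx parts. */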
        if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
            goto try_eft;

        tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
            GFP_KERNEL);
        if (!tc) {
            ql_log(ql_log_warn, vha, 0x00be,
                "Unable to allocate (%d KB) for FCE.\n",
                FCE_SIZE / 1024);
            goto try_eft;
        }

        memset(tc, 0, FCE_SIZE);
        rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
            ha->fce_mb, &ha->fce_bufs);
        if (rval) {
            ql_log(ql_log_warn, vha, 0x00bf,
                "Unable to initialize FCE (%d).\n", rval);
            dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
                tc_dma);
            ha->flags.fce_enabled = 0;
            goto try_eft;
        }
        ql_log(ql_log_info, vha, 0x00c0,
            "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);

        fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
        ha->flags.fce_enabled = 1;
        ha->fce_dma = tc_dma;
        ha->fce = tc;
try_eft:
        /* Allocate memory for Extended Trace Buffer. */
        tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
            GFP_KERNEL);
        if (!tc) {
            ql_log(ql_log_warn, vha, 0x00c1,
                "Unable to allocate (%d KB) for EFT.\n",
                EFT_SIZE / 1024);
            goto cont_alloc;
        }

        memset(tc, 0, EFT_SIZE);
        rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
        if (rval) {
            ql_log(ql_log_warn, vha, 0x00c2,
                "Unable to initialize EFT (%d).\n", rval);
            dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
                tc_dma);
            goto cont_alloc;
        }
        ql_log(ql_log_info, vha, 0x00c3,
            "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

        eft_size = EFT_SIZE;
        ha->eft_dma = tc_dma;
        ha->eft = tc;
    }
cont_alloc:
    req_q_size = req->length * sizeof(request_t);
    rsp_q_size = rsp->length * sizeof(response_t);

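    /*
     * Dump layout: header, fixed register area, external memory, the
     * request and response queues, then the EFT.  chain_offset records
     * where the optional chained entries (multi-queue, FCE) begin.
     */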
    dump_size = offsetof(struct qla2xxx_fw_dump, isp);
    dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
    ha->chain_offset = dump_size;
    dump_size += mq_size + fce_size;

    ha->fw_dump = vmalloc(dump_size);
    if (!ha->fw_dump) {
        ql_log(ql_log_warn, vha, 0x00c4,
            "Unable to allocate (%d KB) for firmware dump.\n",
            dump_size / 1024);

        if (ha->fce) {
            dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
                ha->fce_dma);
            ha->fce = NULL;
            ha->fce_dma = 0;
        }

        if (ha->eft) {
            dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
                ha->eft_dma);
            ha->eft = NULL;
            ha->eft_dma = 0;
        }
        return;
    }
    ql_log(ql_log_info, vha, 0x00c5,
        "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);

    ha->fw_dump_len = dump_size;
    ha->fw_dump->signature[0] = 'Q';
    ha->fw_dump->signature[1] = 'L';
    ha->fw_dump->signature[2] = 'G';
    ha->fw_dump->signature[3] = 'C';
    ha->fw_dump->version = __constant_htonl(1);

    ha->fw_dump->fixed_size = htonl(fixed_size);
    ha->fw_dump->mem_size = htonl(mem_size);
    ha->fw_dump->req_q_size = htonl(req_q_size);
    ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

    ha->fw_dump->eft_size = htonl(eft_size);
    ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
    ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

    ha->fw_dump->header_size =
        htonl(offsetof(struct qla2xxx_fw_dump, isp));
}

static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
    int rval;
    uint16_t dc;
    uint32_t dw;

    if (!IS_QLA81XX(vha->hw))
        return QLA_SUCCESS;

    rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x0105,
            "Unable to acquire semaphore.\n");
        goto done;
    }

    pci_read_config_word(vha->hw->pdev, 0x54, &dc);
    rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
        goto done_release;
    }

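    /*
     * Mirror the MPS bits (mask 0xe0) from PCI config space into the
     * firmware's copy in RISC RAM at 0x7a15, held under the semaphore
     * word at 0x7c00; rewrite only if the two actually disagree.
     */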
    dc &= MPS_MASK;
    if (dc == (dw & MPS_MASK))
        goto done_release;

    dw &= ~MPS_MASK;
    dw |= dc;
    rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
    }

done_release:
    rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x006d,
            "Unable to release semaphore.\n");
    }

done:
    return rval;
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
    int rval;
    uint32_t srisc_address = 0;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    unsigned long flags;
    uint16_t fw_major_version;

    if (IS_QLA82XX(ha)) {
        rval = ha->isp_ops->load_risc(vha, &srisc_address);
        if (rval == QLA_SUCCESS) {
            qla2x00_stop_firmware(vha);
            goto enable_82xx_npiv;
        } else
            goto failed;
    }

    if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
        /* Disable SRAM, Instruction RAM and GP RAM parity. */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
        RD_REG_WORD(&reg->hccr);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
    }

    qla81xx_mpi_sync(vha);

    /* Load firmware sequences */
    rval = ha->isp_ops->load_risc(vha, &srisc_address);
    if (rval == QLA_SUCCESS) {
        ql_dbg(ql_dbg_init, vha, 0x00c9,
            "Verifying Checksum of loaded RISC code.\n");

        rval = qla2x00_verify_checksum(vha, srisc_address);
        if (rval == QLA_SUCCESS) {
            /* Start firmware execution. */
            ql_dbg(ql_dbg_init, vha, 0x00ca,
                "Starting firmware.\n");

            rval = qla2x00_execute_fw(vha, srisc_address);
            /* Retrieve firmware information. */
            if (rval == QLA_SUCCESS) {
enable_82xx_npiv:
                fw_major_version = ha->fw_major_version;
                rval = qla2x00_get_fw_version(vha,
                    &ha->fw_major_version,
                    &ha->fw_minor_version,
                    &ha->fw_subminor_version,
                    &ha->fw_attributes, &ha->fw_memory_size,
                    ha->mpi_version, &ha->mpi_capabilities,
                    ha->phy_version);
                if (rval != QLA_SUCCESS)
                    goto failed;
                ha->flags.npiv_supported = 0;
                if (IS_QLA2XXX_MIDTYPE(ha) &&
                    (ha->fw_attributes & BIT_2)) {
                    ha->flags.npiv_supported = 1;
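                    /*
                     * The multi-ID firmware expects the physical
                     * port plus its vports to fill a multiple of
                     * MIN_MULTI_ID_FABRIC IDs; otherwise fall back
                     * to the minimum vport count.
                     */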
                    if ((!ha->max_npiv_vports) ||
                        ((ha->max_npiv_vports + 1) %
                        MIN_MULTI_ID_FABRIC))
                        ha->max_npiv_vports =
                            MIN_MULTI_ID_FABRIC - 1;
                }
                qla2x00_get_resource_cnts(vha, NULL,
                    &ha->fw_xcb_count, NULL, NULL,
                    &ha->max_npiv_vports, NULL);

                if (!fw_major_version && ql2xallocfwdump) {
                    if (!IS_QLA82XX(ha))
                        qla2x00_alloc_fw_dump(vha);
                }
            }
        } else {
            ql_log(ql_log_fatal, vha, 0x00cd,
                "ISP Firmware failed checksum.\n");
            goto failed;
        }
    }

    if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
        /* Enable proper parity. */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (IS_QLA2300(ha))
            /* SRAM parity */
            WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
        else
            /* SRAM, Instruction RAM and GP RAM parity */
            WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
        RD_REG_WORD(&reg->hccr);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
    }

    if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
        uint32_t size;

        rval = qla81xx_fac_get_sector_size(vha, &size);
        if (rval == QLA_SUCCESS) {
            ha->flags.fac_supported = 1;
            ha->fdt_block_size = size << 2;
        } else {
            ql_log(ql_log_warn, vha, 0x00ce,
                "Unsupported FAC firmware (%d.%02d.%02d).\n",
                ha->fw_major_version, ha->fw_minor_version,
                ha->fw_subminor_version);
        }
    }
failed:
    if (rval) {
        ql_log(ql_log_fatal, vha, 0x00cf,
            "Setup chip ****FAILED****.\n");
    }

    return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
    uint16_t cnt;
    response_t *pkt;

    rsp->ring_ptr = rsp->ring;
    rsp->ring_index = 0;
    rsp->status_srb = NULL;
    pkt = rsp->ring_ptr;
    for (cnt = 0; cnt < rsp->length; cnt++) {
        pkt->signature = RESPONSE_PROCESSED;
        pkt++;
    }
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
    uint16_t swing, emphasis, tx_sens, rx_sens;
    struct qla_hw_data *ha = vha->hw;

    memset(ha->fw_options, 0, sizeof(ha->fw_options));
    qla2x00_get_fw_options(vha, ha->fw_options);

    if (IS_QLA2100(ha) || IS_QLA2200(ha))
        return;

    /* Serial Link options. */
    ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
        "Serial link options.\n");
    ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
        (uint8_t *)&ha->fw_seriallink_options,
        sizeof(ha->fw_seriallink_options));

    ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
    if (ha->fw_seriallink_options[3] & BIT_2) {
        ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

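        /*
         * Pack transmitter swing (bits 10:8) and emphasis (bits 15:14)
         * together with the receiver/transmitter sensitivities from
         * the NVRAM serial-link bytes into fw_options[10] (1G) and
         * fw_options[11] (2G); the low-byte layout is chip dependent.
         */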
        /* 1G settings */
        swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
        emphasis = (ha->fw_seriallink_options[2] &
            (BIT_4 | BIT_3)) >> 3;
        tx_sens = ha->fw_seriallink_options[0] &
            (BIT_3 | BIT_2 | BIT_1 | BIT_0);
        rx_sens = (ha->fw_seriallink_options[0] &
            (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
        ha->fw_options[10] = (emphasis << 14) | (swing << 8);
        if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
            if (rx_sens == 0x0)
                rx_sens = 0x3;
            ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
        } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
            ha->fw_options[10] |= BIT_5 |
                ((rx_sens & (BIT_1 | BIT_0)) << 2) |
                (tx_sens & (BIT_1 | BIT_0));

        /* 2G settings */
        swing = (ha->fw_seriallink_options[2] &
            (BIT_7 | BIT_6 | BIT_5)) >> 5;
        emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
        tx_sens = ha->fw_seriallink_options[1] &
            (BIT_3 | BIT_2 | BIT_1 | BIT_0);
        rx_sens = (ha->fw_seriallink_options[1] &
            (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
        ha->fw_options[11] = (emphasis << 14) | (swing << 8);
        if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
            if (rx_sens == 0x0)
                rx_sens = 0x3;
            ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
        } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
            ha->fw_options[11] |= BIT_5 |
                ((rx_sens & (BIT_1 | BIT_0)) << 2) |
                (tx_sens & (BIT_1 | BIT_0));
    }

    /* FCP2 options. */
    /* Return command IOCBs without waiting for an ABTS to complete. */
    ha->fw_options[3] |= BIT_13;

    /* LED scheme. */
    if (ha->flags.enable_led_scheme)
        ha->fw_options[2] |= BIT_12;

    /* Detect ISP6312. */
    if (IS_QLA6312(ha))
        ha->fw_options[2] |= BIT_13;

    /* Update firmware options. */
    qla2x00_set_fw_options(vha, ha->fw_options);
}

void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLA82XX(ha))
        return;

    /* Update Serial Link options. */
    if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
        return;

    rval = qla2x00_set_serdes_params(vha,
        le16_to_cpu(ha->fw_seriallink_options24[1]),
        le16_to_cpu(ha->fw_seriallink_options24[2]),
        le16_to_cpu(ha->fw_seriallink_options24[3]));
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x0104,
            "Unable to update Serial Link options (%x).\n", rval);
    }
}

void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    /* Setup ring parameters in initialization control block. */
    ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
    ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
    ha->init_cb->request_q_length = cpu_to_le16(req->length);
    ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
    ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
    ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
    ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
    ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

    WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
    WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
    WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
    RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
}

void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
    struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
    struct qla_msix_entry *msix;
    struct init_cb_24xx *icb;
    uint16_t rid = 0;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    /* Setup ring parameters in initialization control block. */
    icb = (struct init_cb_24xx *)ha->init_cb;
    icb->request_q_outpointer = __constant_cpu_to_le16(0);
    icb->response_q_inpointer = __constant_cpu_to_le16(0);
    icb->request_q_length = cpu_to_le16(req->length);
    icb->response_q_length = cpu_to_le16(rsp->length);
    icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
    icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
    icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
    icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

    if (ha->mqenable) {
        icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
        icb->rid = __constant_cpu_to_le16(rid);
        if (ha->flags.msix_enabled) {
            msix = &ha->msix_entries[1];
            ql_dbg(ql_dbg_init, vha, 0x00fd,
                "Registering vector 0x%x for base que.\n",
                msix->entry);
            icb->msix = cpu_to_le16(msix->entry);
        }
        /* Use alternate PCI bus number */
        if (MSB(rid))
            icb->firmware_options_2 |=
                __constant_cpu_to_le32(BIT_19);
        /* Use alternate PCI devfn */
        if (LSB(rid))
            icb->firmware_options_2 |=
                __constant_cpu_to_le32(BIT_18);

        /* Use Disable MSIX Handshake mode for capable adapters */
        if (IS_MSIX_NACK_CAPABLE(ha)) {
            icb->firmware_options_2 &=
                __constant_cpu_to_le32(~BIT_22);
            ha->flags.disable_msix_handshake = 1;
            ql_dbg(ql_dbg_init, vha, 0x00fe,
                "MSIX Handshake Disable Mode turned on.\n");
        } else {
            icb->firmware_options_2 |=
                __constant_cpu_to_le32(BIT_22);
        }
        icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);

        WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
        WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
        WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
        WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
    } else {
        WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
        WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
        WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
        WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
    }
    /* PCI posting */
    RD_REG_DWORD(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
    int rval;
    unsigned long flags = 0;
    int cnt, que;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req;
    struct rsp_que *rsp;
    struct scsi_qla_host *vp;
    struct mid_init_cb_24xx *mid_init_cb =
        (struct mid_init_cb_24xx *) ha->init_cb;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Clear outstanding commands array. */
    for (que = 0; que < ha->max_req_queues; que++) {
        req = ha->req_q_map[que];
        if (!req)
            continue;
        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
            req->outstanding_cmds[cnt] = NULL;

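        /* Handle 0 is reserved; valid command handles start at 1. */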
        req->current_outstanding_cmd = 1;

        /* Initialize firmware. */
        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
    }

    for (que = 0; que < ha->max_rsp_queues; que++) {
        rsp = ha->rsp_q_map[que];
        if (!rsp)
            continue;
        /* Initialize response queue entries */
        qla2x00_init_response_q_entries(rsp);
    }

    spin_lock(&ha->vport_slock);
    /* Clear RSCN queue. */
    list_for_each_entry(vp, &ha->vp_list, list) {
        vp->rscn_in_ptr = 0;
        vp->rscn_out_ptr = 0;
    }

    spin_unlock(&ha->vport_slock);

    ha->isp_ops->config_rings(vha);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    /* Update any ISP specific firmware options before initialization. */
    ha->isp_ops->update_fw_options(vha);

    ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

    if (ha->flags.npiv_supported) {
        if (ha->operating_mode == LOOP)
            ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
        mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
    }

    if (IS_FWI2_CAPABLE(ha)) {
        mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
        mid_init_cb->init_cb.execution_throttle =
            cpu_to_le16(ha->fw_xcb_count);
    }

    rval = qla2x00_init_firmware(vha, ha->init_cb_size);
    if (rval) {
        ql_log(ql_log_fatal, vha, 0x00d2,
            "Init Firmware **** FAILED ****.\n");
    } else {
        ql_dbg(ql_dbg_init, vha, 0x00d3,
            "Init Firmware -- success.\n");
    }

    return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
    int rval;
    unsigned long wtime, mtime, cs84xx_time;
    uint16_t min_wait;  /* Minimum wait time if loop is down */
    uint16_t wait_time; /* Wait time if loop is coming ready */
    uint16_t state[5];
    struct qla_hw_data *ha = vha->hw;

    rval = QLA_SUCCESS;

    /* 20 seconds for loop down. */
    min_wait = 20;

    /*
     * Firmware should take at most one RATOV to login, plus 5 seconds for
     * our own processing.
     */
    if ((wait_time = (ha->retry_count * ha->login_timeout) + 5) < min_wait) {
        wait_time = min_wait;
    }
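    /*
     * Net effect: allow retry_count logins of login_timeout seconds
     * each, plus five seconds of driver overhead, but never less than
     * the 20-second loop-down minimum.
     */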
1896 1896
1897 /* Min wait time if loop down */ 1897 /* Min wait time if loop down */
1898 mtime = jiffies + (min_wait * HZ); 1898 mtime = jiffies + (min_wait * HZ);
1899 1899
1900 /* wait time before firmware ready */ 1900 /* wait time before firmware ready */
1901 wtime = jiffies + (wait_time * HZ); 1901 wtime = jiffies + (wait_time * HZ);
1902 1902
1903 /* Wait for ISP to finish LIP */ 1903 /* Wait for ISP to finish LIP */
1904 if (!vha->flags.init_done) 1904 if (!vha->flags.init_done)
1905 ql_log(ql_log_info, vha, 0x801e, 1905 ql_log(ql_log_info, vha, 0x801e,
1906 "Waiting for LIP to complete.\n"); 1906 "Waiting for LIP to complete.\n");
1907 1907
1908 do { 1908 do {
1909 rval = qla2x00_get_firmware_state(vha, state); 1909 rval = qla2x00_get_firmware_state(vha, state);
1910 if (rval == QLA_SUCCESS) { 1910 if (rval == QLA_SUCCESS) {
1911 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1911 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1912 vha->device_flags &= ~DFLG_NO_CABLE; 1912 vha->device_flags &= ~DFLG_NO_CABLE;
1913 } 1913 }
1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1915 ql_dbg(ql_dbg_taskm, vha, 0x801f, 1915 ql_dbg(ql_dbg_taskm, vha, 0x801f,
1916 "fw_state=%x 84xx=%x.\n", state[0], 1916 "fw_state=%x 84xx=%x.\n", state[0],
1917 state[2]); 1917 state[2]);
1918 if ((state[2] & FSTATE_LOGGED_IN) && 1918 if ((state[2] & FSTATE_LOGGED_IN) &&
1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1920 ql_dbg(ql_dbg_taskm, vha, 0x8028, 1920 ql_dbg(ql_dbg_taskm, vha, 0x8028,
1921 "Sending verify iocb.\n"); 1921 "Sending verify iocb.\n");
1922 1922
1923 cs84xx_time = jiffies; 1923 cs84xx_time = jiffies;
1924 rval = qla84xx_init_chip(vha); 1924 rval = qla84xx_init_chip(vha);
1925 if (rval != QLA_SUCCESS) { 1925 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn, 1926 ql_log(ql_log_warn,
1927 vha, 0x8043, 1927 vha, 0x8043,
1928 "Init chip failed.\n"); 1928 "Init chip failed.\n");
1929 break; 1929 break;
1930 } 1930 }
1931 1931
1932 /* Add time taken to initialize. */ 1932 /* Add time taken to initialize. */
1933 cs84xx_time = jiffies - cs84xx_time; 1933 cs84xx_time = jiffies - cs84xx_time;
1934 wtime += cs84xx_time; 1934 wtime += cs84xx_time;
1935 mtime += cs84xx_time; 1935 mtime += cs84xx_time;
1936 ql_dbg(ql_dbg_taskm, vha, 0x8042, 1936 ql_dbg(ql_dbg_taskm, vha, 0x8042,
1937 "Increasing wait time by %ld. " 1937 "Increasing wait time by %ld. "
1938 "New time %ld.\n", cs84xx_time, 1938 "New time %ld.\n", cs84xx_time,
1939 wtime); 1939 wtime);
1940 } 1940 }
1941 } else if (state[0] == FSTATE_READY) { 1941 } else if (state[0] == FSTATE_READY) {
1942 ql_dbg(ql_dbg_taskm, vha, 0x8037, 1942 ql_dbg(ql_dbg_taskm, vha, 0x8037,
1943 "F/W Ready - OK.\n"); 1943 "F/W Ready - OK.\n");
1944 1944
1945 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1945 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1946 &ha->login_timeout, &ha->r_a_tov); 1946 &ha->login_timeout, &ha->r_a_tov);
1947 1947
1948 rval = QLA_SUCCESS; 1948 rval = QLA_SUCCESS;
1949 break; 1949 break;
1950 } 1950 }
1951 1951
1952 rval = QLA_FUNCTION_FAILED; 1952 rval = QLA_FUNCTION_FAILED;
1953 1953
1954 if (atomic_read(&vha->loop_down_timer) && 1954 if (atomic_read(&vha->loop_down_timer) &&
1955 state[0] != FSTATE_READY) { 1955 state[0] != FSTATE_READY) {
1956 /* Loop down. Timeout on min_wait for states 1956 /* Loop down. Timeout on min_wait for states
1957 * other than Wait for Login. 1957 * other than Wait for Login.
1958 */ 1958 */
1959 if (time_after_eq(jiffies, mtime)) { 1959 if (time_after_eq(jiffies, mtime)) {
1960 ql_log(ql_log_info, vha, 0x8038, 1960 ql_log(ql_log_info, vha, 0x8038,
1961 "Cable is unplugged...\n"); 1961 "Cable is unplugged...\n");
1962 1962
1963 vha->device_flags |= DFLG_NO_CABLE; 1963 vha->device_flags |= DFLG_NO_CABLE;
1964 break; 1964 break;
1965 } 1965 }
1966 } 1966 }
1967 } else { 1967 } else {
1968 /* Mailbox cmd failed. Timeout on min_wait. */ 1968 /* Mailbox cmd failed. Timeout on min_wait. */
1969 if (time_after_eq(jiffies, mtime) || 1969 if (time_after_eq(jiffies, mtime) ||
1970 ha->flags.isp82xx_fw_hung) 1970 ha->flags.isp82xx_fw_hung)
1971 break; 1971 break;
1972 } 1972 }
1973 1973
1974 if (time_after_eq(jiffies, wtime)) 1974 if (time_after_eq(jiffies, wtime))
1975 break; 1975 break;
1976 1976
1977 /* Delay for a while */ 1977 /* Delay for a while */
1978 msleep(500); 1978 msleep(500);
1979 1979
1980 ql_dbg(ql_dbg_taskm, vha, 0x8039, 1980 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies); 1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1982 } while (1); 1982 } while (1);
1983 1983
1984 ql_dbg(ql_dbg_taskm, vha, 0x803a, 1984 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1985 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], 1985 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
1986 state[1], state[2], state[3], state[4], jiffies); 1986 state[1], state[2], state[3], state[4], jiffies);
1987 1987
1988 if (rval) { 1988 if (rval) {
1989 ql_log(ql_log_warn, vha, 0x803b, 1989 ql_log(ql_log_warn, vha, 0x803b,
1990 "Firmware ready **** FAILED ****.\n"); 1990 "Firmware ready **** FAILED ****.\n");
1991 } 1991 }
1992 1992
1993 return (rval); 1993 return (rval);
1994 } 1994 }
1995
1996  /*
1997   * qla2x00_configure_hba
1998   *      Setup adapter context.
1999   *
2000   * Input:
2001   *      ha = adapter state pointer.
2002   *
2003   * Returns:
2004   *      0 = success
2005   *
2006   * Context:
2007   *      Kernel context.
2008   */
2009  static int
2010  qla2x00_configure_hba(scsi_qla_host_t *vha)
2011  {
2012      int rval;
2013      uint16_t loop_id;
2014      uint16_t topo;
2015      uint16_t sw_cap;
2016      uint8_t al_pa;
2017      uint8_t area;
2018      uint8_t domain;
2019      char connect_type[22];
2020      struct qla_hw_data *ha = vha->hw;
2021
2022      /* Get host addresses. */
2023      rval = qla2x00_get_adapter_id(vha,
2024          &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2025      if (rval != QLA_SUCCESS) {
2026          if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2027              IS_QLA8XXX_TYPE(ha) ||
2028              (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2029              ql_dbg(ql_dbg_disc, vha, 0x2008,
2030                  "Loop is in a transition state.\n");
2031          } else {
2032              ql_log(ql_log_warn, vha, 0x2009,
2033                  "Unable to get host loop ID.\n");
2034              set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2035          }
2036          return (rval);
2037      }
2038
2039      if (topo == 4) {
2040          ql_log(ql_log_info, vha, 0x200a,
2041              "Cannot get topology - retrying.\n");
2042          return (QLA_FUNCTION_FAILED);
2043      }
2044
2045      vha->loop_id = loop_id;
2046
2047      /* initialize */
2048      ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2049      ha->operating_mode = LOOP;
2050      ha->switch_cap = 0;
2051
2052      switch (topo) {
2053      case 0:
2054          ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2055          ha->current_topology = ISP_CFG_NL;
2056          strcpy(connect_type, "(Loop)");
2057          break;
2058
2059      case 1:
2060          ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2061          ha->switch_cap = sw_cap;
2062          ha->current_topology = ISP_CFG_FL;
2063          strcpy(connect_type, "(FL_Port)");
2064          break;
2065
2066      case 2:
2067          ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2068          ha->operating_mode = P2P;
2069          ha->current_topology = ISP_CFG_N;
2070          strcpy(connect_type, "(N_Port-to-N_Port)");
2071          break;
2072
2073      case 3:
2074          ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2075          ha->switch_cap = sw_cap;
2076          ha->operating_mode = P2P;
2077          ha->current_topology = ISP_CFG_F;
2078          strcpy(connect_type, "(F_Port)");
2079          break;
2080
2081      default:
2082          ql_dbg(ql_dbg_disc, vha, 0x200f,
2083              "HBA in unknown topology %x, using NL.\n", topo);
2084          ha->current_topology = ISP_CFG_NL;
2085          strcpy(connect_type, "(Loop)");
2086          break;
2087      }
2088
2089      /* Save Host port and loop ID. */
2090      /* byte order - Big Endian */
2091      vha->d_id.b.domain = domain;
2092      vha->d_id.b.area = area;
2093      vha->d_id.b.al_pa = al_pa;
2094
2095      if (!vha->flags.init_done)
2096          ql_log(ql_log_info, vha, 0x2010,
2097              "Topology - %s, Host Loop address 0x%x.\n",
2098              connect_type, vha->loop_id);
2099
2100      if (rval) {
2101          ql_log(ql_log_warn, vha, 0x2011,
2102              "%s FAILED\n", __func__);
2103      } else {
2104          ql_dbg(ql_dbg_disc, vha, 0x2012,
2105              "%s success\n", __func__);
2106      }
2107
2108      return (rval);
2109  }
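
For reference, the four firmware topology codes handled by the switch above, restated as a standalone lookup (code 4 is the firmware's "topology not yet available" indication, which the function treats as a retryable failure). This is an illustrative rephrasing, not driver code:

    static const char *topo_name(unsigned int topo)
    {
        switch (topo) {
        case 0:  return "NL - arbitrated loop";          /* ISP_CFG_NL */
        case 1:  return "FL - public loop via FL_Port";  /* ISP_CFG_FL */
        case 2:  return "N  - point-to-point";           /* ISP_CFG_N  */
        case 3:  return "F  - fabric via F_Port";        /* ISP_CFG_F  */
        default: return "unknown - default to NL";
        }
    }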
2110
2111  inline void
2112  qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2113      char *def)
2114  {
2115      char *st, *en;
2116      uint16_t index;
2117      struct qla_hw_data *ha = vha->hw;
2118      int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2119          !IS_QLA8XXX_TYPE(ha);
2120
2121      if (memcmp(model, BINZERO, len) != 0) {
2122          strncpy(ha->model_number, model, len);
2123          st = en = ha->model_number;
2124          en += len - 1;
2125          while (en > st) {
2126              if (*en != 0x20 && *en != 0x00)
2127                  break;
2128              *en-- = '\0';
2129          }
2130
2131          index = (ha->pdev->subsystem_device & 0xff);
2132          if (use_tbl &&
2133              ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2134              index < QLA_MODEL_NAMES)
2135              strncpy(ha->model_desc,
2136                  qla2x00_model_name[index * 2 + 1],
2137                  sizeof(ha->model_desc) - 1);
2138      } else {
2139          index = (ha->pdev->subsystem_device & 0xff);
2140          if (use_tbl &&
2141              ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2142              index < QLA_MODEL_NAMES) {
2143              strcpy(ha->model_number,
2144                  qla2x00_model_name[index * 2]);
2145              strncpy(ha->model_desc,
2146                  qla2x00_model_name[index * 2 + 1],
2147                  sizeof(ha->model_desc) - 1);
2148          } else {
2149              strcpy(ha->model_number, def);
2150          }
2151      }
2152      if (IS_FWI2_CAPABLE(ha))
2153          qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2154              sizeof(ha->model_desc));
2155  }
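
The while loop near the top of qla2x00_set_model_info() right-trims a fixed-width, possibly non-NUL-terminated model string. The same step in isolation, as a hedged sketch:

    #include <linux/types.h>

    /* Strip trailing spaces and NUL padding from a len-byte buffer. */
    static void rtrim_model(char *buf, size_t len)
    {
        char *en = buf + len - 1;

        while (en > buf && (*en == ' ' || *en == '\0'))
            *en-- = '\0';       /* overwrite padding with terminators */
    }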
2156
2157  /* On sparc systems, obtain port and node WWN from firmware
2158   * properties.
2159   */
2160  static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2161  {
2162  #ifdef CONFIG_SPARC
2163      struct qla_hw_data *ha = vha->hw;
2164      struct pci_dev *pdev = ha->pdev;
2165      struct device_node *dp = pci_device_to_OF_node(pdev);
2166      const u8 *val;
2167      int len;
2168
2169      val = of_get_property(dp, "port-wwn", &len);
2170      if (val && len >= WWN_SIZE)
2171          memcpy(nv->port_name, val, WWN_SIZE);
2172
2173      val = of_get_property(dp, "node-wwn", &len);
2174      if (val && len >= WWN_SIZE)
2175          memcpy(nv->node_name, val, WWN_SIZE);
2176  #endif
2177  }
2178
2179  /*
2180   * NVRAM configuration for ISP 2xxx
2181   *
2182   * Input:
2183   *      ha = adapter block pointer.
2184   *
2185   * Output:
2186   *      initialization control block in response_ring
2187   *      host adapter parameters in host adapter block
2188   *
2189   * Returns:
2190   *      0 = success.
2191   */
2192  int
2193  qla2x00_nvram_config(scsi_qla_host_t *vha)
2194  {
2195      int rval;
2196      uint8_t chksum = 0;
2197      uint16_t cnt;
2198      uint8_t *dptr1, *dptr2;
2199      struct qla_hw_data *ha = vha->hw;
2200      init_cb_t *icb = ha->init_cb;
2201      nvram_t *nv = ha->nvram;
2202      uint8_t *ptr = ha->nvram;
2203      struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2204
2205      rval = QLA_SUCCESS;
2206
2207      /* Determine NVRAM starting address. */
2208      ha->nvram_size = sizeof(nvram_t);
2209      ha->nvram_base = 0;
2210      if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2211          if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2212              ha->nvram_base = 0x80;
2213
2214      /* Get NVRAM data and calculate checksum. */
2215      ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2216      for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2217          chksum += *ptr++;
2218
2219      ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2220          "Contents of NVRAM.\n");
2221      ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2222          (uint8_t *)nv, ha->nvram_size);
2223
2224      /* Bad NVRAM data, set default parameters. */
2225      if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2226          nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2227          /* Reset NVRAM data. */
2228          ql_log(ql_log_warn, vha, 0x0064,
2229              "Inconsistent NVRAM "
2230              "detected: checksum=0x%x id=%c version=0x%x.\n",
2231              chksum, nv->id[0], nv->nvram_version);
2232          ql_log(ql_log_warn, vha, 0x0065,
2233              "Falling back to "
2234              "functioning (yet invalid -- WWPN) defaults.\n");
2235
2236          /*
2237           * Set default initialization control block.
2238           */
2239          memset(nv, 0, ha->nvram_size);
2240          nv->parameter_block_version = ICB_VERSION;
2241
2242          if (IS_QLA23XX(ha)) {
2243              nv->firmware_options[0] = BIT_2 | BIT_1;
2244              nv->firmware_options[1] = BIT_7 | BIT_5;
2245              nv->add_firmware_options[0] = BIT_5;
2246              nv->add_firmware_options[1] = BIT_5 | BIT_4;
2247              nv->frame_payload_size = __constant_cpu_to_le16(2048);
2248              nv->special_options[1] = BIT_7;
2249          } else if (IS_QLA2200(ha)) {
2250              nv->firmware_options[0] = BIT_2 | BIT_1;
2251              nv->firmware_options[1] = BIT_7 | BIT_5;
2252              nv->add_firmware_options[0] = BIT_5;
2253              nv->add_firmware_options[1] = BIT_5 | BIT_4;
2254              nv->frame_payload_size = __constant_cpu_to_le16(1024);
2255          } else if (IS_QLA2100(ha)) {
2256              nv->firmware_options[0] = BIT_3 | BIT_1;
2257              nv->firmware_options[1] = BIT_5;
2258              nv->frame_payload_size = __constant_cpu_to_le16(1024);
2259          }
2260
2261          nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2262          nv->execution_throttle = __constant_cpu_to_le16(16);
2263          nv->retry_count = 8;
2264          nv->retry_delay = 1;
2265
2266          nv->port_name[0] = 33;
2267          nv->port_name[3] = 224;
2268          nv->port_name[4] = 139;
2269
2270          qla2xxx_nvram_wwn_from_ofw(vha, nv);
2271
2272          nv->login_timeout = 4;
2273
2274          /*
2275           * Set default host adapter parameters
2276           */
2277          nv->host_p[1] = BIT_2;
2278          nv->reset_delay = 5;
2279          nv->port_down_retry_count = 8;
2280          nv->max_luns_per_target = __constant_cpu_to_le16(8);
2281          nv->link_down_timeout = 60;
2282
2283          rval = 1;
2284      }
2285
2286  #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2287      /*
2288       * The SN2 does not provide BIOS emulation which means you can't change
2289       * potentially bogus BIOS settings. Force the use of default settings
2290       * for link rate and frame size. Hope that the rest of the settings
2291       * are valid.
2292       */
2293      if (ia64_platform_is("sn2")) {
2294          nv->frame_payload_size = __constant_cpu_to_le16(2048);
2295          if (IS_QLA23XX(ha))
2296              nv->special_options[1] = BIT_7;
2297      }
2298  #endif
2299
2300      /* Reset Initialization control block */
2301      memset(icb, 0, ha->init_cb_size);
2302
2303      /*
2304       * Setup driver NVRAM options.
2305       */
2306      nv->firmware_options[0] |= (BIT_6 | BIT_1);
2307      nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2308      nv->firmware_options[1] |= (BIT_5 | BIT_0);
2309      nv->firmware_options[1] &= ~BIT_4;
2310
2311      if (IS_QLA23XX(ha)) {
2312          nv->firmware_options[0] |= BIT_2;
2313          nv->firmware_options[0] &= ~BIT_3;
2314          nv->firmware_options[0] &= ~BIT_6;
2315          nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2316
2317          if (IS_QLA2300(ha)) {
2318              if (ha->fb_rev == FPM_2310) {
2319                  strcpy(ha->model_number, "QLA2310");
2320              } else {
2321                  strcpy(ha->model_number, "QLA2300");
2322              }
2323          } else {
2324              qla2x00_set_model_info(vha, nv->model_number,
2325                  sizeof(nv->model_number), "QLA23xx");
2326          }
2327      } else if (IS_QLA2200(ha)) {
2328          nv->firmware_options[0] |= BIT_2;
2329          /*
2330           * 'Point-to-point preferred, else loop' is not a safe
2331           * connection mode setting.
2332           */
2333          if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2334              (BIT_5 | BIT_4)) {
2335              /* Force 'loop preferred, else point-to-point'. */
2336              nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2337              nv->add_firmware_options[0] |= BIT_5;
2338          }
2339          strcpy(ha->model_number, "QLA22xx");
2340      } else /*if (IS_QLA2100(ha))*/ {
2341          strcpy(ha->model_number, "QLA2100");
2342      }
2343
2344      /*
2345       * Copy over NVRAM RISC parameter block to initialization control block.
2346       */
2347      dptr1 = (uint8_t *)icb;
2348      dptr2 = (uint8_t *)&nv->parameter_block_version;
2349      cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2350      while (cnt--)
2351          *dptr1++ = *dptr2++;
2352
2353      /* Copy 2nd half. */
2354      dptr1 = (uint8_t *)icb->add_firmware_options;
2355      cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2356      while (cnt--)
2357          *dptr1++ = *dptr2++;
2358
2359      /* Use alternate WWN? */
2360      if (nv->host_p[1] & BIT_7) {
2361          memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2362          memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2363      }
2364
2365      /* Prepare nodename */
2366      if ((icb->firmware_options[1] & BIT_6) == 0) {
2367          /*
2368           * Firmware will apply the following mask if the nodename was
2369           * not provided.
2370           */
2371          memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2372          icb->node_name[0] &= 0xF0;
2373      }
2374
2375      /*
2376       * Set host adapter parameters.
2377       */
2378
2379      /*
2380       * BIT_7 in the host-parameters section allows for modification to
2381       * internal driver logging.
2382       */
2383      if (nv->host_p[0] & BIT_7)
2384          ql2xextended_error_logging = 0x7fffffff;
2385      ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2386      /* Always load RISC code on non ISP2[12]00 chips. */
2387      if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2388          ha->flags.disable_risc_code_load = 0;
2389      ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2390      ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2391      ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2392      ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2393      ha->flags.disable_serdes = 0;
2394
2395      ha->operating_mode =
2396          (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2397
2398      memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2399          sizeof(ha->fw_seriallink_options));
2400
2401      /* save HBA serial number */
2402      ha->serial0 = icb->port_name[5];
2403      ha->serial1 = icb->port_name[6];
2404      ha->serial2 = icb->port_name[7];
2405      memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2406      memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2407
2408      icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2409
2410      ha->retry_count = nv->retry_count;
2411
2412      /* Set minimum login_timeout to 4 seconds. */
2413      if (nv->login_timeout != ql2xlogintimeout)
2414          nv->login_timeout = ql2xlogintimeout;
2415      if (nv->login_timeout < 4)
2416          nv->login_timeout = 4;
2417      ha->login_timeout = nv->login_timeout;
2418      icb->login_timeout = nv->login_timeout;
2419
2420      /* Set minimum RATOV to 100 tenths of a second. */
2421      ha->r_a_tov = 100;
2422
2423      ha->loop_reset_delay = nv->reset_delay;
2424
2425      /* Link Down Timeout = 0:
2426       *
2427       *  When Port Down timer expires we will start returning
2428       *  I/O's to OS with "DID_NO_CONNECT".
2429       *
2430       * Link Down Timeout != 0:
2431       *
2432       *  The driver waits for the link to come up after link down
2433       *  before returning I/Os to OS with "DID_NO_CONNECT".
2434       */
2435      if (nv->link_down_timeout == 0) {
2436          ha->loop_down_abort_time =
2437              (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2438      } else {
2439          ha->link_down_timeout = nv->link_down_timeout;
2440          ha->loop_down_abort_time =
2441              (LOOP_DOWN_TIME - ha->link_down_timeout);
2442      }
2443
2444      /*
2445       * Need enough time to try and get the port back.
2446       */
2447      ha->port_down_retry_count = nv->port_down_retry_count;
2448      if (qlport_down_retry)
2449          ha->port_down_retry_count = qlport_down_retry;
2450      /* Set login_retry_count */
2451      ha->login_retry_count = nv->retry_count;
2452      if (ha->port_down_retry_count == nv->port_down_retry_count &&
2453          ha->port_down_retry_count > 3)
2454          ha->login_retry_count = ha->port_down_retry_count;
2455      else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2456          ha->login_retry_count = ha->port_down_retry_count;
2457      if (ql2xloginretrycount)
2458          ha->login_retry_count = ql2xloginretrycount;
2459
2460      icb->lun_enables = __constant_cpu_to_le16(0);
2461      icb->command_resource_count = 0;
2462      icb->immediate_notify_resource_count = 0;
2463      icb->timeout = __constant_cpu_to_le16(0);
2464
2465      if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2466          /* Enable RIO */
2467          icb->firmware_options[0] &= ~BIT_3;
2468          icb->add_firmware_options[0] &=
2469              ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2470          icb->add_firmware_options[0] |= BIT_2;
2471          icb->response_accumulation_timer = 3;
2472          icb->interrupt_delay_timer = 5;
2473
2474          vha->flags.process_response_queue = 1;
2475      } else {
2476          /* Enable ZIO. */
2477          if (!vha->flags.init_done) {
2478              ha->zio_mode = icb->add_firmware_options[0] &
2479                  (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2480              ha->zio_timer = icb->interrupt_delay_timer ?
2481                  icb->interrupt_delay_timer: 2;
2482          }
2483          icb->add_firmware_options[0] &=
2484              ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2485          vha->flags.process_response_queue = 0;
2486          if (ha->zio_mode != QLA_ZIO_DISABLED) {
2487              ha->zio_mode = QLA_ZIO_MODE_6;
2488
2489              ql_log(ql_log_info, vha, 0x0068,
2490                  "ZIO mode %d enabled; timer delay (%d us).\n",
2491                  ha->zio_mode, ha->zio_timer * 100);
2492
2493              icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2494              icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2495              vha->flags.process_response_queue = 1;
2496          }
2497      }
2498
2499      if (rval) {
2500          ql_log(ql_log_warn, vha, 0x0069,
2501              "NVRAM configuration failed.\n");
2502      }
2503      return (rval);
2504  }
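
The sanity test at the top of qla2x00_nvram_config() leans on a simple invariant: the bytes of a valid NVRAM image, including the stored checksum byte, sum to zero modulo 256, so any nonzero running sum marks the image as corrupt. A standalone sketch of that check:

    #include <linux/types.h>

    /* Returns true when the additive 8-bit checksum of the image is zero. */
    static bool nvram_checksum_ok(const uint8_t *data, size_t size)
    {
        uint8_t sum = 0;

        while (size--)
            sum += *data++;     /* wraps naturally at 8 bits */

        return sum == 0;
    }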
2505
2506  static void
2507  qla2x00_rport_del(void *data)
2508  {
2509      fc_port_t *fcport = data;
2510      struct fc_rport *rport;
2511      unsigned long flags;
2512
2513      spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2514      rport = fcport->drport ? fcport->drport: fcport->rport;
2515      fcport->drport = NULL;
2516      spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2517      if (rport)
2518          fc_remote_port_delete(rport);
2519  }
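
qla2x00_rport_del() uses a detach-then-act idiom: the rport pointer is snapshotted and cleared while holding host_lock, and fc_remote_port_delete(), which may sleep, is only called after the lock is dropped. A generic sketch of the same idiom, with hypothetical types (struct thing, teardown()):

    #include <linux/spinlock.h>

    struct thing;                       /* opaque payload */

    struct holder {
        spinlock_t lock;
        struct thing *res;
    };

    static void holder_del(struct holder *h, void (*teardown)(struct thing *))
    {
        struct thing *res;
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        res = h->res;                   /* snapshot under the lock */
        h->res = NULL;                  /* nobody can tear it down twice */
        spin_unlock_irqrestore(&h->lock, flags);

        if (res)
            teardown(res);              /* may sleep; lock already released */
    }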
2520
2521  /**
2522   * qla2x00_alloc_fcport() - Allocate a generic fcport.
2523   * @vha: HA context
2524   * @flags: allocation flags
2525   *
2526   * Returns a pointer to the allocated fcport, or NULL, if none available.
2527   */
2528  fc_port_t *
2529  qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2530  {
2531      fc_port_t *fcport;
2532
2533      fcport = kzalloc(sizeof(fc_port_t), flags);
2534      if (!fcport)
2535          return NULL;
2536
2537      /* Setup fcport template structure. */
2538      fcport->vha = vha;
2539      fcport->vp_idx = vha->vp_idx;
2540      fcport->port_type = FCT_UNKNOWN;
2541      fcport->loop_id = FC_NO_LOOP_ID;
2542      qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2543      fcport->supported_classes = FC_COS_UNSPECIFIED;
2544
2545      return fcport;
2546  }
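
A representative call pattern for this allocator, matching its use later in this file: GFP_KERNEL from process context, with an explicit NULL check before the fcport is touched (fragment, not a complete function):

    fc_port_t *new_fcport;

    new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);     /* may sleep */
    if (new_fcport == NULL)
        return QLA_MEMORY_ALLOC_FAILED;                     /* as in the scan paths */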
2547
2548  /*
2549   * qla2x00_configure_loop
2550   *      Updates Fibre Channel Device Database with what is actually on loop.
2551   *
2552   * Input:
2553   *      ha = adapter block pointer.
2554   *
2555   * Returns:
2556   *      0 = success.
2557   *      1 = error.
2558   *      2 = database was full and device was not configured.
2559   */
2560  static int
2561  qla2x00_configure_loop(scsi_qla_host_t *vha)
2562  {
2563      int rval;
2564      unsigned long flags, save_flags;
2565      struct qla_hw_data *ha = vha->hw;
2566      rval = QLA_SUCCESS;
2567
2568      /* Get Initiator ID */
2569      if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2570          rval = qla2x00_configure_hba(vha);
2571          if (rval != QLA_SUCCESS) {
2572              ql_dbg(ql_dbg_disc, vha, 0x2013,
2573                  "Unable to configure HBA.\n");
2574              return (rval);
2575          }
2576      }
2577
2578      save_flags = flags = vha->dpc_flags;
2579      ql_dbg(ql_dbg_disc, vha, 0x2014,
2580          "Configure loop -- dpc flags = 0x%lx.\n", flags);
2581
2582      /*
2583       * If we have both an RSCN and PORT UPDATE pending then handle them
2584       * both at the same time.
2585       */
2586      clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2587      clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2588
2589      qla2x00_get_data_rate(vha);
2590
2591      /* Determine what we need to do */
2592      if (ha->current_topology == ISP_CFG_FL &&
2593          (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2594
2595          vha->flags.rscn_queue_overflow = 1;
2596          set_bit(RSCN_UPDATE, &flags);
2597
2598      } else if (ha->current_topology == ISP_CFG_F &&
2599          (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2600
2601          vha->flags.rscn_queue_overflow = 1;
2602          set_bit(RSCN_UPDATE, &flags);
2603          clear_bit(LOCAL_LOOP_UPDATE, &flags);
2604
2605      } else if (ha->current_topology == ISP_CFG_N) {
2606          clear_bit(RSCN_UPDATE, &flags);
2607
2608      } else if (!vha->flags.online ||
2609          (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2610
2611          vha->flags.rscn_queue_overflow = 1;
2612          set_bit(RSCN_UPDATE, &flags);
2613          set_bit(LOCAL_LOOP_UPDATE, &flags);
2614      }
2615
2616      if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2617          if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2618              ql_dbg(ql_dbg_disc, vha, 0x2015,
2619                  "Loop resync needed, failing.\n");
2620              rval = QLA_FUNCTION_FAILED;
2621          }
2622          else
2623              rval = qla2x00_configure_local_loop(vha);
2624      }
2625
2626      if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2627          if (LOOP_TRANSITION(vha)) {
2628              ql_dbg(ql_dbg_disc, vha, 0x201e,
2629                  "Needs RSCN update and loop transition.\n");
2630              rval = QLA_FUNCTION_FAILED;
2631          }
2632          else
2633              rval = qla2x00_configure_fabric(vha);
2634      }
2635
2636      if (rval == QLA_SUCCESS) {
2637          if (atomic_read(&vha->loop_down_timer) ||
2638              test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2639              rval = QLA_FUNCTION_FAILED;
2640          } else {
2641              atomic_set(&vha->loop_state, LOOP_READY);
2642              ql_dbg(ql_dbg_disc, vha, 0x2069,
2643                  "LOOP READY.\n");
2644          }
2645      }
2646
2647      if (rval) {
2648          ql_dbg(ql_dbg_disc, vha, 0x206a,
2649              "%s *** FAILED ***.\n", __func__);
2650      } else {
2651          ql_dbg(ql_dbg_disc, vha, 0x206b,
2652              "%s: exiting normally.\n", __func__);
2653      }
2654
2655      /* Restore state if a resync event occurred during processing */
2656      if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2657          if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2658              set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2659          if (test_bit(RSCN_UPDATE, &save_flags)) {
2660              set_bit(RSCN_UPDATE, &vha->dpc_flags);
2661              if (!IS_ALOGIO_CAPABLE(ha))
2662                  vha->flags.rscn_queue_overflow = 1;
2663          }
2664      }
2665
2666      return (rval);
2667  }
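
The save/clear/restore choreography above relies on the atomic bitops so the DPC thread and interrupt-time producers can share dpc_flags without a lock. A minimal sketch of the same pattern, with hypothetical bit numbers and a simplified restore condition:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define LOCAL_UPDATE_BIT    0       /* hypothetical bit assignments */
    #define RSCN_UPDATE_BIT     1

    static void handle_update_events(unsigned long *dpc_flags, bool resync_needed)
    {
        unsigned long save = *dpc_flags;        /* snapshot before clearing */

        clear_bit(LOCAL_UPDATE_BIT, dpc_flags);
        clear_bit(RSCN_UPDATE_BIT, dpc_flags);

        /* ... process the work implied by the snapshot ... */

        if (resync_needed && test_bit(RSCN_UPDATE_BIT, &save))
            set_bit(RSCN_UPDATE_BIT, dpc_flags);    /* re-arm for the next pass */
    }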
2668
2669
2670
2671  /*
2672   * qla2x00_configure_local_loop
2673   *      Updates Fibre Channel Device Database with local loop devices.
2674   *
2675   * Input:
2676   *      ha = adapter block pointer.
2677   *
2678   * Returns:
2679   *      0 = success.
2680   */
2681  static int
2682  qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2683  {
2684      int rval, rval2;
2685      int found_devs;
2686      int found;
2687      fc_port_t *fcport, *new_fcport;
2688
2689      uint16_t index;
2690      uint16_t entries;
2691      char *id_iter;
2692      uint16_t loop_id;
2693      uint8_t domain, area, al_pa;
2694      struct qla_hw_data *ha = vha->hw;
2695
2696      found_devs = 0;
2697      new_fcport = NULL;
2698      entries = MAX_FIBRE_DEVICES;
2699
2700      ql_dbg(ql_dbg_disc, vha, 0x2016,
2701          "Getting FCAL position map.\n");
2702      if (ql2xextended_error_logging & ql_dbg_disc)
2703          qla2x00_get_fcal_position_map(vha, NULL);
2704
2705      /* Get list of logged in devices. */
2706      memset(ha->gid_list, 0, GID_LIST_SIZE);
2707      rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2708          &entries);
2709      if (rval != QLA_SUCCESS)
2710          goto cleanup_allocation;
2711
2712      ql_dbg(ql_dbg_disc, vha, 0x2017,
2713          "Entries in ID list (%d).\n", entries);
2714      ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2715          (uint8_t *)ha->gid_list,
2716          entries * sizeof(struct gid_list_info));
2717
2718      /* Allocate temporary fcport for any new fcports discovered. */
2719      new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2720      if (new_fcport == NULL) {
2721          ql_log(ql_log_warn, vha, 0x2018,
2722              "Memory allocation failed for fcport.\n");
2723          rval = QLA_MEMORY_ALLOC_FAILED;
2724          goto cleanup_allocation;
2725      }
2726      new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2727
2728      /*
2729       * Mark local devices that were present with FCF_DEVICE_LOST for now.
2730       */
2731      list_for_each_entry(fcport, &vha->vp_fcports, list) {
2732          if (atomic_read(&fcport->state) == FCS_ONLINE &&
2733              fcport->port_type != FCT_BROADCAST &&
2734              (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2735
2736              ql_dbg(ql_dbg_disc, vha, 0x2019,
2737                  "Marking port lost loop_id=0x%04x.\n",
2738                  fcport->loop_id);
2739
2740              qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2741          }
2742      }
2743
2744      /* Add devices to port list. */
2745      id_iter = (char *)ha->gid_list;
2746      for (index = 0; index < entries; index++) {
2747          domain = ((struct gid_list_info *)id_iter)->domain;
2748          area = ((struct gid_list_info *)id_iter)->area;
2749          al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2750          if (IS_QLA2100(ha) || IS_QLA2200(ha))
2751              loop_id = (uint16_t)
2752                  ((struct gid_list_info *)id_iter)->loop_id_2100;
2753          else
2754              loop_id = le16_to_cpu(
2755                  ((struct gid_list_info *)id_iter)->loop_id);
2756          id_iter += ha->gid_list_info_size;
2757
2758          /* Bypass reserved domain fields. */
2759          if ((domain & 0xf0) == 0xf0)
2760              continue;
2761
2762          /* Bypass if not same domain and area of adapter. */
2763          if (area && domain &&
2764              (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2765              continue;
2766
2767          /* Bypass invalid local loop ID. */
2768          if (loop_id > LAST_LOCAL_LOOP_ID)
2769              continue;
2770
2771          /* Fill in member data. */
2772          new_fcport->d_id.b.domain = domain;
2773          new_fcport->d_id.b.area = area;
2774          new_fcport->d_id.b.al_pa = al_pa;
2775          new_fcport->loop_id = loop_id;
2776          new_fcport->vp_idx = vha->vp_idx;
2777          rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2778          if (rval2 != QLA_SUCCESS) {
2779              ql_dbg(ql_dbg_disc, vha, 0x201a,
2780                  "Failed to retrieve fcport information "
2781                  "-- get_port_database=%x, loop_id=0x%04x.\n",
2782                  rval2, new_fcport->loop_id);
2783              ql_dbg(ql_dbg_disc, vha, 0x201b,
2784                  "Scheduling resync.\n");
2785              set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2786              continue;
2787          }
2788
2789          /* Check for matching device in port list. */
2790          found = 0;
2791          fcport = NULL;
2792          list_for_each_entry(fcport, &vha->vp_fcports, list) {
2793              if (memcmp(new_fcport->port_name, fcport->port_name,
2794                  WWN_SIZE))
2795                  continue;
2796
2797              fcport->flags &= ~FCF_FABRIC_DEVICE;
2798              fcport->loop_id = new_fcport->loop_id;
2799              fcport->port_type = new_fcport->port_type;
2800              fcport->d_id.b24 = new_fcport->d_id.b24;
2801              memcpy(fcport->node_name, new_fcport->node_name,
2802                  WWN_SIZE);
2803
2804              found++;
2805              break;
2806          }
2807
2808          if (!found) {
2809              /* New device, add to fcports list. */
2810              if (vha->vp_idx) {
2811                  new_fcport->vha = vha;
2812                  new_fcport->vp_idx = vha->vp_idx;
2813              }
2814              list_add_tail(&new_fcport->list, &vha->vp_fcports);
2815
2816              /* Allocate a new replacement fcport. */
2817              fcport = new_fcport;
2818              new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2819              if (new_fcport == NULL) {
2820                  ql_log(ql_log_warn, vha, 0x201c,
2821                      "Failed to allocate memory for fcport.\n");
2822                  rval = QLA_MEMORY_ALLOC_FAILED;
2823                  goto cleanup_allocation;
2824              }
2825              new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2826          }
2827
2828          /* Base iIDMA settings on HBA port speed. */
2829          fcport->fp_speed = ha->link_data_rate;
2830
2831          qla2x00_update_fcport(vha, fcport);
2832
2833          found_devs++;
2834      }
2835
2836  cleanup_allocation:
2837      kfree(new_fcport);
2838
2839      if (rval != QLA_SUCCESS) {
2840          ql_dbg(ql_dbg_disc, vha, 0x201d,
2841              "Configure local loop error exit: rval=%x.\n", rval);
2842      }
2843
2844      return (rval);
2845  }
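
The ID-list walk in the function above advances id_iter by ha->gid_list_info_size rather than sizeof(struct gid_list_info), because the per-entry size is reported by firmware and differs across ISP generations. The stride-based iteration in isolation, with a hypothetical consume() callback:

    #include <linux/types.h>

    static void walk_id_list(const char *list, size_t stride,
        unsigned int entries, void (*consume)(const void *entry))
    {
        unsigned int i;

        for (i = 0; i < entries; i++) {
            consume(list);      /* interpret one firmware-format entry */
            list += stride;     /* firmware-reported entry size, not sizeof() */
        }
    }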
2846
2847  static void
2848  qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2849  {
2850  #define LS_UNKNOWN 2
2851      static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2852      char *link_speed;
2853      int rval;
2854      uint16_t mb[4];
2855      struct qla_hw_data *ha = vha->hw;
2856
2857      if (!IS_IIDMA_CAPABLE(ha))
2858          return;
2859
2860      if (atomic_read(&fcport->state) != FCS_ONLINE)
2861          return;
2862
2863      if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2864          fcport->fp_speed > ha->link_data_rate)
2865          return;
2866
2867      rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2868          mb);
2869      if (rval != QLA_SUCCESS) {
2870          ql_dbg(ql_dbg_disc, vha, 0x2004,
2871              "Unable to adjust iIDMA "
2872              "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
2873              "%04x.\n", fcport->port_name[0], fcport->port_name[1],
2874              fcport->port_name[2], fcport->port_name[3],
2875              fcport->port_name[4], fcport->port_name[5],
2876              fcport->port_name[6], fcport->port_name[7], rval,
2877              fcport->fp_speed, mb[0], mb[1]);
2878      } else {
2879          link_speed = link_speeds[LS_UNKNOWN];
2880          if (fcport->fp_speed < 5)
2881              link_speed = link_speeds[fcport->fp_speed];
2882          else if (fcport->fp_speed == 0x13)
2883              link_speed = link_speeds[5];
2884          ql_dbg(ql_dbg_disc, vha, 0x2005,
2885              "iIDMA adjusted to %s GB/s "
2886              "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
2887              fcport->port_name[0], fcport->port_name[1],
2888              fcport->port_name[2], fcport->port_name[3],
2889              fcport->port_name[4], fcport->port_name[5],
2890              fcport->port_name[6], fcport->port_name[7]);
2891      }
2892  }
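
The link-speed reporting above is a guarded table lookup: in-range indexes map directly, the out-of-band code 0x13 maps to the 10 Gb/s slot, and everything else falls back to "?". Restated as a standalone helper (illustrative only):

    #include <linux/types.h>

    static const char *speed_label(uint16_t fp_speed)
    {
        static const char * const tbl[] = { "1", "2", "?", "4", "8", "10" };

        if (fp_speed < 5)
            return tbl[fp_speed];   /* direct index for 1/2/4/8 Gb/s codes */
        if (fp_speed == 0x13)
            return tbl[5];          /* firmware's code for 10 Gb/s */
        return tbl[2];              /* LS_UNKNOWN -> "?" */
    }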
2893
2894  static void
2895  qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2896  {
2897      struct fc_rport_identifiers rport_ids;
2898      struct fc_rport *rport;
2899      unsigned long flags;
2900
2901      qla2x00_rport_del(fcport);
2902
2903      rport_ids.node_name = wwn_to_u64(fcport->node_name);
2904      rport_ids.port_name = wwn_to_u64(fcport->port_name);
2905      rport_ids.port_id = fcport->d_id.b.domain << 16 |
2906          fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2907      rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2908      fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2909      if (!rport) {
2910          ql_log(ql_log_warn, vha, 0x2006,
2911              "Unable to allocate fc remote port.\n");
2912          return;
2913      }
2914      spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2915      *((fc_port_t **)rport->dd_data) = fcport;
2916      spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2917
2918      rport->supported_classes = fcport->supported_classes;
2919
2920      rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2921      if (fcport->port_type == FCT_INITIATOR)
2922          rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2923      if (fcport->port_type == FCT_TARGET)
2924          rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2925      fc_remote_port_rolechg(rport, rport_ids.roles);
2926  }
2927 2927
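qla2x00_reg_remote_port() builds rport_ids.port_id by packing the three FC address bytes into one 24-bit value. A self-contained sketch of that packing and its inverse, assuming the usual domain/area/AL_PA ordering (the struct below is a local stand-in, not the driver's port_id_t):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the byte view of a 24-bit FC port ID. */
    struct fc_addr {
            uint8_t domain, area, al_pa;
    };

    static uint32_t pack_port_id(struct fc_addr a)
    {
            /* domain is the most significant byte of the 24-bit ID */
            return (uint32_t)a.domain << 16 | (uint32_t)a.area << 8 | a.al_pa;
    }

    static struct fc_addr unpack_port_id(uint32_t b24)
    {
            struct fc_addr a = {
                    .domain = (b24 >> 16) & 0xff,
                    .area   = (b24 >> 8) & 0xff,
                    .al_pa  = b24 & 0xff,
            };
            return a;
    }

    int main(void)
    {
            struct fc_addr a = { 0x01, 0x02, 0xef };
            uint32_t id = pack_port_id(a);
            struct fc_addr b = unpack_port_id(id);

            printf("packed=%06x -> %02x.%02x.%02x\n",
                id, b.domain, b.area, b.al_pa);
            return 0;
    }
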
2928 /* 2928 /*
2929 * qla2x00_update_fcport 2929 * qla2x00_update_fcport
2930 * Updates device on list. 2930 * Updates device on list.
2931 * 2931 *
2932 * Input: 2932 * Input:
2933 * ha = adapter block pointer. 2933 * ha = adapter block pointer.
2934 * fcport = port structure pointer. 2934 * fcport = port structure pointer.
2935 * 2935 *
2936 * Return: 2936 * Return:
2937 * 0 - Success 2937 * 0 - Success
2938 * BIT_0 - error 2938 * BIT_0 - error
2939 * 2939 *
2940 * Context: 2940 * Context:
2941 * Kernel context. 2941 * Kernel context.
2942 */ 2942 */
2943 void 2943 void
2944 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2944 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2945 { 2945 {
2946 fcport->vha = vha; 2946 fcport->vha = vha;
2947 fcport->login_retry = 0; 2947 fcport->login_retry = 0;
2948 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 2948 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2949 2949
2950 qla2x00_iidma_fcport(vha, fcport); 2950 qla2x00_iidma_fcport(vha, fcport);
2951 qla24xx_update_fcport_fcp_prio(vha, fcport); 2951 qla24xx_update_fcport_fcp_prio(vha, fcport);
2952 qla2x00_reg_remote_port(vha, fcport); 2952 qla2x00_reg_remote_port(vha, fcport);
2953 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 2953 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
2954 } 2954 }
2955 2955
2956 /* 2956 /*
2957 * qla2x00_configure_fabric 2957 * qla2x00_configure_fabric
2958 * Set up SNS devices with loop IDs. 2958 * Set up SNS devices with loop IDs.
2959 * 2959 *
2960 * Input: 2960 * Input:
2961 * ha = adapter block pointer. 2961 * ha = adapter block pointer.
2962 * 2962 *
2963 * Returns: 2963 * Returns:
2964 * 0 = success. 2964 * 0 = success.
2965 * BIT_0 = error 2965 * BIT_0 = error
2966 */ 2966 */
2967 static int 2967 static int
2968 qla2x00_configure_fabric(scsi_qla_host_t *vha) 2968 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2969 { 2969 {
2970 int rval, rval2; 2970 int rval, rval2;
2971 fc_port_t *fcport, *fcptemp; 2971 fc_port_t *fcport, *fcptemp;
2972 uint16_t next_loopid; 2972 uint16_t next_loopid;
2973 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2973 uint16_t mb[MAILBOX_REGISTER_COUNT];
2974 uint16_t loop_id; 2974 uint16_t loop_id;
2975 LIST_HEAD(new_fcports); 2975 LIST_HEAD(new_fcports);
2976 struct qla_hw_data *ha = vha->hw; 2976 struct qla_hw_data *ha = vha->hw;
2977 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2977 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2978 2978
2979 /* If FL port exists, then SNS is present */ 2979 /* If FL port exists, then SNS is present */
2980 if (IS_FWI2_CAPABLE(ha)) 2980 if (IS_FWI2_CAPABLE(ha))
2981 loop_id = NPH_F_PORT; 2981 loop_id = NPH_F_PORT;
2982 else 2982 else
2983 loop_id = SNS_FL_PORT; 2983 loop_id = SNS_FL_PORT;
2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2985 if (rval != QLA_SUCCESS) { 2985 if (rval != QLA_SUCCESS) {
2986 ql_dbg(ql_dbg_disc, vha, 0x201f, 2986 ql_dbg(ql_dbg_disc, vha, 0x201f,
2987 "MBX_GET_PORT_NAME failed, No FL Port.\n"); 2987 "MBX_GET_PORT_NAME failed, No FL Port.\n");
2988 2988
2989 vha->device_flags &= ~SWITCH_FOUND; 2989 vha->device_flags &= ~SWITCH_FOUND;
2990 return (QLA_SUCCESS); 2990 return (QLA_SUCCESS);
2991 } 2991 }
2992 vha->device_flags |= SWITCH_FOUND; 2992 vha->device_flags |= SWITCH_FOUND;
2993 2993
2994 /* Mark devices that need re-synchronization. */ 2994 /* Mark devices that need re-synchronization. */
2995 rval2 = qla2x00_device_resync(vha); 2995 rval2 = qla2x00_device_resync(vha);
2996 if (rval2 == QLA_RSCNS_HANDLED) { 2996 if (rval2 == QLA_RSCNS_HANDLED) {
2997 /* No point doing the scan, just continue. */ 2997 /* No point doing the scan, just continue. */
2998 return (QLA_SUCCESS); 2998 return (QLA_SUCCESS);
2999 } 2999 }
3000 do { 3000 do {
3001 /* FDMI support. */ 3001 /* FDMI support. */
3002 if (ql2xfdmienable && 3002 if (ql2xfdmienable &&
3003 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) 3003 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
3004 qla2x00_fdmi_register(vha); 3004 qla2x00_fdmi_register(vha);
3005 3005
3006 /* Ensure we are logged into the SNS. */ 3006 /* Ensure we are logged into the SNS. */
3007 if (IS_FWI2_CAPABLE(ha)) 3007 if (IS_FWI2_CAPABLE(ha))
3008 loop_id = NPH_SNS; 3008 loop_id = NPH_SNS;
3009 else 3009 else
3010 loop_id = SIMPLE_NAME_SERVER; 3010 loop_id = SIMPLE_NAME_SERVER;
3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3012 0xfc, mb, BIT_1 | BIT_0); 3012 0xfc, mb, BIT_1 | BIT_0);
3013 if (mb[0] != MBS_COMMAND_COMPLETE) { 3013 if (mb[0] != MBS_COMMAND_COMPLETE) {
3014 ql_dbg(ql_dbg_disc, vha, 0x2042, 3014 ql_dbg(ql_dbg_disc, vha, 0x2042,
3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " 3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1], 3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3017 mb[2], mb[6], mb[7]); 3017 mb[2], mb[6], mb[7]);
3018 return (QLA_SUCCESS); 3018 return (QLA_SUCCESS);
3019 } 3019 }
3020 3020
3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3022 if (qla2x00_rft_id(vha)) { 3022 if (qla2x00_rft_id(vha)) {
3023 /* EMPTY */ 3023 /* EMPTY */
3024 ql_dbg(ql_dbg_disc, vha, 0x2045, 3024 ql_dbg(ql_dbg_disc, vha, 0x2045,
3025 "Register FC-4 TYPE failed.\n"); 3025 "Register FC-4 TYPE failed.\n");
3026 } 3026 }
3027 if (qla2x00_rff_id(vha)) { 3027 if (qla2x00_rff_id(vha)) {
3028 /* EMPTY */ 3028 /* EMPTY */
3029 ql_dbg(ql_dbg_disc, vha, 0x2049, 3029 ql_dbg(ql_dbg_disc, vha, 0x2049,
3030 "Register FC-4 Features failed.\n"); 3030 "Register FC-4 Features failed.\n");
3031 } 3031 }
3032 if (qla2x00_rnn_id(vha)) { 3032 if (qla2x00_rnn_id(vha)) {
3033 /* EMPTY */ 3033 /* EMPTY */
3034 ql_dbg(ql_dbg_disc, vha, 0x204f, 3034 ql_dbg(ql_dbg_disc, vha, 0x204f,
3035 "Register Node Name failed.\n"); 3035 "Register Node Name failed.\n");
3036 } else if (qla2x00_rsnn_nn(vha)) { 3036 } else if (qla2x00_rsnn_nn(vha)) {
3037 /* EMPTY */ 3037 /* EMPTY */
3038 ql_dbg(ql_dbg_disc, vha, 0x2053, 3038 ql_dbg(ql_dbg_disc, vha, 0x2053,
3039 "Register Symobilic Node Name failed.\n"); 3039 "Register Symobilic Node Name failed.\n");
3040 } 3040 }
3041 } 3041 }
3042 3042
3043 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3043 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3044 if (rval != QLA_SUCCESS) 3044 if (rval != QLA_SUCCESS)
3045 break; 3045 break;
3046 3046
3047 /* 3047 /*
3048 * Logout all previous fabric devices marked lost, except 3048 * Logout all previous fabric devices marked lost, except
3049 * FCP2 devices. 3049 * FCP2 devices.
3050 */ 3050 */
3051 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3051 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3052 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3052 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3053 break; 3053 break;
3054 3054
3055 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3055 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3056 continue; 3056 continue;
3057 3057
3058 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 3058 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3059 qla2x00_mark_device_lost(vha, fcport, 3059 qla2x00_mark_device_lost(vha, fcport,
3060 ql2xplogiabsentdevice, 0); 3060 ql2xplogiabsentdevice, 0);
3061 if (fcport->loop_id != FC_NO_LOOP_ID && 3061 if (fcport->loop_id != FC_NO_LOOP_ID &&
3062 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3062 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3063 fcport->port_type != FCT_INITIATOR && 3063 fcport->port_type != FCT_INITIATOR &&
3064 fcport->port_type != FCT_BROADCAST) { 3064 fcport->port_type != FCT_BROADCAST) {
3065 ha->isp_ops->fabric_logout(vha, 3065 ha->isp_ops->fabric_logout(vha,
3066 fcport->loop_id, 3066 fcport->loop_id,
3067 fcport->d_id.b.domain, 3067 fcport->d_id.b.domain,
3068 fcport->d_id.b.area, 3068 fcport->d_id.b.area,
3069 fcport->d_id.b.al_pa); 3069 fcport->d_id.b.al_pa);
3070 fcport->loop_id = FC_NO_LOOP_ID; 3070 fcport->loop_id = FC_NO_LOOP_ID;
3071 } 3071 }
3072 } 3072 }
3073 } 3073 }
3074 3074
3075 /* Starting free loop ID. */ 3075 /* Starting free loop ID. */
3076 next_loopid = ha->min_external_loopid; 3076 next_loopid = ha->min_external_loopid;
3077 3077
3078 /* 3078 /*
3079 * Scan through our port list and log in entries that need to be 3079 * Scan through our port list and log in entries that need to be
3080 * logged in. 3080 * logged in.
3081 */ 3081 */
3082 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3082 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3083 if (atomic_read(&vha->loop_down_timer) || 3083 if (atomic_read(&vha->loop_down_timer) ||
3084 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3084 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3085 break; 3085 break;
3086 3086
3087 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 3087 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3088 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3088 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3089 continue; 3089 continue;
3090 3090
3091 if (fcport->loop_id == FC_NO_LOOP_ID) { 3091 if (fcport->loop_id == FC_NO_LOOP_ID) {
3092 fcport->loop_id = next_loopid; 3092 fcport->loop_id = next_loopid;
3093 rval = qla2x00_find_new_loop_id( 3093 rval = qla2x00_find_new_loop_id(
3094 base_vha, fcport); 3094 base_vha, fcport);
3095 if (rval != QLA_SUCCESS) { 3095 if (rval != QLA_SUCCESS) {
3096 /* Ran out of IDs to use */ 3096 /* Ran out of IDs to use */
3097 break; 3097 break;
3098 } 3098 }
3099 } 3099 }
3100 /* Login and update database */ 3100 /* Login and update database */
3101 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3101 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3102 } 3102 }
3103 3103
3104 /* Exit if out of loop IDs. */ 3104 /* Exit if out of loop IDs. */
3105 if (rval != QLA_SUCCESS) { 3105 if (rval != QLA_SUCCESS) {
3106 break; 3106 break;
3107 } 3107 }
3108 3108
3109 /* 3109 /*
3110 * Login and add the new devices to our port list. 3110 * Login and add the new devices to our port list.
3111 */ 3111 */
3112 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 3112 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3113 if (atomic_read(&vha->loop_down_timer) || 3113 if (atomic_read(&vha->loop_down_timer) ||
3114 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3114 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3115 break; 3115 break;
3116 3116
3117 /* Find a new loop ID to use. */ 3117 /* Find a new loop ID to use. */
3118 fcport->loop_id = next_loopid; 3118 fcport->loop_id = next_loopid;
3119 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3119 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3120 if (rval != QLA_SUCCESS) { 3120 if (rval != QLA_SUCCESS) {
3121 /* Ran out of IDs to use */ 3121 /* Ran out of IDs to use */
3122 break; 3122 break;
3123 } 3123 }
3124 3124
3125 /* Login and update database */ 3125 /* Login and update database */
3126 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3126 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3127 3127
3128 if (vha->vp_idx) { 3128 if (vha->vp_idx) {
3129 fcport->vha = vha; 3129 fcport->vha = vha;
3130 fcport->vp_idx = vha->vp_idx; 3130 fcport->vp_idx = vha->vp_idx;
3131 } 3131 }
3132 list_move_tail(&fcport->list, &vha->vp_fcports); 3132 list_move_tail(&fcport->list, &vha->vp_fcports);
3133 } 3133 }
3134 } while (0); 3134 } while (0);
3135 3135
3136 /* Free all new device structures not processed. */ 3136 /* Free all new device structures not processed. */
3137 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 3137 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3138 list_del(&fcport->list); 3138 list_del(&fcport->list);
3139 kfree(fcport); 3139 kfree(fcport);
3140 } 3140 }
3141 3141
3142 if (rval) { 3142 if (rval) {
3143 ql_dbg(ql_dbg_disc, vha, 0x2068, 3143 ql_dbg(ql_dbg_disc, vha, 0x2068,
3144 "Configure fabric error exit rval=%d.\n", rval); 3144 "Configure fabric error exit rval=%d.\n", rval);
3145 } 3145 }
3146 3146
3147 return (rval); 3147 return (rval);
3148 } 3148 }
3149 3149
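qla2x00_configure_fabric() wraps its whole body in do { ... } while (0): every failure path can break out of the block and still fall through to the shared cleanup that frees any unprocessed new_fcports entries, with no goto labels. A minimal sketch of that control-flow pattern, with invented step names:

    #include <stdio.h>

    /* Hypothetical steps standing in for SNS login, scan, relogin... */
    static int step(int n) { return n != 2; /* pretend step 2 fails */ }

    static int run_with_common_cleanup(void)
    {
            int rval = 0;

            do {
                    if (!step(1)) { rval = 1; break; }
                    if (!step(2)) { rval = 2; break; }
                    if (!step(3)) { rval = 3; break; }
            } while (0);

            /* Cleanup runs on every path, success or early break. */
            printf("cleanup, rval=%d\n", rval);
            return rval;
    }

    int main(void) { run_with_common_cleanup(); return 0; }
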
3150 /* 3150 /*
3151 * qla2x00_find_all_fabric_devs 3151 * qla2x00_find_all_fabric_devs
3152 * 3152 *
3153 * Input: 3153 * Input:
3154 * ha = adapter block pointer. 3154 * ha = adapter block pointer.
3155 * dev = database device entry pointer. 3155 * dev = database device entry pointer.
3156 * 3156 *
3157 * Returns: 3157 * Returns:
3158 * 0 = success. 3158 * 0 = success.
3159 * 3159 *
3160 * Context: 3160 * Context:
3161 * Kernel context. 3161 * Kernel context.
3162 */ 3162 */
3163 static int 3163 static int
3164 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, 3164 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3165 struct list_head *new_fcports) 3165 struct list_head *new_fcports)
3166 { 3166 {
3167 int rval; 3167 int rval;
3168 uint16_t loop_id; 3168 uint16_t loop_id;
3169 fc_port_t *fcport, *new_fcport, *fcptemp; 3169 fc_port_t *fcport, *new_fcport, *fcptemp;
3170 int found; 3170 int found;
3171 3171
3172 sw_info_t *swl; 3172 sw_info_t *swl;
3173 int swl_idx; 3173 int swl_idx;
3174 int first_dev, last_dev; 3174 int first_dev, last_dev;
3175 port_id_t wrap = {}, nxt_d_id; 3175 port_id_t wrap = {}, nxt_d_id;
3176 struct qla_hw_data *ha = vha->hw; 3176 struct qla_hw_data *ha = vha->hw;
3177 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 3177 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3178 struct scsi_qla_host *tvp; 3178 struct scsi_qla_host *tvp;
3179 3179
3180 rval = QLA_SUCCESS; 3180 rval = QLA_SUCCESS;
3181 3181
3182 /* Try GID_PT to get device list, else GAN. */ 3182 /* Try GID_PT to get device list, else GAN. */
3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3184 if (!swl) { 3184 if (!swl) {
3185 /*EMPTY*/ 3185 /*EMPTY*/
3186 ql_dbg(ql_dbg_disc, vha, 0x2054, 3186 ql_dbg(ql_dbg_disc, vha, 0x2054,
3187 "GID_PT allocations failed, fallback on GA_NXT.\n"); 3187 "GID_PT allocations failed, fallback on GA_NXT.\n");
3188 } else { 3188 } else {
3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3190 kfree(swl); 3190 kfree(swl);
3191 swl = NULL; 3191 swl = NULL;
3192 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 3192 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3193 kfree(swl); 3193 kfree(swl);
3194 swl = NULL; 3194 swl = NULL;
3195 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 3195 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3196 kfree(swl); 3196 kfree(swl);
3197 swl = NULL; 3197 swl = NULL;
3198 } else if (ql2xiidmaenable && 3198 } else if (ql2xiidmaenable &&
3199 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 3199 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3200 qla2x00_gpsc(vha, swl); 3200 qla2x00_gpsc(vha, swl);
3201 } 3201 }
3202 3202
3203 /* If other queries succeeded, probe for FC-4 type */ 3203 /* If other queries succeeded, probe for FC-4 type */
3204 if (swl) 3204 if (swl)
3205 qla2x00_gff_id(vha, swl); 3205 qla2x00_gff_id(vha, swl);
3206 } 3206 }
3207 swl_idx = 0; 3207 swl_idx = 0;
3208 3208
3209 /* Allocate temporary fcport for any new fcports discovered. */ 3209 /* Allocate temporary fcport for any new fcports discovered. */
3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3211 if (new_fcport == NULL) { 3211 if (new_fcport == NULL) {
3212 ql_log(ql_log_warn, vha, 0x205e, 3212 ql_log(ql_log_warn, vha, 0x205e,
3213 "Failed to allocate memory for fcport.\n"); 3213 "Failed to allocate memory for fcport.\n");
3214 kfree(swl); 3214 kfree(swl);
3215 return (QLA_MEMORY_ALLOC_FAILED); 3215 return (QLA_MEMORY_ALLOC_FAILED);
3216 } 3216 }
3217 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 3217 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3218 /* Set start port ID scan at adapter ID. */ 3218 /* Set start port ID scan at adapter ID. */
3219 first_dev = 1; 3219 first_dev = 1;
3220 last_dev = 0; 3220 last_dev = 0;
3221 3221
3222 /* Starting free loop ID. */ 3222 /* Starting free loop ID. */
3223 loop_id = ha->min_external_loopid; 3223 loop_id = ha->min_external_loopid;
3224 for (; loop_id <= ha->max_loop_id; loop_id++) { 3224 for (; loop_id <= ha->max_loop_id; loop_id++) {
3225 if (qla2x00_is_reserved_id(vha, loop_id)) 3225 if (qla2x00_is_reserved_id(vha, loop_id))
3226 continue; 3226 continue;
3227 3227
3228 if (ha->current_topology == ISP_CFG_FL && 3228 if (ha->current_topology == ISP_CFG_FL &&
3229 (atomic_read(&vha->loop_down_timer) || 3229 (atomic_read(&vha->loop_down_timer) ||
3230 LOOP_TRANSITION(vha))) { 3230 LOOP_TRANSITION(vha))) {
3231 atomic_set(&vha->loop_down_timer, 0); 3231 atomic_set(&vha->loop_down_timer, 0);
3232 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3232 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3233 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3233 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3234 break; 3234 break;
3235 } 3235 }
3236 3236
3237 if (swl != NULL) { 3237 if (swl != NULL) {
3238 if (last_dev) { 3238 if (last_dev) {
3239 wrap.b24 = new_fcport->d_id.b24; 3239 wrap.b24 = new_fcport->d_id.b24;
3240 } else { 3240 } else {
3241 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; 3241 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3242 memcpy(new_fcport->node_name, 3242 memcpy(new_fcport->node_name,
3243 swl[swl_idx].node_name, WWN_SIZE); 3243 swl[swl_idx].node_name, WWN_SIZE);
3244 memcpy(new_fcport->port_name, 3244 memcpy(new_fcport->port_name,
3245 swl[swl_idx].port_name, WWN_SIZE); 3245 swl[swl_idx].port_name, WWN_SIZE);
3246 memcpy(new_fcport->fabric_port_name, 3246 memcpy(new_fcport->fabric_port_name,
3247 swl[swl_idx].fabric_port_name, WWN_SIZE); 3247 swl[swl_idx].fabric_port_name, WWN_SIZE);
3248 new_fcport->fp_speed = swl[swl_idx].fp_speed; 3248 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3249 new_fcport->fc4_type = swl[swl_idx].fc4_type; 3249 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3250 3250
3251 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 3251 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3252 last_dev = 1; 3252 last_dev = 1;
3253 } 3253 }
3254 swl_idx++; 3254 swl_idx++;
3255 } 3255 }
3256 } else { 3256 } else {
3257 /* Send GA_NXT to the switch */ 3257 /* Send GA_NXT to the switch */
3258 rval = qla2x00_ga_nxt(vha, new_fcport); 3258 rval = qla2x00_ga_nxt(vha, new_fcport);
3259 if (rval != QLA_SUCCESS) { 3259 if (rval != QLA_SUCCESS) {
3260 ql_log(ql_log_warn, vha, 0x2064, 3260 ql_log(ql_log_warn, vha, 0x2064,
3261 "SNS scan failed -- assuming " 3261 "SNS scan failed -- assuming "
3262 "zero-entry result.\n"); 3262 "zero-entry result.\n");
3263 list_for_each_entry_safe(fcport, fcptemp, 3263 list_for_each_entry_safe(fcport, fcptemp,
3264 new_fcports, list) { 3264 new_fcports, list) {
3265 list_del(&fcport->list); 3265 list_del(&fcport->list);
3266 kfree(fcport); 3266 kfree(fcport);
3267 } 3267 }
3268 rval = QLA_SUCCESS; 3268 rval = QLA_SUCCESS;
3269 break; 3269 break;
3270 } 3270 }
3271 } 3271 }
3272 3272
3273 /* If wrap on switch device list, exit. */ 3273 /* If wrap on switch device list, exit. */
3274 if (first_dev) { 3274 if (first_dev) {
3275 wrap.b24 = new_fcport->d_id.b24; 3275 wrap.b24 = new_fcport->d_id.b24;
3276 first_dev = 0; 3276 first_dev = 0;
3277 } else if (new_fcport->d_id.b24 == wrap.b24) { 3277 } else if (new_fcport->d_id.b24 == wrap.b24) {
3278 ql_dbg(ql_dbg_disc, vha, 0x2065, 3278 ql_dbg(ql_dbg_disc, vha, 0x2065,
3279 "Device wrap (%02x%02x%02x).\n", 3279 "Device wrap (%02x%02x%02x).\n",
3280 new_fcport->d_id.b.domain, 3280 new_fcport->d_id.b.domain,
3281 new_fcport->d_id.b.area, 3281 new_fcport->d_id.b.area,
3282 new_fcport->d_id.b.al_pa); 3282 new_fcport->d_id.b.al_pa);
3283 break; 3283 break;
3284 } 3284 }
3285 3285
3286 /* Bypass if same physical adapter. */ 3286 /* Bypass if same physical adapter. */
3287 if (new_fcport->d_id.b24 == base_vha->d_id.b24) 3287 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3288 continue; 3288 continue;
3289 3289
3290 /* Bypass virtual ports of the same host. */ 3290 /* Bypass virtual ports of the same host. */
3291 found = 0; 3291 found = 0;
3292 if (ha->num_vhosts) { 3292 if (ha->num_vhosts) {
3293 unsigned long flags; 3293 unsigned long flags;
3294 3294
3295 spin_lock_irqsave(&ha->vport_slock, flags); 3295 spin_lock_irqsave(&ha->vport_slock, flags);
3296 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3296 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3297 if (new_fcport->d_id.b24 == vp->d_id.b24) { 3297 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3298 found = 1; 3298 found = 1;
3299 break; 3299 break;
3300 } 3300 }
3301 } 3301 }
3302 spin_unlock_irqrestore(&ha->vport_slock, flags); 3302 spin_unlock_irqrestore(&ha->vport_slock, flags);
3303 3303
3304 if (found) 3304 if (found)
3305 continue; 3305 continue;
3306 } 3306 }
3307 3307
3308 /* Bypass if same domain and area of adapter. */ 3308 /* Bypass if same domain and area of adapter. */
3309 if (((new_fcport->d_id.b24 & 0xffff00) == 3309 if (((new_fcport->d_id.b24 & 0xffff00) ==
3310 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == 3310 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3311 ISP_CFG_FL) 3311 ISP_CFG_FL)
3312 continue; 3312 continue;
3313 3313
3314 /* Bypass reserved domain fields. */ 3314 /* Bypass reserved domain fields. */
3315 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 3315 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3316 continue; 3316 continue;
3317 3317
3318 /* Bypass ports whose FC-4 type is not FCP_SCSI */ 3318 /* Bypass ports whose FC-4 type is not FCP_SCSI */
3319 if (ql2xgffidenable && 3319 if (ql2xgffidenable &&
3320 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && 3320 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3321 new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) 3321 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3322 continue; 3322 continue;
3323 3323
3324 /* Locate matching device in database. */ 3324 /* Locate matching device in database. */
3325 found = 0; 3325 found = 0;
3326 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3326 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3327 if (memcmp(new_fcport->port_name, fcport->port_name, 3327 if (memcmp(new_fcport->port_name, fcport->port_name,
3328 WWN_SIZE)) 3328 WWN_SIZE))
3329 continue; 3329 continue;
3330 3330
3331 found++; 3331 found++;
3332 3332
3333 /* Update port state. */ 3333 /* Update port state. */
3334 memcpy(fcport->fabric_port_name, 3334 memcpy(fcport->fabric_port_name,
3335 new_fcport->fabric_port_name, WWN_SIZE); 3335 new_fcport->fabric_port_name, WWN_SIZE);
3336 fcport->fp_speed = new_fcport->fp_speed; 3336 fcport->fp_speed = new_fcport->fp_speed;
3337 3337
3338 /* 3338 /*
3339 * If address the same and state FCS_ONLINE, nothing 3339 * If address the same and state FCS_ONLINE, nothing
3340 * changed. 3340 * changed.
3341 */ 3341 */
3342 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3342 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3343 atomic_read(&fcport->state) == FCS_ONLINE) { 3343 atomic_read(&fcport->state) == FCS_ONLINE) {
3344 break; 3344 break;
3345 } 3345 }
3346 3346
3347 /* 3347 /*
3348 * If device was not a fabric device before. 3348 * If device was not a fabric device before.
3349 */ 3349 */
3350 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3350 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3351 fcport->d_id.b24 = new_fcport->d_id.b24; 3351 fcport->d_id.b24 = new_fcport->d_id.b24;
3352 fcport->loop_id = FC_NO_LOOP_ID; 3352 fcport->loop_id = FC_NO_LOOP_ID;
3353 fcport->flags |= (FCF_FABRIC_DEVICE | 3353 fcport->flags |= (FCF_FABRIC_DEVICE |
3354 FCF_LOGIN_NEEDED); 3354 FCF_LOGIN_NEEDED);
3355 break; 3355 break;
3356 } 3356 }
3357 3357
3358 /* 3358 /*
3359 * Port ID changed or device was marked to be updated; 3359 * Port ID changed or device was marked to be updated;
3360 * Log it out if still logged in and mark it for 3360 * Log it out if still logged in and mark it for
3361 * relogin later. 3361 * relogin later.
3362 */ 3362 */
3363 fcport->d_id.b24 = new_fcport->d_id.b24; 3363 fcport->d_id.b24 = new_fcport->d_id.b24;
3364 fcport->flags |= FCF_LOGIN_NEEDED; 3364 fcport->flags |= FCF_LOGIN_NEEDED;
3365 if (fcport->loop_id != FC_NO_LOOP_ID && 3365 if (fcport->loop_id != FC_NO_LOOP_ID &&
3366 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3366 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3367 fcport->port_type != FCT_INITIATOR && 3367 fcport->port_type != FCT_INITIATOR &&
3368 fcport->port_type != FCT_BROADCAST) { 3368 fcport->port_type != FCT_BROADCAST) {
3369 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3369 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3370 fcport->d_id.b.domain, fcport->d_id.b.area, 3370 fcport->d_id.b.domain, fcport->d_id.b.area,
3371 fcport->d_id.b.al_pa); 3371 fcport->d_id.b.al_pa);
3372 fcport->loop_id = FC_NO_LOOP_ID; 3372 fcport->loop_id = FC_NO_LOOP_ID;
3373 } 3373 }
3374 3374
3375 break; 3375 break;
3376 } 3376 }
3377 3377
3378 if (found) 3378 if (found)
3379 continue; 3379 continue;
3380 /* If device was not in our fcports list, then add it. */ 3380 /* If device was not in our fcports list, then add it. */
3381 list_add_tail(&new_fcport->list, new_fcports); 3381 list_add_tail(&new_fcport->list, new_fcports);
3382 3382
3383 /* Allocate a new replacement fcport. */ 3383 /* Allocate a new replacement fcport. */
3384 nxt_d_id.b24 = new_fcport->d_id.b24; 3384 nxt_d_id.b24 = new_fcport->d_id.b24;
3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3386 if (new_fcport == NULL) { 3386 if (new_fcport == NULL) {
3387 ql_log(ql_log_warn, vha, 0x2066, 3387 ql_log(ql_log_warn, vha, 0x2066,
3388 "Memory allocation failed for fcport.\n"); 3388 "Memory allocation failed for fcport.\n");
3389 kfree(swl); 3389 kfree(swl);
3390 return (QLA_MEMORY_ALLOC_FAILED); 3390 return (QLA_MEMORY_ALLOC_FAILED);
3391 } 3391 }
3392 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 3392 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3393 new_fcport->d_id.b24 = nxt_d_id.b24; 3393 new_fcport->d_id.b24 = nxt_d_id.b24;
3394 } 3394 }
3395 3395
3396 kfree(swl); 3396 kfree(swl);
3397 kfree(new_fcport); 3397 kfree(new_fcport);
3398 3398
3399 return (rval); 3399 return (rval);
3400 } 3400 }
3401 3401
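The name-server walk above terminates on wrap-around: the first port ID handed back is remembered in wrap, and seeing that same ID again means the switch's circular device list has been traversed exactly once. The same test over a toy circular list (all data invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Toy GA_NXT-style responses: a circular list of port IDs. */
            const uint32_t ids[] = { 0x010200, 0x010300, 0x010400 };
            uint32_t wrap = 0;
            int first_dev = 1, idx = 0, visited = 0;

            for (;;) {
                    uint32_t d_id = ids[idx];
                    idx = (idx + 1) % 3;    /* next switch entry */

                    if (first_dev) {
                            wrap = d_id;    /* remember starting point */
                            first_dev = 0;
                    } else if (d_id == wrap) {
                            printf("device wrap at %06x\n", d_id);
                            break;          /* full pass completed */
                    }
                    visited++;
            }
            printf("visited %d entries\n", visited);
            return 0;
    }
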
3402 /* 3402 /*
3403 * qla2x00_find_new_loop_id 3403 * qla2x00_find_new_loop_id
3404 * Scan through our port list and find a new usable loop ID. 3404 * Scan through our port list and find a new usable loop ID.
3405 * 3405 *
3406 * Input: 3406 * Input:
3407 * ha: adapter state pointer. 3407 * ha: adapter state pointer.
3408 * dev: port structure pointer. 3408 * dev: port structure pointer.
3409 * 3409 *
3410 * Returns: 3410 * Returns:
3411 * qla2x00 local function return status code. 3411 * qla2x00 local function return status code.
3412 * 3412 *
3413 * Context: 3413 * Context:
3414 * Kernel context. 3414 * Kernel context.
3415 */ 3415 */
3416 int 3416 int
3417 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3417 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3418 { 3418 {
3419 int rval; 3419 int rval;
3420 int found; 3420 int found;
3421 fc_port_t *fcport; 3421 fc_port_t *fcport;
3422 uint16_t first_loop_id; 3422 uint16_t first_loop_id;
3423 struct qla_hw_data *ha = vha->hw; 3423 struct qla_hw_data *ha = vha->hw;
3424 struct scsi_qla_host *vp; 3424 struct scsi_qla_host *vp;
3425 struct scsi_qla_host *tvp; 3425 struct scsi_qla_host *tvp;
3426 unsigned long flags = 0; 3426 unsigned long flags = 0;
3427 3427
3428 rval = QLA_SUCCESS; 3428 rval = QLA_SUCCESS;
3429 3429
3430 /* Save starting loop ID. */ 3430 /* Save starting loop ID. */
3431 first_loop_id = dev->loop_id; 3431 first_loop_id = dev->loop_id;
3432 3432
3433 for (;;) { 3433 for (;;) {
3434 /* Skip loop ID if already used by adapter. */ 3434 /* Skip loop ID if already used by adapter. */
3435 if (dev->loop_id == vha->loop_id) 3435 if (dev->loop_id == vha->loop_id)
3436 dev->loop_id++; 3436 dev->loop_id++;
3437 3437
3438 /* Skip reserved loop IDs. */ 3438 /* Skip reserved loop IDs. */
3439 while (qla2x00_is_reserved_id(vha, dev->loop_id)) 3439 while (qla2x00_is_reserved_id(vha, dev->loop_id))
3440 dev->loop_id++; 3440 dev->loop_id++;
3441 3441
3442 /* Reset loop ID if passed the end. */ 3442 /* Reset loop ID if passed the end. */
3443 if (dev->loop_id > ha->max_loop_id) { 3443 if (dev->loop_id > ha->max_loop_id) {
3444 /* first loop ID. */ 3444 /* first loop ID. */
3445 dev->loop_id = ha->min_external_loopid; 3445 dev->loop_id = ha->min_external_loopid;
3446 } 3446 }
3447 3447
3448 /* Check for loop ID being already in use. */ 3448 /* Check for loop ID being already in use. */
3449 found = 0; 3449 found = 0;
3450 fcport = NULL; 3450 fcport = NULL;
3451 3451
3452 spin_lock_irqsave(&ha->vport_slock, flags); 3452 spin_lock_irqsave(&ha->vport_slock, flags);
3453 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3453 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3454 list_for_each_entry(fcport, &vp->vp_fcports, list) { 3454 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3455 if (fcport->loop_id == dev->loop_id && 3455 if (fcport->loop_id == dev->loop_id &&
3456 fcport != dev) { 3456 fcport != dev) {
3457 /* ID possibly in use */ 3457 /* ID possibly in use */
3458 found++; 3458 found++;
3459 break; 3459 break;
3460 } 3460 }
3461 } 3461 }
3462 if (found) 3462 if (found)
3463 break; 3463 break;
3464 } 3464 }
3465 spin_unlock_irqrestore(&ha->vport_slock, flags); 3465 spin_unlock_irqrestore(&ha->vport_slock, flags);
3466 3466
3467 /* If not in use then it is free to use. */ 3467 /* If not in use then it is free to use. */
3468 if (!found) { 3468 if (!found) {
3469 break; 3469 break;
3470 } 3470 }
3471 3471
3472 /* ID in use. Try next value. */ 3472 /* ID in use. Try next value. */
3473 dev->loop_id++; 3473 dev->loop_id++;
3474 3474
3475 /* If wrapped around, no free ID is left to use. */ 3475 /* If wrapped around, no free ID is left to use. */
3476 if (dev->loop_id == first_loop_id) { 3476 if (dev->loop_id == first_loop_id) {
3477 dev->loop_id = FC_NO_LOOP_ID; 3477 dev->loop_id = FC_NO_LOOP_ID;
3478 rval = QLA_FUNCTION_FAILED; 3478 rval = QLA_FUNCTION_FAILED;
3479 break; 3479 break;
3480 } 3480 }
3481 } 3481 }
3482 3482
3483 return (rval); 3483 return (rval);
3484 } 3484 }
3485 3485
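The search above is circular: the starting candidate is saved in first_loop_id, and only a full wrap back to it with no free slot returns QLA_FUNCTION_FAILED. The same termination logic over a plain bitmap, as a hedged stand-alone sketch (the ID space and in-use set are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ID 8    /* hypothetical ID space: 0..7 */

    static bool in_use[MAX_ID] = { [0] = true, [1] = true, [3] = true };

    /* Return a free ID starting the search at 'start', or -1 if none. */
    static int find_free_id(int start)
    {
            int id = start % MAX_ID;
            int first = id;             /* remember where the search began */

            for (;;) {
                    if (!in_use[id])
                            return id;
                    id = (id + 1) % MAX_ID;     /* wrap past the end */
                    if (id == first)
                            return -1;          /* full circle: exhausted */
            }
    }

    int main(void)
    {
            printf("free id from 3: %d\n", find_free_id(3)); /* -> 4 */
            printf("free id from 0: %d\n", find_free_id(0)); /* -> 2 */
            return 0;
    }
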
3486 /* 3486 /*
3487 * qla2x00_device_resync 3487 * qla2x00_device_resync
3488 * Marks devices in the database that need resynchronization. 3488 * Marks devices in the database that need resynchronization.
3489 * 3489 *
3490 * Input: 3490 * Input:
3491 * ha = adapter block pointer. 3491 * ha = adapter block pointer.
3492 * 3492 *
3493 * Context: 3493 * Context:
3494 * Kernel context. 3494 * Kernel context.
3495 */ 3495 */
3496 static int 3496 static int
3497 qla2x00_device_resync(scsi_qla_host_t *vha) 3497 qla2x00_device_resync(scsi_qla_host_t *vha)
3498 { 3498 {
3499 int rval; 3499 int rval;
3500 uint32_t mask; 3500 uint32_t mask;
3501 fc_port_t *fcport; 3501 fc_port_t *fcport;
3502 uint32_t rscn_entry; 3502 uint32_t rscn_entry;
3503 uint8_t rscn_out_iter; 3503 uint8_t rscn_out_iter;
3504 uint8_t format; 3504 uint8_t format;
3505 port_id_t d_id = {}; 3505 port_id_t d_id = {};
3506 3506
3507 rval = QLA_RSCNS_HANDLED; 3507 rval = QLA_RSCNS_HANDLED;
3508 3508
3509 while (vha->rscn_out_ptr != vha->rscn_in_ptr || 3509 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3510 vha->flags.rscn_queue_overflow) { 3510 vha->flags.rscn_queue_overflow) {
3511 3511
3512 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr]; 3512 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3513 format = MSB(MSW(rscn_entry)); 3513 format = MSB(MSW(rscn_entry));
3514 d_id.b.domain = LSB(MSW(rscn_entry)); 3514 d_id.b.domain = LSB(MSW(rscn_entry));
3515 d_id.b.area = MSB(LSW(rscn_entry)); 3515 d_id.b.area = MSB(LSW(rscn_entry));
3516 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3516 d_id.b.al_pa = LSB(LSW(rscn_entry));
3517 3517
3518 ql_dbg(ql_dbg_disc, vha, 0x2020, 3518 ql_dbg(ql_dbg_disc, vha, 0x2020,
3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n", 3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area, 3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3521 d_id.b.al_pa); 3521 d_id.b.al_pa);
3522 3522
3523 vha->rscn_out_ptr++; 3523 vha->rscn_out_ptr++;
3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3525 vha->rscn_out_ptr = 0; 3525 vha->rscn_out_ptr = 0;
3526 3526
3527 /* Skip duplicate entries. */ 3527 /* Skip duplicate entries. */
3528 for (rscn_out_iter = vha->rscn_out_ptr; 3528 for (rscn_out_iter = vha->rscn_out_ptr;
3529 !vha->flags.rscn_queue_overflow && 3529 !vha->flags.rscn_queue_overflow &&
3530 rscn_out_iter != vha->rscn_in_ptr; 3530 rscn_out_iter != vha->rscn_in_ptr;
3531 rscn_out_iter = (rscn_out_iter == 3531 rscn_out_iter = (rscn_out_iter ==
3532 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 3532 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
3533 3533
3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3535 break; 3535 break;
3536 3536
3537 ql_dbg(ql_dbg_disc, vha, 0x2021, 3537 ql_dbg(ql_dbg_disc, vha, 0x2021,
3538 "Skipping duplicate RSCN queue entry found at " 3538 "Skipping duplicate RSCN queue entry found at "
3539 "[%d].\n", rscn_out_iter); 3539 "[%d].\n", rscn_out_iter);
3540 3540
3541 vha->rscn_out_ptr = rscn_out_iter; 3541 vha->rscn_out_ptr = rscn_out_iter;
3542 } 3542 }
3543 3543
3544 /* Queue overflow, set switch default case. */ 3544 /* Queue overflow, set switch default case. */
3545 if (vha->flags.rscn_queue_overflow) { 3545 if (vha->flags.rscn_queue_overflow) {
3546 ql_dbg(ql_dbg_disc, vha, 0x2022, 3546 ql_dbg(ql_dbg_disc, vha, 0x2022,
3547 "device_resync: rscn overflow.\n"); 3547 "device_resync: rscn overflow.\n");
3548 3548
3549 format = 3; 3549 format = 3;
3550 vha->flags.rscn_queue_overflow = 0; 3550 vha->flags.rscn_queue_overflow = 0;
3551 } 3551 }
3552 3552
3553 switch (format) { 3553 switch (format) {
3554 case 0: 3554 case 0:
3555 mask = 0xffffff; 3555 mask = 0xffffff;
3556 break; 3556 break;
3557 case 1: 3557 case 1:
3558 mask = 0xffff00; 3558 mask = 0xffff00;
3559 break; 3559 break;
3560 case 2: 3560 case 2:
3561 mask = 0xff0000; 3561 mask = 0xff0000;
3562 break; 3562 break;
3563 default: 3563 default:
3564 mask = 0x0; 3564 mask = 0x0;
3565 d_id.b24 = 0; 3565 d_id.b24 = 0;
3566 vha->rscn_out_ptr = vha->rscn_in_ptr; 3566 vha->rscn_out_ptr = vha->rscn_in_ptr;
3567 break; 3567 break;
3568 } 3568 }
3569 3569
3570 rval = QLA_SUCCESS; 3570 rval = QLA_SUCCESS;
3571 3571
3572 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3572 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3573 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 3573 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3574 (fcport->d_id.b24 & mask) != d_id.b24 || 3574 (fcport->d_id.b24 & mask) != d_id.b24 ||
3575 fcport->port_type == FCT_BROADCAST) 3575 fcport->port_type == FCT_BROADCAST)
3576 continue; 3576 continue;
3577 3577
3578 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3578 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3579 if (format != 3 || 3579 if (format != 3 ||
3580 fcport->port_type != FCT_INITIATOR) { 3580 fcport->port_type != FCT_INITIATOR) {
3581 qla2x00_mark_device_lost(vha, fcport, 3581 qla2x00_mark_device_lost(vha, fcport,
3582 0, 0); 3582 0, 0);
3583 } 3583 }
3584 } 3584 }
3585 } 3585 }
3586 } 3586 }
3587 return (rval); 3587 return (rval);
3588 } 3588 }
3589 3589
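Each RSCN queue entry above is a single 32-bit word: the top byte is the format code and the low three bytes the affected address, which the switch statement widens into a port-, area-, or domain-level mask. A stand-alone sketch of that decode; the MSB/LSB/MSW/LSW macros mirror the driver's helpers but are defined locally:

    #include <stdint.h>
    #include <stdio.h>

    #define MSW(x) ((uint16_t)((x) >> 16))
    #define LSW(x) ((uint16_t)((x) & 0xffff))
    #define MSB(x) ((uint8_t)((x) >> 8))
    #define LSB(x) ((uint8_t)((x) & 0xff))

    int main(void)
    {
            uint32_t rscn_entry = 0x01020304; /* format 01, address 02.03.04 */
            uint8_t format = MSB(MSW(rscn_entry));
            uint32_t d_id = rscn_entry & 0xffffff;
            uint32_t mask;

            switch (format) {
            case 0: mask = 0xffffff; break;  /* single port affected */
            case 1: mask = 0xffff00; break;  /* whole area affected */
            case 2: mask = 0xff0000; break;  /* whole domain affected */
            default: mask = 0x0; d_id = 0; break; /* fabric-wide */
            }

            /* A port is affected when (port_id & mask) == (d_id & mask). */
            printf("format=%u d_id=%06x mask=%06x\n", format, d_id, mask);
            return 0;
    }
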
3590 /* 3590 /*
3591 * qla2x00_fabric_dev_login 3591 * qla2x00_fabric_dev_login
3592 * Login fabric target device and update FC port database. 3592 * Login fabric target device and update FC port database.
3593 * 3593 *
3594 * Input: 3594 * Input:
3595 * ha: adapter state pointer. 3595 * ha: adapter state pointer.
3596 * fcport: port structure list pointer. 3596 * fcport: port structure list pointer.
3597 * next_loopid: contains value of a new loop ID that can be used 3597 * next_loopid: contains value of a new loop ID that can be used
3598 * by the next login attempt. 3598 * by the next login attempt.
3599 * 3599 *
3600 * Returns: 3600 * Returns:
3601 * qla2x00 local function return status code. 3601 * qla2x00 local function return status code.
3602 * 3602 *
3603 * Context: 3603 * Context:
3604 * Kernel context. 3604 * Kernel context.
3605 */ 3605 */
3606 static int 3606 static int
3607 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3607 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3608 uint16_t *next_loopid) 3608 uint16_t *next_loopid)
3609 { 3609 {
3610 int rval; 3610 int rval;
3611 int retry; 3611 int retry;
3612 uint8_t opts; 3612 uint8_t opts;
3613 struct qla_hw_data *ha = vha->hw; 3613 struct qla_hw_data *ha = vha->hw;
3614 3614
3615 rval = QLA_SUCCESS; 3615 rval = QLA_SUCCESS;
3616 retry = 0; 3616 retry = 0;
3617 3617
3618 if (IS_ALOGIO_CAPABLE(ha)) { 3618 if (IS_ALOGIO_CAPABLE(ha)) {
3619 if (fcport->flags & FCF_ASYNC_SENT) 3619 if (fcport->flags & FCF_ASYNC_SENT)
3620 return rval; 3620 return rval;
3621 fcport->flags |= FCF_ASYNC_SENT; 3621 fcport->flags |= FCF_ASYNC_SENT;
3622 rval = qla2x00_post_async_login_work(vha, fcport, NULL); 3622 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3623 if (!rval) 3623 if (!rval)
3624 return rval; 3624 return rval;
3625 } 3625 }
3626 3626
3627 fcport->flags &= ~FCF_ASYNC_SENT; 3627 fcport->flags &= ~FCF_ASYNC_SENT;
3628 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3628 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3629 if (rval == QLA_SUCCESS) { 3629 if (rval == QLA_SUCCESS) {
3630 /* Send an ADISC to FCP2 devices. */ 3630 /* Send an ADISC to FCP2 devices. */
3631 opts = 0; 3631 opts = 0;
3632 if (fcport->flags & FCF_FCP2_DEVICE) 3632 if (fcport->flags & FCF_FCP2_DEVICE)
3633 opts |= BIT_1; 3633 opts |= BIT_1;
3634 rval = qla2x00_get_port_database(vha, fcport, opts); 3634 rval = qla2x00_get_port_database(vha, fcport, opts);
3635 if (rval != QLA_SUCCESS) { 3635 if (rval != QLA_SUCCESS) {
3636 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3636 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3637 fcport->d_id.b.domain, fcport->d_id.b.area, 3637 fcport->d_id.b.domain, fcport->d_id.b.area,
3638 fcport->d_id.b.al_pa); 3638 fcport->d_id.b.al_pa);
3639 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3639 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3640 } else { 3640 } else {
3641 qla2x00_update_fcport(vha, fcport); 3641 qla2x00_update_fcport(vha, fcport);
3642 } 3642 }
3643 } 3643 }
3644 3644
3645 return (rval); 3645 return (rval);
3646 } 3646 }
3647 3647
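qla2x00_fabric_dev_login() prefers the asynchronous login path and uses FCF_ASYNC_SENT as a guard so only one async attempt per port is outstanding; if async I/O is unavailable or the post fails, it clears the guard and takes the synchronous route. That guard-flag shape, sketched stand-alone (flag bit and helper names invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_ASYNC_SENT 0x1

    struct port { unsigned flags; };

    /* Stand-ins: pretend the async post always fails in this sketch. */
    static bool post_async_login(struct port *p) { (void)p; return false; }
    static int sync_login(struct port *p) { (void)p; return 0; }

    static int dev_login(struct port *p, bool async_capable)
    {
            if (async_capable) {
                    if (p->flags & F_ASYNC_SENT)
                            return 0;       /* async attempt already queued */
                    p->flags |= F_ASYNC_SENT;
                    if (post_async_login(p))
                            return 0;       /* completion finishes the login */
            }

            /* Fall back to the blocking path. */
            p->flags &= ~F_ASYNC_SENT;
            return sync_login(p);
    }

    int main(void)
    {
            struct port p = { 0 };
            printf("rval=%d flags=%#x\n", dev_login(&p, true), p.flags);
            return 0;
    }
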
3648 /* 3648 /*
3649 * qla2x00_fabric_login 3649 * qla2x00_fabric_login
3650 * Issue fabric login command. 3650 * Issue fabric login command.
3651 * 3651 *
3652 * Input: 3652 * Input:
3653 * ha = adapter block pointer. 3653 * ha = adapter block pointer.
3654 * device = pointer to FC device type structure. 3654 * device = pointer to FC device type structure.
3655 * 3655 *
3656 * Returns: 3656 * Returns:
3657 * 0 - Login succeeded 3657 * 0 - Login succeeded
3658 * 1 - Login failed 3658 * 1 - Login failed
3659 * 2 - Initiator device 3659 * 2 - Initiator device
3660 * 3 - Fatal error 3660 * 3 - Fatal error
3661 */ 3661 */
3662 int 3662 int
3663 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3663 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3664 uint16_t *next_loopid) 3664 uint16_t *next_loopid)
3665 { 3665 {
3666 int rval; 3666 int rval;
3667 int retry; 3667 int retry;
3668 uint16_t tmp_loopid; 3668 uint16_t tmp_loopid;
3669 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3669 uint16_t mb[MAILBOX_REGISTER_COUNT];
3670 struct qla_hw_data *ha = vha->hw; 3670 struct qla_hw_data *ha = vha->hw;
3671 3671
3672 retry = 0; 3672 retry = 0;
3673 tmp_loopid = 0; 3673 tmp_loopid = 0;
3674 3674
3675 for (;;) { 3675 for (;;) {
3676 ql_dbg(ql_dbg_disc, vha, 0x2000, 3676 ql_dbg(ql_dbg_disc, vha, 0x2000,
3677 "Trying Fabric Login w/loop id 0x%04x for port " 3677 "Trying Fabric Login w/loop id 0x%04x for port "
3678 "%02x%02x%02x.\n", 3678 "%02x%02x%02x.\n",
3679 fcport->loop_id, fcport->d_id.b.domain, 3679 fcport->loop_id, fcport->d_id.b.domain,
3680 fcport->d_id.b.area, fcport->d_id.b.al_pa); 3680 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3681 3681
3682 /* Login fcport on switch. */ 3682 /* Login fcport on switch. */
3683 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3683 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3684 fcport->d_id.b.domain, fcport->d_id.b.area, 3684 fcport->d_id.b.domain, fcport->d_id.b.area,
3685 fcport->d_id.b.al_pa, mb, BIT_0); 3685 fcport->d_id.b.al_pa, mb, BIT_0);
3686 if (mb[0] == MBS_PORT_ID_USED) { 3686 if (mb[0] == MBS_PORT_ID_USED) {
3687 /* 3687 /*
3688 * Device has another loop ID. The firmware team 3688 * Device has another loop ID. The firmware team
3689 * recommends the driver perform an implicit login with 3689 * recommends the driver perform an implicit login with
3690 * the specified ID again. The ID we just used is saved 3690 * the specified ID again. The ID we just used is saved
3691 * here so we return with an ID that can be tried by 3691 * here so we return with an ID that can be tried by
3692 * the next login. 3692 * the next login.
3693 */ 3693 */
3694 retry++; 3694 retry++;
3695 tmp_loopid = fcport->loop_id; 3695 tmp_loopid = fcport->loop_id;
3696 fcport->loop_id = mb[1]; 3696 fcport->loop_id = mb[1];
3697 3697
3698 ql_dbg(ql_dbg_disc, vha, 0x2001, 3698 ql_dbg(ql_dbg_disc, vha, 0x2001,
3699 "Fabric Login: port in use - next loop " 3699 "Fabric Login: port in use - next loop "
3700 "id=0x%04x, port id= %02x%02x%02x.\n", 3700 "id=0x%04x, port id= %02x%02x%02x.\n",
3701 fcport->loop_id, fcport->d_id.b.domain, 3701 fcport->loop_id, fcport->d_id.b.domain,
3702 fcport->d_id.b.area, fcport->d_id.b.al_pa); 3702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3703 3703
3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3705 /* 3705 /*
3706 * Login succeeded. 3706 * Login succeeded.
3707 */ 3707 */
3708 if (retry) { 3708 if (retry) {
3709 /* A retry occurred before. */ 3709 /* A retry occurred before. */
3710 *next_loopid = tmp_loopid; 3710 *next_loopid = tmp_loopid;
3711 } else { 3711 } else {
3712 /* 3712 /*
3713 * No retry occurred before. Just increment the 3713 * No retry occurred before. Just increment the
3714 * ID value for next login. 3714 * ID value for next login.
3715 */ 3715 */
3716 *next_loopid = (fcport->loop_id + 1); 3716 *next_loopid = (fcport->loop_id + 1);
3717 } 3717 }
3718 3718
3719 if (mb[1] & BIT_0) { 3719 if (mb[1] & BIT_0) {
3720 fcport->port_type = FCT_INITIATOR; 3720 fcport->port_type = FCT_INITIATOR;
3721 } else { 3721 } else {
3722 fcport->port_type = FCT_TARGET; 3722 fcport->port_type = FCT_TARGET;
3723 if (mb[1] & BIT_1) { 3723 if (mb[1] & BIT_1) {
3724 fcport->flags |= FCF_FCP2_DEVICE; 3724 fcport->flags |= FCF_FCP2_DEVICE;
3725 } 3725 }
3726 } 3726 }
3727 3727
3728 if (mb[10] & BIT_0) 3728 if (mb[10] & BIT_0)
3729 fcport->supported_classes |= FC_COS_CLASS2; 3729 fcport->supported_classes |= FC_COS_CLASS2;
3730 if (mb[10] & BIT_1) 3730 if (mb[10] & BIT_1)
3731 fcport->supported_classes |= FC_COS_CLASS3; 3731 fcport->supported_classes |= FC_COS_CLASS3;
3732 3732
3733 rval = QLA_SUCCESS; 3733 rval = QLA_SUCCESS;
3734 break; 3734 break;
3735 } else if (mb[0] == MBS_LOOP_ID_USED) { 3735 } else if (mb[0] == MBS_LOOP_ID_USED) {
3736 /* 3736 /*
3737 * Loop ID already used, try next loop ID. 3737 * Loop ID already used, try next loop ID.
3738 */ 3738 */
3739 fcport->loop_id++; 3739 fcport->loop_id++;
3740 rval = qla2x00_find_new_loop_id(vha, fcport); 3740 rval = qla2x00_find_new_loop_id(vha, fcport);
3741 if (rval != QLA_SUCCESS) { 3741 if (rval != QLA_SUCCESS) {
3742 /* Ran out of loop IDs to use */ 3742 /* Ran out of loop IDs to use */
3743 break; 3743 break;
3744 } 3744 }
3745 } else if (mb[0] == MBS_COMMAND_ERROR) { 3745 } else if (mb[0] == MBS_COMMAND_ERROR) {
3746 /* 3746 /*
3747 * Firmware possibly timed out during login. If no 3747 * Firmware possibly timed out during login. If no
3748 * retries are left, then the device is declared 3748 * retries are left, then the device is declared
3749 * dead. 3749 * dead.
3750 */ 3750 */
3751 *next_loopid = fcport->loop_id; 3751 *next_loopid = fcport->loop_id;
3752 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3752 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3753 fcport->d_id.b.domain, fcport->d_id.b.area, 3753 fcport->d_id.b.domain, fcport->d_id.b.area,
3754 fcport->d_id.b.al_pa); 3754 fcport->d_id.b.al_pa);
3755 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3755 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3756 3756
3757 rval = 1; 3757 rval = 1;
3758 break; 3758 break;
3759 } else { 3759 } else {
3760 /* 3760 /*
3761 * Unrecoverable / unhandled error. 3761 * Unrecoverable / unhandled error.
3762 */ 3762 */
3763 ql_dbg(ql_dbg_disc, vha, 0x2002, 3763 ql_dbg(ql_dbg_disc, vha, 0x2002,
3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x " 3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain, 3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3766 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3766 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3767 fcport->loop_id, jiffies); 3767 fcport->loop_id, jiffies);
3768 3768
3769 *next_loopid = fcport->loop_id; 3769 *next_loopid = fcport->loop_id;
3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3771 fcport->d_id.b.domain, fcport->d_id.b.area, 3771 fcport->d_id.b.domain, fcport->d_id.b.area,
3772 fcport->d_id.b.al_pa); 3772 fcport->d_id.b.al_pa);
3773 fcport->loop_id = FC_NO_LOOP_ID; 3773 fcport->loop_id = FC_NO_LOOP_ID;
3774 fcport->login_retry = 0; 3774 fcport->login_retry = 0;
3775 3775
3776 rval = 3; 3776 rval = 3;
3777 break; 3777 break;
3778 } 3778 }
3779 } 3779 }
3780 3780
3781 return (rval); 3781 return (rval);
3782 } 3782 }
3783 3783
3784 /* 3784 /*
3785 * qla2x00_local_device_login 3785 * qla2x00_local_device_login
3786 * Issue local device login command. 3786 * Issue local device login command.
3787 * 3787 *
3788 * Input: 3788 * Input:
3789 * ha = adapter block pointer. 3789 * ha = adapter block pointer.
3790 * loop_id = loop id of device to login to. 3790 * loop_id = loop id of device to login to.
3791 * 3791 *
3792 * Returns (Where's the #define!!!!): 3792 * Returns (Where's the #define!!!!):
3793 * 0 - Login succeeded 3793 * 0 - Login succeeded
3794 * 1 - Login failed 3794 * 1 - Login failed
3795 * 3 - Fatal error 3795 * 3 - Fatal error
3796 */ 3796 */
3797 int 3797 int
3798 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) 3798 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3799 { 3799 {
3800 int rval; 3800 int rval;
3801 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3801 uint16_t mb[MAILBOX_REGISTER_COUNT];
3802 3802
3803 memset(mb, 0, sizeof(mb)); 3803 memset(mb, 0, sizeof(mb));
3804 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); 3804 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3805 if (rval == QLA_SUCCESS) { 3805 if (rval == QLA_SUCCESS) {
3806 /* Interrogate mailbox registers for any errors */ 3806 /* Interrogate mailbox registers for any errors */
3807 if (mb[0] == MBS_COMMAND_ERROR) 3807 if (mb[0] == MBS_COMMAND_ERROR)
3808 rval = 1; 3808 rval = 1;
3809 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) 3809 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3810 /* device not in PCB table */ 3810 /* device not in PCB table */
3811 rval = 3; 3811 rval = 3;
3812 } 3812 }
3813 3813
3814 return (rval); 3814 return (rval);
3815 } 3815 }
3816 3816
3817 /* 3817 /*
3818 * qla2x00_loop_resync 3818 * qla2x00_loop_resync
3819 * Resync with fibre channel devices. 3819 * Resync with fibre channel devices.
3820 * 3820 *
3821 * Input: 3821 * Input:
3822 * ha = adapter block pointer. 3822 * ha = adapter block pointer.
3823 * 3823 *
3824 * Returns: 3824 * Returns:
3825 * 0 = success 3825 * 0 = success
3826 */ 3826 */
3827 int 3827 int
3828 qla2x00_loop_resync(scsi_qla_host_t *vha) 3828 qla2x00_loop_resync(scsi_qla_host_t *vha)
3829 { 3829 {
3830 int rval = QLA_SUCCESS; 3830 int rval = QLA_SUCCESS;
3831 uint32_t wait_time; 3831 uint32_t wait_time;
3832 struct req_que *req; 3832 struct req_que *req;
3833 struct rsp_que *rsp; 3833 struct rsp_que *rsp;
3834 3834
3835 if (vha->hw->flags.cpu_affinity_enabled) 3835 if (vha->hw->flags.cpu_affinity_enabled)
3836 req = vha->hw->req_q_map[0]; 3836 req = vha->hw->req_q_map[0];
3837 else 3837 else
3838 req = vha->req; 3838 req = vha->req;
3839 rsp = req->rsp; 3839 rsp = req->rsp;
3840 3840
3841 atomic_set(&vha->loop_state, LOOP_UPDATE);
3842 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3841 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3843 if (vha->flags.online) { 3842 if (vha->flags.online) {
3844 if (!(rval = qla2x00_fw_ready(vha))) { 3843 if (!(rval = qla2x00_fw_ready(vha))) {
3845 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3844 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3846 wait_time = 256; 3845 wait_time = 256;
3847 do { 3846 do {
3848 atomic_set(&vha->loop_state, LOOP_UPDATE);
3849
3850 /* Issue a marker after FW becomes ready. */ 3847 /* Issue a marker after FW becomes ready. */
3851 qla2x00_marker(vha, req, rsp, 0, 0, 3848 qla2x00_marker(vha, req, rsp, 0, 0,
3852 MK_SYNC_ALL); 3849 MK_SYNC_ALL);
3853 vha->marker_needed = 0; 3850 vha->marker_needed = 0;
3854 3851
3855 /* Remap devices on Loop. */ 3852 /* Remap devices on Loop. */
3856 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3853 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3857 3854
3858 qla2x00_configure_loop(vha); 3855 qla2x00_configure_loop(vha);
3859 wait_time--; 3856 wait_time--;
3860 } while (!atomic_read(&vha->loop_down_timer) && 3857 } while (!atomic_read(&vha->loop_down_timer) &&
3861 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3858 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3862 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3859 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3863 &vha->dpc_flags))); 3860 &vha->dpc_flags)));
3864 } 3861 }
3865 } 3862 }
3866 3863
3867 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3864 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3868 return (QLA_FUNCTION_FAILED); 3865 return (QLA_FUNCTION_FAILED);
3869 3866
3870 if (rval) 3867 if (rval)
3871 ql_dbg(ql_dbg_disc, vha, 0x206c, 3868 ql_dbg(ql_dbg_disc, vha, 0x206c,
3872 "%s *** FAILED ***.\n", __func__); 3869 "%s *** FAILED ***.\n", __func__);
3873 3870
3874 return (rval); 3871 return (rval);
3875 } 3872 }
3876 3873
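The two removed atomic_set(&vha->loop_state, LOOP_UPDATE) lines above were the only writes this path made to loop_state; with them gone, qla2x00_loop_resync() leaves the loop state alone and is reduced to a bounded retry: issue a marker, reconfigure, and repeat while the resync flag keeps getting re-raised, for at most 256 passes. A schematic of that loop shape (pass budget and stand-in functions invented):

    #include <stdbool.h>
    #include <stdio.h>

    static int passes;

    /* Pretend the first two passes re-raise the resync request. */
    static bool configure_once(void) { return ++passes >= 3; }

    int main(void)
    {
            int wait_time = 256;            /* upper bound on passes */
            bool resync_needed = true;

            while (resync_needed && wait_time--) {
                    /* a real pass issues a marker before reconfiguring */
                    resync_needed = !configure_once();
            }
            printf("stable after %d pass(es)\n", passes);
            return resync_needed ? 1 : 0;
    }
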
3877 /* 3874 /*
3878 * qla2x00_perform_loop_resync 3875 * qla2x00_perform_loop_resync
3879 * Description: This function will set the appropriate flags and call 3876 * Description: This function will set the appropriate flags and call
3880 * qla2x00_loop_resync. If successful, the loop will be resynced 3877 * qla2x00_loop_resync. If successful, the loop will be resynced
3881 * Arguments : scsi_qla_host_t pointer 3878 * Arguments : scsi_qla_host_t pointer
3882 * return : Success or Failure 3879 * return : Success or Failure
3883 */ 3880 */
3884 3881
3885 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) 3882 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
3886 { 3883 {
3887 int32_t rval = 0; 3884 int32_t rval = 0;
3888 3885
3889 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { 3886 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
3890 /* Configure the flags so that resync happens properly. */ 3887 /* Configure the flags so that resync happens properly. */
3891 atomic_set(&ha->loop_down_timer, 0); 3888 atomic_set(&ha->loop_down_timer, 0);
3892 if (!(ha->device_flags & DFLG_NO_CABLE)) { 3889 if (!(ha->device_flags & DFLG_NO_CABLE)) {
3893 atomic_set(&ha->loop_state, LOOP_UP); 3890 atomic_set(&ha->loop_state, LOOP_UP);
3894 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 3891 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
3895 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 3892 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
3896 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3893 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
3897 3894
3898 rval = qla2x00_loop_resync(ha); 3895 rval = qla2x00_loop_resync(ha);
3899 } else 3896 } else
3900 atomic_set(&ha->loop_state, LOOP_DEAD); 3897 atomic_set(&ha->loop_state, LOOP_DEAD);
3901 3898
3902 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 3899 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
3903 } 3900 }
3904 3901
3905 return rval; 3902 return rval;
3906 } 3903 }
3907 3904
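qla2x00_perform_loop_resync() serializes itself with test_and_set_bit(LOOP_RESYNC_ACTIVE, ...): the atomic test-and-set both checks for and claims the resync-in-progress slot, so a concurrent caller simply returns. The same idiom with a C11 atomic_flag standing in for the kernel bitop (an analogy, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag resync_active = ATOMIC_FLAG_INIT;

    static int perform_resync(void)
    {
            /* Claim the slot; a concurrent caller sees 'true' and backs off. */
            if (atomic_flag_test_and_set(&resync_active))
                    return 0;       /* someone else is already resyncing */

            printf("resync running\n");

            atomic_flag_clear(&resync_active);
            return 0;
    }

    int main(void)
    {
            perform_resync();
            perform_resync();   /* runs again: the slot was released */
            return 0;
    }
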
3908 void 3905 void
3909 qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3906 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3910 { 3907 {
3911 fc_port_t *fcport; 3908 fc_port_t *fcport;
3912 struct scsi_qla_host *vha; 3909 struct scsi_qla_host *vha;
3913 struct qla_hw_data *ha = base_vha->hw; 3910 struct qla_hw_data *ha = base_vha->hw;
3914 unsigned long flags; 3911 unsigned long flags;
3915 3912
3916 spin_lock_irqsave(&ha->vport_slock, flags); 3913 spin_lock_irqsave(&ha->vport_slock, flags);
3917 /* Go with deferred removal of rport references. */ 3914 /* Go with deferred removal of rport references. */
3918 list_for_each_entry(vha, &base_vha->hw->vp_list, list) { 3915 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3919 atomic_inc(&vha->vref_count); 3916 atomic_inc(&vha->vref_count);
3920 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3917 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3921 if (fcport->drport && 3918 if (fcport->drport &&
3922 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 3919 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3923 spin_unlock_irqrestore(&ha->vport_slock, flags); 3920 spin_unlock_irqrestore(&ha->vport_slock, flags);
3924 3921
3925 qla2x00_rport_del(fcport); 3922 qla2x00_rport_del(fcport);
3926 3923
3927 spin_lock_irqsave(&ha->vport_slock, flags); 3924 spin_lock_irqsave(&ha->vport_slock, flags);
3928 } 3925 }
3929 } 3926 }
3930 atomic_dec(&vha->vref_count); 3927 atomic_dec(&vha->vref_count);
3931 } 3928 }
3932 spin_unlock_irqrestore(&ha->vport_slock, flags); 3929 spin_unlock_irqrestore(&ha->vport_slock, flags);
3933 } 3930 }
3934 3931
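The traversal above is a classic drop-the-lock idiom: the vport list is walked under vport_slock, but qla2x00_rport_del() can block, so the code pins the current host via vref_count, releases the spinlock around the blocking call, and reacquires it before continuing the walk. A compressed sketch with pthreads standing in for the kernel primitives (names illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int vref_count;      /* stand-in for the atomic vref_count */

    static void blocking_work(int item)
    {
            /* May sleep in the kernel; must not run under the spinlock. */
            printf("working on item %d\n", item);
    }

    int main(void)
    {
            pthread_mutex_lock(&list_lock);
            for (int item = 0; item < 3; item++) {
                    vref_count++;               /* pin the current entry */
                    pthread_mutex_unlock(&list_lock);

                    blocking_work(item);        /* lock not held here */

                    pthread_mutex_lock(&list_lock);
                    vref_count--;               /* unpin; walk resumes */
            }
            pthread_mutex_unlock(&list_lock);
            return 0;
    }
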
3935 /* 3932 /*
3936 * qla82xx_quiescent_state_cleanup 3933 * qla82xx_quiescent_state_cleanup
3937 * Description: This function will block the new I/Os 3934 * Description: This function will block the new I/Os
3938 * It's not aborting any I/Os, as the context 3935 * It's not aborting any I/Os, as the context
3939 * is not destroyed during quiescence 3936 * is not destroyed during quiescence
3940 * Arguments: scsi_qla_host_t 3937 * Arguments: scsi_qla_host_t
3941 * return : void 3938 * return : void
3942 */ 3939 */
3943 void 3940 void
3944 qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha) 3941 qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3945 { 3942 {
3946 struct qla_hw_data *ha = vha->hw; 3943 struct qla_hw_data *ha = vha->hw;
3947 struct scsi_qla_host *vp; 3944 struct scsi_qla_host *vp;
3948 3945
3949 ql_dbg(ql_dbg_p3p, vha, 0xb002, 3946 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3950 "Performing ISP error recovery - ha=%p.\n", ha); 3947 "Performing ISP error recovery - ha=%p.\n", ha);
3951 3948
3952 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3949 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3953 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3950 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3954 atomic_set(&vha->loop_state, LOOP_DOWN); 3951 atomic_set(&vha->loop_state, LOOP_DOWN);
3955 qla2x00_mark_all_devices_lost(vha, 0); 3952 qla2x00_mark_all_devices_lost(vha, 0);
3956 list_for_each_entry(vp, &ha->vp_list, list) 3953 list_for_each_entry(vp, &ha->vp_list, list)
3957 qla2x00_mark_all_devices_lost(vp, 0); 3954 qla2x00_mark_all_devices_lost(vp, 0);
3958 } else { 3955 } else {
3959 if (!atomic_read(&vha->loop_down_timer)) 3956 if (!atomic_read(&vha->loop_down_timer))
3960 atomic_set(&vha->loop_down_timer, 3957 atomic_set(&vha->loop_down_timer,
3961 LOOP_DOWN_TIME); 3958 LOOP_DOWN_TIME);
3962 } 3959 }
3963 /* Wait for pending cmds to complete */ 3960 /* Wait for pending cmds to complete */
3964 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST); 3961 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
3965 } 3962 }
3966 3963
3967 void 3964 void
3968 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) 3965 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3969 { 3966 {
3970 struct qla_hw_data *ha = vha->hw; 3967 struct qla_hw_data *ha = vha->hw;
3971 struct scsi_qla_host *vp; 3968 struct scsi_qla_host *vp;
3972 unsigned long flags; 3969 unsigned long flags;
3973 fc_port_t *fcport; 3970 fc_port_t *fcport;
3974 3971
3975 /* For ISP82XX, the driver waits for the commands to complete, 3972 /* For ISP82XX, the driver waits for the commands to complete,
3976 * so the online flag should remain set. 3973 * so the online flag should remain set.
3977 */ 3974 */
3978 if (!IS_QLA82XX(ha)) 3975 if (!IS_QLA82XX(ha))
3979 vha->flags.online = 0; 3976 vha->flags.online = 0;
3980 ha->flags.chip_reset_done = 0; 3977 ha->flags.chip_reset_done = 0;
3981 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3978 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3982 ha->qla_stats.total_isp_aborts++; 3979 ha->qla_stats.total_isp_aborts++;
3983 3980
3984 ql_log(ql_log_info, vha, 0x00af, 3981 ql_log(ql_log_info, vha, 0x00af,
3985 "Performing ISP error recovery - ha=%p.\n", ha); 3982 "Performing ISP error recovery - ha=%p.\n", ha);
3986 3983
3987 /* For ISP82XX, reset_chip would only disable interrupts; 3984 /* For ISP82XX, reset_chip would only disable interrupts;
3988 * since the driver waits for the commands to complete, 3985 * since the driver waits for the commands to complete,
3989 * the interrupts need to stay enabled, so skip it. 3986 * the interrupts need to stay enabled, so skip it.
3990 */ 3987 */
3991 if (!IS_QLA82XX(ha)) 3988 if (!IS_QLA82XX(ha))
3992 ha->isp_ops->reset_chip(vha); 3989 ha->isp_ops->reset_chip(vha);
3993 3990
3994 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 3991 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3995 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3992 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3996 atomic_set(&vha->loop_state, LOOP_DOWN); 3993 atomic_set(&vha->loop_state, LOOP_DOWN);
3997 qla2x00_mark_all_devices_lost(vha, 0); 3994 qla2x00_mark_all_devices_lost(vha, 0);
3998 3995
3999 spin_lock_irqsave(&ha->vport_slock, flags); 3996 spin_lock_irqsave(&ha->vport_slock, flags);
4000 list_for_each_entry(vp, &ha->vp_list, list) { 3997 list_for_each_entry(vp, &ha->vp_list, list) {
4001 atomic_inc(&vp->vref_count); 3998 atomic_inc(&vp->vref_count);
4002 spin_unlock_irqrestore(&ha->vport_slock, flags); 3999 spin_unlock_irqrestore(&ha->vport_slock, flags);
4003 4000
4004 qla2x00_mark_all_devices_lost(vp, 0); 4001 qla2x00_mark_all_devices_lost(vp, 0);
4005 4002
4006 spin_lock_irqsave(&ha->vport_slock, flags); 4003 spin_lock_irqsave(&ha->vport_slock, flags);
4007 atomic_dec(&vp->vref_count); 4004 atomic_dec(&vp->vref_count);
4008 } 4005 }
4009 spin_unlock_irqrestore(&ha->vport_slock, flags); 4006 spin_unlock_irqrestore(&ha->vport_slock, flags);
4010 } else { 4007 } else {
4011 if (!atomic_read(&vha->loop_down_timer)) 4008 if (!atomic_read(&vha->loop_down_timer))
4012 atomic_set(&vha->loop_down_timer, 4009 atomic_set(&vha->loop_down_timer,
4013 LOOP_DOWN_TIME); 4010 LOOP_DOWN_TIME);
4014 } 4011 }
4015 4012
4016 /* Clear all async request states across all VPs. */ 4013 /* Clear all async request states across all VPs. */
4017 list_for_each_entry(fcport, &vha->vp_fcports, list) 4014 list_for_each_entry(fcport, &vha->vp_fcports, list)
4018 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 4015 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4019 spin_lock_irqsave(&ha->vport_slock, flags); 4016 spin_lock_irqsave(&ha->vport_slock, flags);
4020 list_for_each_entry(vp, &ha->vp_list, list) { 4017 list_for_each_entry(vp, &ha->vp_list, list) {
4021 atomic_inc(&vp->vref_count); 4018 atomic_inc(&vp->vref_count);
4022 spin_unlock_irqrestore(&ha->vport_slock, flags); 4019 spin_unlock_irqrestore(&ha->vport_slock, flags);
4023 4020
4024 list_for_each_entry(fcport, &vp->vp_fcports, list) 4021 list_for_each_entry(fcport, &vp->vp_fcports, list)
4025 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 4022 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4026 4023
4027 spin_lock_irqsave(&ha->vport_slock, flags); 4024 spin_lock_irqsave(&ha->vport_slock, flags);
4028 atomic_dec(&vp->vref_count); 4025 atomic_dec(&vp->vref_count);
4029 } 4026 }
4030 spin_unlock_irqrestore(&ha->vport_slock, flags); 4027 spin_unlock_irqrestore(&ha->vport_slock, flags);
4031 4028
4032 if (!ha->flags.eeh_busy) { 4029 if (!ha->flags.eeh_busy) {
4033 /* Make sure for ISP 82XX IO DMA is complete */ 4030 /* Make sure for ISP 82XX IO DMA is complete */
4034 if (IS_QLA82XX(ha)) { 4031 if (IS_QLA82XX(ha)) {
4035 qla82xx_chip_reset_cleanup(vha); 4032 qla82xx_chip_reset_cleanup(vha);
4036 ql_log(ql_log_info, vha, 0x00b4, 4033 ql_log(ql_log_info, vha, 0x00b4,
4037 "Done chip reset cleanup.\n"); 4034 "Done chip reset cleanup.\n");
4038 4035
4039 /* Done waiting for pending commands. 4036 /* Done waiting for pending commands.
4040 * Reset the online flag. 4037 * Reset the online flag.
4041 */ 4038 */
4042 vha->flags.online = 0; 4039 vha->flags.online = 0;
4043 } 4040 }
4044 4041
4045 /* Requeue all commands in outstanding command list. */ 4042 /* Requeue all commands in outstanding command list. */
4046 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 4043 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4047 } 4044 }
4048 } 4045 }
4049 4046
4050 /* 4047 /*
4051 * qla2x00_abort_isp 4048 * qla2x00_abort_isp
4052 * Resets ISP and aborts all outstanding commands. 4049 * Resets ISP and aborts all outstanding commands.
4053 * 4050 *
4054 * Input: 4051 * Input:
4055 * ha = adapter block pointer. 4052 * ha = adapter block pointer.
4056 * 4053 *
4057 * Returns: 4054 * Returns:
4058 * 0 = success 4055 * 0 = success
4059 */ 4056 */
4060 int 4057 int
4061 qla2x00_abort_isp(scsi_qla_host_t *vha) 4058 qla2x00_abort_isp(scsi_qla_host_t *vha)
4062 { 4059 {
4063 int rval; 4060 int rval;
4064 uint8_t status = 0; 4061 uint8_t status = 0;
4065 struct qla_hw_data *ha = vha->hw; 4062 struct qla_hw_data *ha = vha->hw;
4066 struct scsi_qla_host *vp; 4063 struct scsi_qla_host *vp;
4067 struct req_que *req = ha->req_q_map[0]; 4064 struct req_que *req = ha->req_q_map[0];
4068 unsigned long flags; 4065 unsigned long flags;
4069 4066
4070 if (vha->flags.online) { 4067 if (vha->flags.online) {
4071 qla2x00_abort_isp_cleanup(vha); 4068 qla2x00_abort_isp_cleanup(vha);
4072 4069
4073 if (unlikely(pci_channel_offline(ha->pdev) && 4070 if (unlikely(pci_channel_offline(ha->pdev) &&
4074 ha->flags.pci_channel_io_perm_failure)) { 4071 ha->flags.pci_channel_io_perm_failure)) {
4075 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4072 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4076 status = 0; 4073 status = 0;
4077 return status; 4074 return status;
4078 } 4075 }
4079 4076
4080 ha->isp_ops->get_flash_version(vha, req->ring); 4077 ha->isp_ops->get_flash_version(vha, req->ring);
4081 4078
4082 ha->isp_ops->nvram_config(vha); 4079 ha->isp_ops->nvram_config(vha);
4083 4080
4084 if (!qla2x00_restart_isp(vha)) { 4081 if (!qla2x00_restart_isp(vha)) {
4085 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4082 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4086 4083
4087 if (!atomic_read(&vha->loop_down_timer)) { 4084 if (!atomic_read(&vha->loop_down_timer)) {
4088 /* 4085 /*
4089 * Issue marker command only when we are going 4086 * Issue marker command only when we are going
4090 * to start the I/O. 4087 * to start the I/O.
4091 */ 4088 */
4092 vha->marker_needed = 1; 4089 vha->marker_needed = 1;
4093 } 4090 }
4094 4091
4095 vha->flags.online = 1; 4092 vha->flags.online = 1;
4096 4093
4097 ha->isp_ops->enable_intrs(ha); 4094 ha->isp_ops->enable_intrs(ha);
4098 4095
4099 ha->isp_abort_cnt = 0; 4096 ha->isp_abort_cnt = 0;
4100 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4097 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4101 4098
4102 if (IS_QLA81XX(ha)) 4099 if (IS_QLA81XX(ha))
4103 qla2x00_get_fw_version(vha, 4100 qla2x00_get_fw_version(vha,
4104 &ha->fw_major_version, 4101 &ha->fw_major_version,
4105 &ha->fw_minor_version, 4102 &ha->fw_minor_version,
4106 &ha->fw_subminor_version, 4103 &ha->fw_subminor_version,
4107 &ha->fw_attributes, &ha->fw_memory_size, 4104 &ha->fw_attributes, &ha->fw_memory_size,
4108 ha->mpi_version, &ha->mpi_capabilities, 4105 ha->mpi_version, &ha->mpi_capabilities,
4109 ha->phy_version); 4106 ha->phy_version);
4110 4107
4111 if (ha->fce) { 4108 if (ha->fce) {
4112 ha->flags.fce_enabled = 1; 4109 ha->flags.fce_enabled = 1;
4113 memset(ha->fce, 0, 4110 memset(ha->fce, 0,
4114 fce_calc_size(ha->fce_bufs)); 4111 fce_calc_size(ha->fce_bufs));
4115 rval = qla2x00_enable_fce_trace(vha, 4112 rval = qla2x00_enable_fce_trace(vha,
4116 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 4113 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4117 &ha->fce_bufs); 4114 &ha->fce_bufs);
4118 if (rval) { 4115 if (rval) {
4119 ql_log(ql_log_warn, vha, 0x8033, 4116 ql_log(ql_log_warn, vha, 0x8033,
4120 "Unable to reinitialize FCE " 4117 "Unable to reinitialize FCE "
4121 "(%d).\n", rval); 4118 "(%d).\n", rval);
4122 ha->flags.fce_enabled = 0; 4119 ha->flags.fce_enabled = 0;
4123 } 4120 }
4124 } 4121 }
4125 4122
4126 if (ha->eft) { 4123 if (ha->eft) {
4127 memset(ha->eft, 0, EFT_SIZE); 4124 memset(ha->eft, 0, EFT_SIZE);
4128 rval = qla2x00_enable_eft_trace(vha, 4125 rval = qla2x00_enable_eft_trace(vha,
4129 ha->eft_dma, EFT_NUM_BUFFERS); 4126 ha->eft_dma, EFT_NUM_BUFFERS);
4130 if (rval) { 4127 if (rval) {
4131 ql_log(ql_log_warn, vha, 0x8034, 4128 ql_log(ql_log_warn, vha, 0x8034,
4132 "Unable to reinitialize EFT " 4129 "Unable to reinitialize EFT "
4133 "(%d).\n", rval); 4130 "(%d).\n", rval);
4134 } 4131 }
4135 } 4132 }
4136 } else { /* failed the ISP abort */ 4133 } else { /* failed the ISP abort */
4137 vha->flags.online = 1; 4134 vha->flags.online = 1;
4138 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 4135 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4139 if (ha->isp_abort_cnt == 0) { 4136 if (ha->isp_abort_cnt == 0) {
4140 ql_log(ql_log_fatal, vha, 0x8035, 4137 ql_log(ql_log_fatal, vha, 0x8035,
4141 "ISP error recover failed - " 4138 "ISP error recover failed - "
4142 "board disabled.\n"); 4139 "board disabled.\n");
4143 /* 4140 /*
4144 * The next call disables the board 4141 * The next call disables the board
4145 * completely. 4142 * completely.
4146 */ 4143 */
4147 ha->isp_ops->reset_adapter(vha); 4144 ha->isp_ops->reset_adapter(vha);
4148 vha->flags.online = 0; 4145 vha->flags.online = 0;
4149 clear_bit(ISP_ABORT_RETRY, 4146 clear_bit(ISP_ABORT_RETRY,
4150 &vha->dpc_flags); 4147 &vha->dpc_flags);
4151 status = 0; 4148 status = 0;
4152 } else { /* schedule another ISP abort */ 4149 } else { /* schedule another ISP abort */
4153 ha->isp_abort_cnt--; 4150 ha->isp_abort_cnt--;
4154 ql_dbg(ql_dbg_taskm, vha, 0x8020, 4151 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4155 "ISP abort - retry remaining %d.\n", 4152 "ISP abort - retry remaining %d.\n",
4156 ha->isp_abort_cnt); 4153 ha->isp_abort_cnt);
4157 status = 1; 4154 status = 1;
4158 } 4155 }
4159 } else { 4156 } else {
4160 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 4157 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4161 ql_dbg(ql_dbg_taskm, vha, 0x8021, 4158 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4162 "ISP error recovery - retrying (%d) " 4159 "ISP error recovery - retrying (%d) "
4163 "more times.\n", ha->isp_abort_cnt); 4160 "more times.\n", ha->isp_abort_cnt);
4164 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4161 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4165 status = 1; 4162 status = 1;
4166 } 4163 }
4167 } 4164 }
4168 4165
4169 } 4166 }
4170 4167
4171 if (!status) { 4168 if (!status) {
4172 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); 4169 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4173 4170
4174 spin_lock_irqsave(&ha->vport_slock, flags); 4171 spin_lock_irqsave(&ha->vport_slock, flags);
4175 list_for_each_entry(vp, &ha->vp_list, list) { 4172 list_for_each_entry(vp, &ha->vp_list, list) {
4176 if (vp->vp_idx) { 4173 if (vp->vp_idx) {
4177 atomic_inc(&vp->vref_count); 4174 atomic_inc(&vp->vref_count);
4178 spin_unlock_irqrestore(&ha->vport_slock, flags); 4175 spin_unlock_irqrestore(&ha->vport_slock, flags);
4179 4176
4180 qla2x00_vp_abort_isp(vp); 4177 qla2x00_vp_abort_isp(vp);
4181 4178
4182 spin_lock_irqsave(&ha->vport_slock, flags); 4179 spin_lock_irqsave(&ha->vport_slock, flags);
4183 atomic_dec(&vp->vref_count); 4180 atomic_dec(&vp->vref_count);
4184 } 4181 }
4185 } 4182 }
4186 spin_unlock_irqrestore(&ha->vport_slock, flags); 4183 spin_unlock_irqrestore(&ha->vport_slock, flags);
4187 4184
4188 } else { 4185 } else {
4189 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__); 4186 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__);
4190 } 4187 }
4191 4188
4192 return(status); 4189 return(status);
4193 } 4190 }
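
A compact sketch of the failed-abort bookkeeping above: the first failure seeds a retry budget and schedules another abort, and subsequent failures count the budget down until the board is disabled. MAX_RETRIES and the recovery struct are stand-ins for MAX_RETRIES_OF_ISP_ABORT and the ISP_ABORT_RETRY/isp_abort_cnt pair.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RETRIES 5   /* stand-in for MAX_RETRIES_OF_ISP_ABORT */

    struct recovery {
            bool retry_scheduled;   /* stands in for ISP_ABORT_RETRY */
            int abort_cnt;          /* stands in for ha->isp_abort_cnt */
    };

    /* Returns 1 while another abort attempt is due, 0 when recovery
     * stops (the board-disabled case), mirroring the status flag above. */
    static int handle_failed_abort(struct recovery *r)
    {
            if (r->retry_scheduled) {
                    if (r->abort_cnt == 0) {
                            puts("board disabled");
                            r->retry_scheduled = false;
                            return 0;
                    }
                    r->abort_cnt--;
                    printf("retries remaining: %d\n", r->abort_cnt);
                    return 1;
            }
            r->abort_cnt = MAX_RETRIES;
            r->retry_scheduled = true;
            return 1;
    }

    int main(void)
    {
            struct recovery r = { false, 0 };

            while (handle_failed_abort(&r))
                    ;
            return 0;
    }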
4194 4191
4195 /* 4192 /*
4196 * qla2x00_restart_isp 4193 * qla2x00_restart_isp
4197 * restarts the ISP after a reset 4194 * restarts the ISP after a reset
4198 * 4195 *
4199 * Input: 4196 * Input:
4200 * ha = adapter block pointer. 4197 * ha = adapter block pointer.
4201 * 4198 *
4202 * Returns: 4199 * Returns:
4203 * 0 = success 4200 * 0 = success
4204 */ 4201 */
4205 static int 4202 static int
4206 qla2x00_restart_isp(scsi_qla_host_t *vha) 4203 qla2x00_restart_isp(scsi_qla_host_t *vha)
4207 { 4204 {
4208 int status = 0; 4205 int status = 0;
4209 uint32_t wait_time; 4206 uint32_t wait_time;
4210 struct qla_hw_data *ha = vha->hw; 4207 struct qla_hw_data *ha = vha->hw;
4211 struct req_que *req = ha->req_q_map[0]; 4208 struct req_que *req = ha->req_q_map[0];
4212 struct rsp_que *rsp = ha->rsp_q_map[0]; 4209 struct rsp_que *rsp = ha->rsp_q_map[0];
4213 4210
4214 /* If firmware needs to be loaded */ 4211 /* If firmware needs to be loaded */
4215 if (qla2x00_isp_firmware(vha)) { 4212 if (qla2x00_isp_firmware(vha)) {
4216 vha->flags.online = 0; 4213 vha->flags.online = 0;
4217 status = ha->isp_ops->chip_diag(vha); 4214 status = ha->isp_ops->chip_diag(vha);
4218 if (!status) 4215 if (!status)
4219 status = qla2x00_setup_chip(vha); 4216 status = qla2x00_setup_chip(vha);
4220 } 4217 }
4221 4218
4222 if (!status && !(status = qla2x00_init_rings(vha))) { 4219 if (!status && !(status = qla2x00_init_rings(vha))) {
4223 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4220 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4224 ha->flags.chip_reset_done = 1; 4221 ha->flags.chip_reset_done = 1;
4225 /* Initialize the queues in use */ 4222 /* Initialize the queues in use */
4226 qla25xx_init_queues(ha); 4223 qla25xx_init_queues(ha);
4227 4224
4228 status = qla2x00_fw_ready(vha); 4225 status = qla2x00_fw_ready(vha);
4229 if (!status) { 4226 if (!status) {
4230 ql_dbg(ql_dbg_taskm, vha, 0x8031, 4227 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4231 "Start configure loop status = %d.\n", status); 4228 "Start configure loop status = %d.\n", status);
4232 4229
4233 /* Issue a marker after FW becomes ready. */ 4230 /* Issue a marker after FW becomes ready. */
4234 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4231 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4235 4232
4236 vha->flags.online = 1; 4233 vha->flags.online = 1;
4237 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 4234 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4238 wait_time = 256; 4235 wait_time = 256;
4239 do { 4236 do {
4240 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4237 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4241 qla2x00_configure_loop(vha); 4238 qla2x00_configure_loop(vha);
4242 wait_time--; 4239 wait_time--;
4243 } while (!atomic_read(&vha->loop_down_timer) && 4240 } while (!atomic_read(&vha->loop_down_timer) &&
4244 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 4241 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4245 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 4242 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4246 &vha->dpc_flags))); 4243 &vha->dpc_flags)));
4247 } 4244 }
4248 4245
4249 /* if no cable then assume it's good */ 4246 /* if no cable then assume it's good */
4250 if ((vha->device_flags & DFLG_NO_CABLE)) 4247 if ((vha->device_flags & DFLG_NO_CABLE))
4251 status = 0; 4248 status = 0;
4252 4249
4253 ql_dbg(ql_dbg_taskm, vha, 0x8032, 4250 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4254 "Configure loop done, status = 0x%x.\n", status); 4251 "Configure loop done, status = 0x%x.\n", status);
4255 } 4252 }
4256 return (status); 4253 return (status);
4257 } 4254 }
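
The do/while in qla2x00_restart_isp() bounds link stabilization: configuration is re-run while resync requests keep arriving, but at most 256 times, so a flapping link cannot wedge recovery. A simplified, runnable sketch follows; the loop-down and ISP-abort checks are omitted and the RSCN burst count is invented.

    #include <stdbool.h>
    #include <stdio.h>

    static int rscn_bursts = 3;     /* pretend the fabric sends 3 RSCNs */

    static bool configure_loop(void)
    {
            /* Returns true if another resync was requested meanwhile. */
            return rscn_bursts-- > 0;
    }

    int main(void)
    {
            int wait_time = 256;
            bool resync_needed;

            do {
                    resync_needed = configure_loop();
                    wait_time--;
            } while (wait_time && resync_needed);

            printf("settled with %d retries left\n", wait_time);
            return 0;
    }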
4258 4255
4259 static int 4256 static int
4260 qla25xx_init_queues(struct qla_hw_data *ha) 4257 qla25xx_init_queues(struct qla_hw_data *ha)
4261 { 4258 {
4262 struct rsp_que *rsp = NULL; 4259 struct rsp_que *rsp = NULL;
4263 struct req_que *req = NULL; 4260 struct req_que *req = NULL;
4264 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4261 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4265 int ret = -1; 4262 int ret = -1;
4266 int i; 4263 int i;
4267 4264
4268 for (i = 1; i < ha->max_rsp_queues; i++) { 4265 for (i = 1; i < ha->max_rsp_queues; i++) {
4269 rsp = ha->rsp_q_map[i]; 4266 rsp = ha->rsp_q_map[i];
4270 if (rsp) { 4267 if (rsp) {
4271 rsp->options &= ~BIT_0; 4268 rsp->options &= ~BIT_0;
4272 ret = qla25xx_init_rsp_que(base_vha, rsp); 4269 ret = qla25xx_init_rsp_que(base_vha, rsp);
4273 if (ret != QLA_SUCCESS) 4270 if (ret != QLA_SUCCESS)
4274 ql_dbg(ql_dbg_init, base_vha, 0x00ff, 4271 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4275 "%s Rsp que: %d init failed.\n", 4272 "%s Rsp que: %d init failed.\n",
4276 __func__, rsp->id); 4273 __func__, rsp->id);
4277 else 4274 else
4278 ql_dbg(ql_dbg_init, base_vha, 0x0100, 4275 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4279 "%s Rsp que: %d inited.\n", 4276 "%s Rsp que: %d inited.\n",
4280 __func__, rsp->id); 4277 __func__, rsp->id);
4281 } 4278 }
4282 } 4279 }
4283 for (i = 1; i < ha->max_req_queues; i++) { 4280 for (i = 1; i < ha->max_req_queues; i++) {
4284 req = ha->req_q_map[i]; 4281 req = ha->req_q_map[i];
4285 if (req) { 4282 if (req) {
4286 /* Clear outstanding commands array. */ 4283 /* Clear outstanding commands array. */
4287 req->options &= ~BIT_0; 4284 req->options &= ~BIT_0;
4288 ret = qla25xx_init_req_que(base_vha, req); 4285 ret = qla25xx_init_req_que(base_vha, req);
4289 if (ret != QLA_SUCCESS) 4286 if (ret != QLA_SUCCESS)
4290 ql_dbg(ql_dbg_init, base_vha, 0x0101, 4287 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4291 "%s Req que: %d init failed.\n", 4288 "%s Req que: %d init failed.\n",
4292 __func__, req->id); 4289 __func__, req->id);
4293 else 4290 else
4294 ql_dbg(ql_dbg_init, base_vha, 0x0102, 4291 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4295 "%s Req que: %d inited.\n", 4292 "%s Req que: %d inited.\n",
4296 __func__, req->id); 4293 __func__, req->id);
4297 } 4294 }
4298 } 4295 }
4299 return ret; 4296 return ret;
4300 } 4297 }
4301 4298
4302 /* 4299 /*
4303 * qla2x00_reset_adapter 4300 * qla2x00_reset_adapter
4304 * Reset adapter. 4301 * Reset adapter.
4305 * 4302 *
4306 * Input: 4303 * Input:
4307 * ha = adapter block pointer. 4304 * ha = adapter block pointer.
4308 */ 4305 */
4309 void 4306 void
4310 qla2x00_reset_adapter(scsi_qla_host_t *vha) 4307 qla2x00_reset_adapter(scsi_qla_host_t *vha)
4311 { 4308 {
4312 unsigned long flags = 0; 4309 unsigned long flags = 0;
4313 struct qla_hw_data *ha = vha->hw; 4310 struct qla_hw_data *ha = vha->hw;
4314 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 4311 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4315 4312
4316 vha->flags.online = 0; 4313 vha->flags.online = 0;
4317 ha->isp_ops->disable_intrs(ha); 4314 ha->isp_ops->disable_intrs(ha);
4318 4315
4319 spin_lock_irqsave(&ha->hardware_lock, flags); 4316 spin_lock_irqsave(&ha->hardware_lock, flags);
4320 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 4317 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4321 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 4318 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4322 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); 4319 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4323 RD_REG_WORD(&reg->hccr); /* PCI Posting. */ 4320 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4324 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4321 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4325 } 4322 }
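
Both reset routines use the write-then-read-back MMIO idiom flagged by the "PCI Posting" comments: the dummy read forces posted writes to reach the device before execution continues. A user-space sketch of the idiom; the register values are placeholders, not the real HCCR command codes, and a fake variable stands in for the ioremap'd register.

    #include <stdint.h>

    static void wrt_reg_dword(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;
    }

    static uint32_t rd_reg_dword(volatile uint32_t *reg)
    {
            return *reg;    /* the read-back flushes posted PCI writes */
    }

    static void reset_risc(volatile uint32_t *hccr)
    {
            wrt_reg_dword(hccr, 0x10000000u);  /* placeholder "reset RISC" code */
            (void)rd_reg_dword(hccr);
            wrt_reg_dword(hccr, 0x20000000u);  /* placeholder "release RISC" code */
            (void)rd_reg_dword(hccr);
    }

    int main(void)
    {
            uint32_t fake_hccr = 0;    /* a real driver maps this via ioremap */

            reset_risc(&fake_hccr);
            return 0;
    }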
4326 4323
4327 void 4324 void
4328 qla24xx_reset_adapter(scsi_qla_host_t *vha) 4325 qla24xx_reset_adapter(scsi_qla_host_t *vha)
4329 { 4326 {
4330 unsigned long flags = 0; 4327 unsigned long flags = 0;
4331 struct qla_hw_data *ha = vha->hw; 4328 struct qla_hw_data *ha = vha->hw;
4332 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 4329 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4333 4330
4334 if (IS_QLA82XX(ha)) 4331 if (IS_QLA82XX(ha))
4335 return; 4332 return;
4336 4333
4337 vha->flags.online = 0; 4334 vha->flags.online = 0;
4338 ha->isp_ops->disable_intrs(ha); 4335 ha->isp_ops->disable_intrs(ha);
4339 4336
4340 spin_lock_irqsave(&ha->hardware_lock, flags); 4337 spin_lock_irqsave(&ha->hardware_lock, flags);
4341 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 4338 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4342 RD_REG_DWORD(&reg->hccr); 4339 RD_REG_DWORD(&reg->hccr);
4343 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE); 4340 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4344 RD_REG_DWORD(&reg->hccr); 4341 RD_REG_DWORD(&reg->hccr);
4345 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4342 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4346 4343
4347 if (IS_NOPOLLING_TYPE(ha)) 4344 if (IS_NOPOLLING_TYPE(ha))
4348 ha->isp_ops->enable_intrs(ha); 4345 ha->isp_ops->enable_intrs(ha);
4349 } 4346 }
4350 4347
4351 /* On sparc systems, obtain port and node WWN from firmware 4348 /* On sparc systems, obtain port and node WWN from firmware
4352 * properties. 4349 * properties.
4353 */ 4350 */
4354 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 4351 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4355 struct nvram_24xx *nv) 4352 struct nvram_24xx *nv)
4356 { 4353 {
4357 #ifdef CONFIG_SPARC 4354 #ifdef CONFIG_SPARC
4358 struct qla_hw_data *ha = vha->hw; 4355 struct qla_hw_data *ha = vha->hw;
4359 struct pci_dev *pdev = ha->pdev; 4356 struct pci_dev *pdev = ha->pdev;
4360 struct device_node *dp = pci_device_to_OF_node(pdev); 4357 struct device_node *dp = pci_device_to_OF_node(pdev);
4361 const u8 *val; 4358 const u8 *val;
4362 int len; 4359 int len;
4363 4360
4364 val = of_get_property(dp, "port-wwn", &len); 4361 val = of_get_property(dp, "port-wwn", &len);
4365 if (val && len >= WWN_SIZE) 4362 if (val && len >= WWN_SIZE)
4366 memcpy(nv->port_name, val, WWN_SIZE); 4363 memcpy(nv->port_name, val, WWN_SIZE);
4367 4364
4368 val = of_get_property(dp, "node-wwn", &len); 4365 val = of_get_property(dp, "node-wwn", &len);
4369 if (val && len >= WWN_SIZE) 4366 if (val && len >= WWN_SIZE)
4370 memcpy(nv->node_name, val, WWN_SIZE); 4367 memcpy(nv->node_name, val, WWN_SIZE);
4371 #endif 4368 #endif
4372 } 4369 }
4373 4370
4374 int 4371 int
4375 qla24xx_nvram_config(scsi_qla_host_t *vha) 4372 qla24xx_nvram_config(scsi_qla_host_t *vha)
4376 { 4373 {
4377 int rval; 4374 int rval;
4378 struct init_cb_24xx *icb; 4375 struct init_cb_24xx *icb;
4379 struct nvram_24xx *nv; 4376 struct nvram_24xx *nv;
4380 uint32_t *dptr; 4377 uint32_t *dptr;
4381 uint8_t *dptr1, *dptr2; 4378 uint8_t *dptr1, *dptr2;
4382 uint32_t chksum; 4379 uint32_t chksum;
4383 uint16_t cnt; 4380 uint16_t cnt;
4384 struct qla_hw_data *ha = vha->hw; 4381 struct qla_hw_data *ha = vha->hw;
4385 4382
4386 rval = QLA_SUCCESS; 4383 rval = QLA_SUCCESS;
4387 icb = (struct init_cb_24xx *)ha->init_cb; 4384 icb = (struct init_cb_24xx *)ha->init_cb;
4388 nv = ha->nvram; 4385 nv = ha->nvram;
4389 4386
4390 /* Determine NVRAM starting address. */ 4387 /* Determine NVRAM starting address. */
4391 if (ha->flags.port0) { 4388 if (ha->flags.port0) {
4392 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 4389 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4393 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 4390 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4394 } else { 4391 } else {
4395 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 4392 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4396 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 4393 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4397 } 4394 }
4398 ha->nvram_size = sizeof(struct nvram_24xx); 4395 ha->nvram_size = sizeof(struct nvram_24xx);
4399 ha->vpd_size = FA_NVRAM_VPD_SIZE; 4396 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4400 if (IS_QLA82XX(ha)) 4397 if (IS_QLA82XX(ha))
4401 ha->vpd_size = FA_VPD_SIZE_82XX; 4398 ha->vpd_size = FA_VPD_SIZE_82XX;
4402 4399
4403 /* Get VPD data into cache */ 4400 /* Get VPD data into cache */
4404 ha->vpd = ha->nvram + VPD_OFFSET; 4401 ha->vpd = ha->nvram + VPD_OFFSET;
4405 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 4402 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4406 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 4403 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4407 4404
4408 /* Get NVRAM data into cache and calculate checksum. */ 4405 /* Get NVRAM data into cache and calculate checksum. */
4409 dptr = (uint32_t *)nv; 4406 dptr = (uint32_t *)nv;
4410 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 4407 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4411 ha->nvram_size); 4408 ha->nvram_size);
4412 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4409 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4413 chksum += le32_to_cpu(*dptr++); 4410 chksum += le32_to_cpu(*dptr++);
4414 4411
4415 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, 4412 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4416 "Contents of NVRAM\n"); 4413 "Contents of NVRAM\n");
4417 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, 4414 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4418 (uint8_t *)nv, ha->nvram_size); 4415 (uint8_t *)nv, ha->nvram_size);
4419 4416
4420 /* Bad NVRAM data, set default parameters. */ 4417 /* Bad NVRAM data, set default parameters. */
4421 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4418 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4422 || nv->id[3] != ' ' || 4419 || nv->id[3] != ' ' ||
4423 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4420 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4424 /* Reset NVRAM data. */ 4421 /* Reset NVRAM data. */
4425 ql_log(ql_log_warn, vha, 0x006b, 4422 ql_log(ql_log_warn, vha, 0x006b,
4426 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 4423 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
4427 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 4424 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4428 ql_log(ql_log_warn, vha, 0x006c, 4425 ql_log(ql_log_warn, vha, 0x006c,
4429 "Falling back to functioning (yet invalid -- WWPN) " 4426 "Falling back to functioning (yet invalid -- WWPN) "
4430 "defaults.\n"); 4427 "defaults.\n");
4431 4428
4432 /* 4429 /*
4433 * Set default initialization control block. 4430 * Set default initialization control block.
4434 */ 4431 */
4435 memset(nv, 0, ha->nvram_size); 4432 memset(nv, 0, ha->nvram_size);
4436 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 4433 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4437 nv->version = __constant_cpu_to_le16(ICB_VERSION); 4434 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4438 nv->frame_payload_size = __constant_cpu_to_le16(2048); 4435 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4439 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4436 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4440 nv->exchange_count = __constant_cpu_to_le16(0); 4437 nv->exchange_count = __constant_cpu_to_le16(0);
4441 nv->hard_address = __constant_cpu_to_le16(124); 4438 nv->hard_address = __constant_cpu_to_le16(124);
4442 nv->port_name[0] = 0x21; 4439 nv->port_name[0] = 0x21;
4443 nv->port_name[1] = 0x00 + ha->port_no; 4440 nv->port_name[1] = 0x00 + ha->port_no;
4444 nv->port_name[2] = 0x00; 4441 nv->port_name[2] = 0x00;
4445 nv->port_name[3] = 0xe0; 4442 nv->port_name[3] = 0xe0;
4446 nv->port_name[4] = 0x8b; 4443 nv->port_name[4] = 0x8b;
4447 nv->port_name[5] = 0x1c; 4444 nv->port_name[5] = 0x1c;
4448 nv->port_name[6] = 0x55; 4445 nv->port_name[6] = 0x55;
4449 nv->port_name[7] = 0x86; 4446 nv->port_name[7] = 0x86;
4450 nv->node_name[0] = 0x20; 4447 nv->node_name[0] = 0x20;
4451 nv->node_name[1] = 0x00; 4448 nv->node_name[1] = 0x00;
4452 nv->node_name[2] = 0x00; 4449 nv->node_name[2] = 0x00;
4453 nv->node_name[3] = 0xe0; 4450 nv->node_name[3] = 0xe0;
4454 nv->node_name[4] = 0x8b; 4451 nv->node_name[4] = 0x8b;
4455 nv->node_name[5] = 0x1c; 4452 nv->node_name[5] = 0x1c;
4456 nv->node_name[6] = 0x55; 4453 nv->node_name[6] = 0x55;
4457 nv->node_name[7] = 0x86; 4454 nv->node_name[7] = 0x86;
4458 qla24xx_nvram_wwn_from_ofw(vha, nv); 4455 qla24xx_nvram_wwn_from_ofw(vha, nv);
4459 nv->login_retry_count = __constant_cpu_to_le16(8); 4456 nv->login_retry_count = __constant_cpu_to_le16(8);
4460 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 4457 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4461 nv->login_timeout = __constant_cpu_to_le16(0); 4458 nv->login_timeout = __constant_cpu_to_le16(0);
4462 nv->firmware_options_1 = 4459 nv->firmware_options_1 =
4463 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 4460 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4464 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 4461 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4465 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 4462 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4466 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 4463 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4467 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 4464 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4468 nv->efi_parameters = __constant_cpu_to_le32(0); 4465 nv->efi_parameters = __constant_cpu_to_le32(0);
4469 nv->reset_delay = 5; 4466 nv->reset_delay = 5;
4470 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4467 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4471 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4468 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4472 nv->link_down_timeout = __constant_cpu_to_le16(30); 4469 nv->link_down_timeout = __constant_cpu_to_le16(30);
4473 4470
4474 rval = 1; 4471 rval = 1;
4475 } 4472 }
4476 4473
4477 /* Reset Initialization control block */ 4474 /* Reset Initialization control block */
4478 memset(icb, 0, ha->init_cb_size); 4475 memset(icb, 0, ha->init_cb_size);
4479 4476
4480 /* Copy 1st segment. */ 4477 /* Copy 1st segment. */
4481 dptr1 = (uint8_t *)icb; 4478 dptr1 = (uint8_t *)icb;
4482 dptr2 = (uint8_t *)&nv->version; 4479 dptr2 = (uint8_t *)&nv->version;
4483 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 4480 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4484 while (cnt--) 4481 while (cnt--)
4485 *dptr1++ = *dptr2++; 4482 *dptr1++ = *dptr2++;
4486 4483
4487 icb->login_retry_count = nv->login_retry_count; 4484 icb->login_retry_count = nv->login_retry_count;
4488 icb->link_down_on_nos = nv->link_down_on_nos; 4485 icb->link_down_on_nos = nv->link_down_on_nos;
4489 4486
4490 /* Copy 2nd segment. */ 4487 /* Copy 2nd segment. */
4491 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 4488 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4492 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 4489 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4493 cnt = (uint8_t *)&icb->reserved_3 - 4490 cnt = (uint8_t *)&icb->reserved_3 -
4494 (uint8_t *)&icb->interrupt_delay_timer; 4491 (uint8_t *)&icb->interrupt_delay_timer;
4495 while (cnt--) 4492 while (cnt--)
4496 *dptr1++ = *dptr2++; 4493 *dptr1++ = *dptr2++;
4497 4494
4498 /* 4495 /*
4499 * Setup driver NVRAM options. 4496 * Setup driver NVRAM options.
4500 */ 4497 */
4501 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4498 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4502 "QLA2462"); 4499 "QLA2462");
4503 4500
4504 /* Use alternate WWN? */ 4501 /* Use alternate WWN? */
4505 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4502 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4506 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4503 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4507 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4504 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4508 } 4505 }
4509 4506
4510 /* Prepare nodename */ 4507 /* Prepare nodename */
4511 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 4508 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4512 /* 4509 /*
4513 * Firmware will apply the following mask if the nodename was 4510 * Firmware will apply the following mask if the nodename was
4514 * not provided. 4511 * not provided.
4515 */ 4512 */
4516 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4513 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4517 icb->node_name[0] &= 0xF0; 4514 icb->node_name[0] &= 0xF0;
4518 } 4515 }
4519 4516
4520 /* Set host adapter parameters. */ 4517 /* Set host adapter parameters. */
4521 ha->flags.disable_risc_code_load = 0; 4518 ha->flags.disable_risc_code_load = 0;
4522 ha->flags.enable_lip_reset = 0; 4519 ha->flags.enable_lip_reset = 0;
4523 ha->flags.enable_lip_full_login = 4520 ha->flags.enable_lip_full_login =
4524 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 4521 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4525 ha->flags.enable_target_reset = 4522 ha->flags.enable_target_reset =
4526 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 4523 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4527 ha->flags.enable_led_scheme = 0; 4524 ha->flags.enable_led_scheme = 0;
4528 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 4525 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4529 4526
4530 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 4527 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4531 (BIT_6 | BIT_5 | BIT_4)) >> 4; 4528 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4532 4529
4533 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 4530 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4534 sizeof(ha->fw_seriallink_options24)); 4531 sizeof(ha->fw_seriallink_options24));
4535 4532
4536 /* save HBA serial number */ 4533 /* save HBA serial number */
4537 ha->serial0 = icb->port_name[5]; 4534 ha->serial0 = icb->port_name[5];
4538 ha->serial1 = icb->port_name[6]; 4535 ha->serial1 = icb->port_name[6];
4539 ha->serial2 = icb->port_name[7]; 4536 ha->serial2 = icb->port_name[7];
4540 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4537 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4541 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4538 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4542 4539
4543 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4540 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4544 4541
4545 ha->retry_count = le16_to_cpu(nv->login_retry_count); 4542 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4546 4543
4547 /* Set minimum login_timeout to 4 seconds. */ 4544 /* Set minimum login_timeout to 4 seconds. */
4548 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 4545 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4549 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 4546 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4550 if (le16_to_cpu(nv->login_timeout) < 4) 4547 if (le16_to_cpu(nv->login_timeout) < 4)
4551 nv->login_timeout = __constant_cpu_to_le16(4); 4548 nv->login_timeout = __constant_cpu_to_le16(4);
4552 ha->login_timeout = le16_to_cpu(nv->login_timeout); 4549 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4553 icb->login_timeout = nv->login_timeout; 4550 icb->login_timeout = nv->login_timeout;
4554 4551
4555 /* Set minimum RATOV to 100 tenths of a second. */ 4552 /* Set minimum RATOV to 100 tenths of a second. */
4556 ha->r_a_tov = 100; 4553 ha->r_a_tov = 100;
4557 4554
4558 ha->loop_reset_delay = nv->reset_delay; 4555 ha->loop_reset_delay = nv->reset_delay;
4559 4556
4560 /* Link Down Timeout = 0: 4557 /* Link Down Timeout = 0:
4561 * 4558 *
4562 * When Port Down timer expires we will start returning 4559 * When Port Down timer expires we will start returning
4563 * I/O's to OS with "DID_NO_CONNECT". 4560 * I/O's to OS with "DID_NO_CONNECT".
4564 * 4561 *
4565 * Link Down Timeout != 0: 4562 * Link Down Timeout != 0:
4566 * 4563 *
4567 * The driver waits for the link to come up after link down 4564 * The driver waits for the link to come up after link down
4568 * before returning I/Os to OS with "DID_NO_CONNECT". 4565 * before returning I/Os to OS with "DID_NO_CONNECT".
4569 */ 4566 */
4570 if (le16_to_cpu(nv->link_down_timeout) == 0) { 4567 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4571 ha->loop_down_abort_time = 4568 ha->loop_down_abort_time =
4572 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4569 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4573 } else { 4570 } else {
4574 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 4571 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4575 ha->loop_down_abort_time = 4572 ha->loop_down_abort_time =
4576 (LOOP_DOWN_TIME - ha->link_down_timeout); 4573 (LOOP_DOWN_TIME - ha->link_down_timeout);
4577 } 4574 }
4578 4575
4579 /* Need enough time to try and get the port back. */ 4576 /* Need enough time to try and get the port back. */
4580 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 4577 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4581 if (qlport_down_retry) 4578 if (qlport_down_retry)
4582 ha->port_down_retry_count = qlport_down_retry; 4579 ha->port_down_retry_count = qlport_down_retry;
4583 4580
4584 /* Set login_retry_count */ 4581 /* Set login_retry_count */
4585 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 4582 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4586 if (ha->port_down_retry_count == 4583 if (ha->port_down_retry_count ==
4587 le16_to_cpu(nv->port_down_retry_count) && 4584 le16_to_cpu(nv->port_down_retry_count) &&
4588 ha->port_down_retry_count > 3) 4585 ha->port_down_retry_count > 3)
4589 ha->login_retry_count = ha->port_down_retry_count; 4586 ha->login_retry_count = ha->port_down_retry_count;
4590 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4587 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4591 ha->login_retry_count = ha->port_down_retry_count; 4588 ha->login_retry_count = ha->port_down_retry_count;
4592 if (ql2xloginretrycount) 4589 if (ql2xloginretrycount)
4593 ha->login_retry_count = ql2xloginretrycount; 4590 ha->login_retry_count = ql2xloginretrycount;
4594 4591
4595 /* Enable ZIO. */ 4592 /* Enable ZIO. */
4596 if (!vha->flags.init_done) { 4593 if (!vha->flags.init_done) {
4597 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 4594 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4598 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4595 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4599 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 4596 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4600 le16_to_cpu(icb->interrupt_delay_timer): 2; 4597 le16_to_cpu(icb->interrupt_delay_timer): 2;
4601 } 4598 }
4602 icb->firmware_options_2 &= __constant_cpu_to_le32( 4599 icb->firmware_options_2 &= __constant_cpu_to_le32(
4603 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 4600 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4604 vha->flags.process_response_queue = 0; 4601 vha->flags.process_response_queue = 0;
4605 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4602 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4606 ha->zio_mode = QLA_ZIO_MODE_6; 4603 ha->zio_mode = QLA_ZIO_MODE_6;
4607 4604
4608 ql_log(ql_log_info, vha, 0x006f, 4605 ql_log(ql_log_info, vha, 0x006f,
4609 "ZIO mode %d enabled; timer delay (%d us).\n", 4606 "ZIO mode %d enabled; timer delay (%d us).\n",
4610 ha->zio_mode, ha->zio_timer * 100); 4607 ha->zio_mode, ha->zio_timer * 100);
4611 4608
4612 icb->firmware_options_2 |= cpu_to_le32( 4609 icb->firmware_options_2 |= cpu_to_le32(
4613 (uint32_t)ha->zio_mode); 4610 (uint32_t)ha->zio_mode);
4614 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 4611 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4615 vha->flags.process_response_queue = 1; 4612 vha->flags.process_response_queue = 1;
4616 } 4613 }
4617 4614
4618 if (rval) { 4615 if (rval) {
4619 ql_log(ql_log_warn, vha, 0x0070, 4616 ql_log(ql_log_warn, vha, 0x0070,
4620 "NVRAM configuration failed.\n"); 4617 "NVRAM configuration failed.\n");
4621 } 4618 }
4622 return (rval); 4619 return (rval);
4623 } 4620 }
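
The validity test near the top of qla24xx_nvram_config() sums the image as little-endian 32-bit words, expecting the stored checksum word to make the total wrap to zero, and checks the "ISP " id bytes. A runnable sketch with a fabricated 16-byte image (real NVRAM is sizeof(struct nvram_24xx) and also carries the version field checked above, omitted here):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t le32_sum(const uint8_t *buf, size_t len)
    {
            uint32_t sum = 0;

            for (size_t i = 0; i + 3 < len; i += 4)
                    sum += (uint32_t)buf[i] |
                           (uint32_t)buf[i + 1] << 8 |
                           (uint32_t)buf[i + 2] << 16 |
                           (uint32_t)buf[i + 3] << 24;
            return sum;
    }

    static int nvram_valid(const uint8_t *nv, size_t len)
    {
            /* Whole-image sum must wrap to zero; id must read "ISP ". */
            return le32_sum(nv, len) == 0 && memcmp(nv, "ISP ", 4) == 0;
    }

    int main(void)
    {
            uint8_t nv[16] = { 'I', 'S', 'P', ' ' };
            uint32_t fixup = 0u - le32_sum(nv, sizeof(nv));

            nv[12] = fixup & 0xff;          /* store checksum little-endian */
            nv[13] = (fixup >> 8) & 0xff;
            nv[14] = (fixup >> 16) & 0xff;
            nv[15] = (fixup >> 24) & 0xff;
            printf("valid: %d\n", nvram_valid(nv, sizeof(nv)));
            return 0;
    }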
4624 4621
4625 static int 4622 static int
4626 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 4623 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4627 uint32_t faddr) 4624 uint32_t faddr)
4628 { 4625 {
4629 int rval = QLA_SUCCESS; 4626 int rval = QLA_SUCCESS;
4630 int segments, fragment; 4627 int segments, fragment;
4631 uint32_t *dcode, dlen; 4628 uint32_t *dcode, dlen;
4632 uint32_t risc_addr; 4629 uint32_t risc_addr;
4633 uint32_t risc_size; 4630 uint32_t risc_size;
4634 uint32_t i; 4631 uint32_t i;
4635 struct qla_hw_data *ha = vha->hw; 4632 struct qla_hw_data *ha = vha->hw;
4636 struct req_que *req = ha->req_q_map[0]; 4633 struct req_que *req = ha->req_q_map[0];
4637 4634
4638 ql_dbg(ql_dbg_init, vha, 0x008b, 4635 ql_dbg(ql_dbg_init, vha, 0x008b,
4639 "Loading firmware from flash (%x).\n", faddr); 4636 "Loading firmware from flash (%x).\n", faddr);
4640 4637
4641 rval = QLA_SUCCESS; 4638 rval = QLA_SUCCESS;
4642 4639
4643 segments = FA_RISC_CODE_SEGMENTS; 4640 segments = FA_RISC_CODE_SEGMENTS;
4644 dcode = (uint32_t *)req->ring; 4641 dcode = (uint32_t *)req->ring;
4645 *srisc_addr = 0; 4642 *srisc_addr = 0;
4646 4643
4647 /* Validate firmware image by checking version. */ 4644 /* Validate firmware image by checking version. */
4648 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); 4645 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
4649 for (i = 0; i < 4; i++) 4646 for (i = 0; i < 4; i++)
4650 dcode[i] = be32_to_cpu(dcode[i]); 4647 dcode[i] = be32_to_cpu(dcode[i]);
4651 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 4648 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4652 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4649 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4653 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4650 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4654 dcode[3] == 0)) { 4651 dcode[3] == 0)) {
4655 ql_log(ql_log_fatal, vha, 0x008c, 4652 ql_log(ql_log_fatal, vha, 0x008c,
4656 "Unable to verify the integrity of flash firmware " 4653 "Unable to verify the integrity of flash firmware "
4657 "image.\n"); 4654 "image.\n");
4658 ql_log(ql_log_fatal, vha, 0x008d, 4655 ql_log(ql_log_fatal, vha, 0x008d,
4659 "Firmware data: %08x %08x %08x %08x.\n", 4656 "Firmware data: %08x %08x %08x %08x.\n",
4660 dcode[0], dcode[1], dcode[2], dcode[3]); 4657 dcode[0], dcode[1], dcode[2], dcode[3]);
4661 4658
4662 return QLA_FUNCTION_FAILED; 4659 return QLA_FUNCTION_FAILED;
4663 } 4660 }
4664 4661
4665 while (segments && rval == QLA_SUCCESS) { 4662 while (segments && rval == QLA_SUCCESS) {
4666 /* Read segment's load information. */ 4663 /* Read segment's load information. */
4667 qla24xx_read_flash_data(vha, dcode, faddr, 4); 4664 qla24xx_read_flash_data(vha, dcode, faddr, 4);
4668 4665
4669 risc_addr = be32_to_cpu(dcode[2]); 4666 risc_addr = be32_to_cpu(dcode[2]);
4670 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 4667 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4671 risc_size = be32_to_cpu(dcode[3]); 4668 risc_size = be32_to_cpu(dcode[3]);
4672 4669
4673 fragment = 0; 4670 fragment = 0;
4674 while (risc_size > 0 && rval == QLA_SUCCESS) { 4671 while (risc_size > 0 && rval == QLA_SUCCESS) {
4675 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 4672 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4676 if (dlen > risc_size) 4673 if (dlen > risc_size)
4677 dlen = risc_size; 4674 dlen = risc_size;
4678 4675
4679 ql_dbg(ql_dbg_init, vha, 0x008e, 4676 ql_dbg(ql_dbg_init, vha, 0x008e,
4680 "Loading risc segment@ risc addr %x " 4677 "Loading risc segment@ risc addr %x "
4681 "number of dwords 0x%x offset 0x%x.\n", 4678 "number of dwords 0x%x offset 0x%x.\n",
4682 risc_addr, dlen, faddr); 4679 risc_addr, dlen, faddr);
4683 4680
4684 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4681 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4685 for (i = 0; i < dlen; i++) 4682 for (i = 0; i < dlen; i++)
4686 dcode[i] = swab32(dcode[i]); 4683 dcode[i] = swab32(dcode[i]);
4687 4684
4688 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4685 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4689 dlen); 4686 dlen);
4690 if (rval) { 4687 if (rval) {
4691 ql_log(ql_log_fatal, vha, 0x008f, 4688 ql_log(ql_log_fatal, vha, 0x008f,
4692 "Failed to load segment %d of firmware.\n", 4689 "Failed to load segment %d of firmware.\n",
4693 fragment); 4690 fragment);
4694 break; 4691 break;
4695 } 4692 }
4696 4693
4697 faddr += dlen; 4694 faddr += dlen;
4698 risc_addr += dlen; 4695 risc_addr += dlen;
4699 risc_size -= dlen; 4696 risc_size -= dlen;
4700 fragment++; 4697 fragment++;
4701 } 4698 }
4702 4699
4703 /* Next segment. */ 4700 /* Next segment. */
4704 segments--; 4701 segments--;
4705 } 4702 }
4706 4703
4707 return rval; 4704 return rval;
4708 } 4705 }
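
The inner loop above splits each firmware segment into DMA-sized fragments: at most fw_transfer_size bytes (fw_transfer_size >> 2 dwords) per qla2x00_load_ram() call, advancing the flash offset and RISC address in step. A standalone sketch of the arithmetic, with all sizes invented:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t fw_transfer_size = 0x1000;  /* invented DMA buffer, bytes */
            uint32_t risc_addr = 0x100000;       /* invented segment load address */
            uint32_t risc_size = 0x2345;         /* invented segment size, dwords */
            uint32_t faddr = 0x20000;            /* invented flash offset, dwords */
            int fragment = 0;

            while (risc_size > 0) {
                    uint32_t dlen = fw_transfer_size >> 2;  /* dwords per chunk */

                    if (dlen > risc_size)
                            dlen = risc_size;
                    printf("fragment %d: 0x%" PRIx32 " dwords @ risc 0x%" PRIx32
                           " (flash 0x%" PRIx32 ")\n",
                           fragment, dlen, risc_addr, faddr);
                    faddr += dlen;
                    risc_addr += dlen;
                    risc_size -= dlen;
                    fragment++;
            }
            return 0;
    }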
4709 4706
4710 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 4707 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
4711 4708
4712 int 4709 int
4713 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4710 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4714 { 4711 {
4715 int rval; 4712 int rval;
4716 int i, fragment; 4713 int i, fragment;
4717 uint16_t *wcode, *fwcode; 4714 uint16_t *wcode, *fwcode;
4718 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 4715 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
4719 struct fw_blob *blob; 4716 struct fw_blob *blob;
4720 struct qla_hw_data *ha = vha->hw; 4717 struct qla_hw_data *ha = vha->hw;
4721 struct req_que *req = ha->req_q_map[0]; 4718 struct req_que *req = ha->req_q_map[0];
4722 4719
4723 /* Load firmware blob. */ 4720 /* Load firmware blob. */
4724 blob = qla2x00_request_firmware(vha); 4721 blob = qla2x00_request_firmware(vha);
4725 if (!blob) { 4722 if (!blob) {
4726 ql_log(ql_log_info, vha, 0x0083, 4723 ql_log(ql_log_info, vha, 0x0083,
4727 "Fimware image unavailable.\n"); 4724 "Fimware image unavailable.\n");
4728 ql_log(ql_log_info, vha, 0x0084, 4725 ql_log(ql_log_info, vha, 0x0084,
4729 "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); 4726 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
4730 return QLA_FUNCTION_FAILED; 4727 return QLA_FUNCTION_FAILED;
4731 } 4728 }
4732 4729
4733 rval = QLA_SUCCESS; 4730 rval = QLA_SUCCESS;
4734 4731
4735 wcode = (uint16_t *)req->ring; 4732 wcode = (uint16_t *)req->ring;
4736 *srisc_addr = 0; 4733 *srisc_addr = 0;
4737 fwcode = (uint16_t *)blob->fw->data; 4734 fwcode = (uint16_t *)blob->fw->data;
4738 fwclen = 0; 4735 fwclen = 0;
4739 4736
4740 /* Validate firmware image by checking version. */ 4737 /* Validate firmware image by checking version. */
4741 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4738 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4742 ql_log(ql_log_fatal, vha, 0x0085, 4739 ql_log(ql_log_fatal, vha, 0x0085,
4743 "Unable to verify integrity of firmware image (%Zd).\n", 4740 "Unable to verify integrity of firmware image (%Zd).\n",
4744 blob->fw->size); 4741 blob->fw->size);
4745 goto fail_fw_integrity; 4742 goto fail_fw_integrity;
4746 } 4743 }
4747 for (i = 0; i < 4; i++) 4744 for (i = 0; i < 4; i++)
4748 wcode[i] = be16_to_cpu(fwcode[i + 4]); 4745 wcode[i] = be16_to_cpu(fwcode[i + 4]);
4749 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4746 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4750 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4747 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4751 wcode[2] == 0 && wcode[3] == 0)) { 4748 wcode[2] == 0 && wcode[3] == 0)) {
4752 ql_log(ql_log_fatal, vha, 0x0086, 4749 ql_log(ql_log_fatal, vha, 0x0086,
4753 "Unable to verify integrity of firmware image.\n"); 4750 "Unable to verify integrity of firmware image.\n");
4754 ql_log(ql_log_fatal, vha, 0x0087, 4751 ql_log(ql_log_fatal, vha, 0x0087,
4755 "Firmware data: %04x %04x %04x %04x.\n", 4752 "Firmware data: %04x %04x %04x %04x.\n",
4756 wcode[0], wcode[1], wcode[2], wcode[3]); 4753 wcode[0], wcode[1], wcode[2], wcode[3]);
4757 goto fail_fw_integrity; 4754 goto fail_fw_integrity;
4758 } 4755 }
4759 4756
4760 seg = blob->segs; 4757 seg = blob->segs;
4761 while (*seg && rval == QLA_SUCCESS) { 4758 while (*seg && rval == QLA_SUCCESS) {
4762 risc_addr = *seg; 4759 risc_addr = *seg;
4763 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; 4760 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4764 risc_size = be16_to_cpu(fwcode[3]); 4761 risc_size = be16_to_cpu(fwcode[3]);
4765 4762
4766 /* Validate firmware image size. */ 4763 /* Validate firmware image size. */
4767 fwclen += risc_size * sizeof(uint16_t); 4764 fwclen += risc_size * sizeof(uint16_t);
4768 if (blob->fw->size < fwclen) { 4765 if (blob->fw->size < fwclen) {
4769 ql_log(ql_log_fatal, vha, 0x0088, 4766 ql_log(ql_log_fatal, vha, 0x0088,
4770 "Unable to verify integrity of firmware image " 4767 "Unable to verify integrity of firmware image "
4771 "(%Zd).\n", blob->fw->size); 4768 "(%Zd).\n", blob->fw->size);
4772 goto fail_fw_integrity; 4769 goto fail_fw_integrity;
4773 } 4770 }
4774 4771
4775 fragment = 0; 4772 fragment = 0;
4776 while (risc_size > 0 && rval == QLA_SUCCESS) { 4773 while (risc_size > 0 && rval == QLA_SUCCESS) {
4777 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4774 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4778 if (wlen > risc_size) 4775 if (wlen > risc_size)
4779 wlen = risc_size; 4776 wlen = risc_size;
4780 ql_dbg(ql_dbg_init, vha, 0x0089, 4777 ql_dbg(ql_dbg_init, vha, 0x0089,
4781 "Loading risc segment@ risc addr %x number of " 4778 "Loading risc segment@ risc addr %x number of "
4782 "words 0x%x.\n", risc_addr, wlen); 4779 "words 0x%x.\n", risc_addr, wlen);
4783 4780
4784 for (i = 0; i < wlen; i++) 4781 for (i = 0; i < wlen; i++)
4785 wcode[i] = swab16(fwcode[i]); 4782 wcode[i] = swab16(fwcode[i]);
4786 4783
4787 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4784 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4788 wlen); 4785 wlen);
4789 if (rval) { 4786 if (rval) {
4790 ql_log(ql_log_fatal, vha, 0x008a, 4787 ql_log(ql_log_fatal, vha, 0x008a,
4791 "Failed to load segment %d of firmware.\n", 4788 "Failed to load segment %d of firmware.\n",
4792 fragment); 4789 fragment);
4793 break; 4790 break;
4794 } 4791 }
4795 4792
4796 fwcode += wlen; 4793 fwcode += wlen;
4797 risc_addr += wlen; 4794 risc_addr += wlen;
4798 risc_size -= wlen; 4795 risc_size -= wlen;
4799 fragment++; 4796 fragment++;
4800 } 4797 }
4801 4798
4802 /* Next segment. */ 4799 /* Next segment. */
4803 seg++; 4800 seg++;
4804 } 4801 }
4805 return rval; 4802 return rval;
4806 4803
4807 fail_fw_integrity: 4804 fail_fw_integrity:
4808 return QLA_FUNCTION_FAILED; 4805 return QLA_FUNCTION_FAILED;
4809 } 4806 }
4810 4807
4811 static int 4808 static int
4812 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4809 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4813 { 4810 {
4814 int rval; 4811 int rval;
4815 int segments, fragment; 4812 int segments, fragment;
4816 uint32_t *dcode, dlen; 4813 uint32_t *dcode, dlen;
4817 uint32_t risc_addr; 4814 uint32_t risc_addr;
4818 uint32_t risc_size; 4815 uint32_t risc_size;
4819 uint32_t i; 4816 uint32_t i;
4820 struct fw_blob *blob; 4817 struct fw_blob *blob;
4821 uint32_t *fwcode, fwclen; 4818 uint32_t *fwcode, fwclen;
4822 struct qla_hw_data *ha = vha->hw; 4819 struct qla_hw_data *ha = vha->hw;
4823 struct req_que *req = ha->req_q_map[0]; 4820 struct req_que *req = ha->req_q_map[0];
4824 4821
4825 /* Load firmware blob. */ 4822 /* Load firmware blob. */
4826 blob = qla2x00_request_firmware(vha); 4823 blob = qla2x00_request_firmware(vha);
4827 if (!blob) { 4824 if (!blob) {
4828 ql_log(ql_log_warn, vha, 0x0090, 4825 ql_log(ql_log_warn, vha, 0x0090,
4829 "Fimware image unavailable.\n"); 4826 "Fimware image unavailable.\n");
4830 ql_log(ql_log_warn, vha, 0x0091, 4827 ql_log(ql_log_warn, vha, 0x0091,
4831 "Firmware images can be retrieved from: " 4828 "Firmware images can be retrieved from: "
4832 QLA_FW_URL ".\n"); 4829 QLA_FW_URL ".\n");
4833 4830
4834 return QLA_FUNCTION_FAILED; 4831 return QLA_FUNCTION_FAILED;
4835 } 4832 }
4836 4833
4837 ql_log(ql_log_info, vha, 0x0092, 4834 ql_log(ql_log_info, vha, 0x0092,
4838 "Loading via request-firmware.\n"); 4835 "Loading via request-firmware.\n");
4839 4836
4840 rval = QLA_SUCCESS; 4837 rval = QLA_SUCCESS;
4841 4838
4842 segments = FA_RISC_CODE_SEGMENTS; 4839 segments = FA_RISC_CODE_SEGMENTS;
4843 dcode = (uint32_t *)req->ring; 4840 dcode = (uint32_t *)req->ring;
4844 *srisc_addr = 0; 4841 *srisc_addr = 0;
4845 fwcode = (uint32_t *)blob->fw->data; 4842 fwcode = (uint32_t *)blob->fw->data;
4846 fwclen = 0; 4843 fwclen = 0;
4847 4844
4848 /* Validate firmware image by checking version. */ 4845 /* Validate firmware image by checking version. */
4849 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4846 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4850 ql_log(ql_log_fatal, vha, 0x0093, 4847 ql_log(ql_log_fatal, vha, 0x0093,
4851 "Unable to verify integrity of firmware image (%Zd).\n", 4848 "Unable to verify integrity of firmware image (%Zd).\n",
4852 blob->fw->size); 4849 blob->fw->size);
4853 goto fail_fw_integrity; 4850 goto fail_fw_integrity;
4854 } 4851 }
4855 for (i = 0; i < 4; i++) 4852 for (i = 0; i < 4; i++)
4856 dcode[i] = be32_to_cpu(fwcode[i + 4]); 4853 dcode[i] = be32_to_cpu(fwcode[i + 4]);
4857 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 4854 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4858 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4855 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4859 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4856 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4860 dcode[3] == 0)) { 4857 dcode[3] == 0)) {
4861 ql_log(ql_log_fatal, vha, 0x0094, 4858 ql_log(ql_log_fatal, vha, 0x0094,
4862 "Unable to verify integrity of firmware image (%Zd).\n", 4859 "Unable to verify integrity of firmware image (%Zd).\n",
4863 blob->fw->size); 4860 blob->fw->size);
4864 ql_log(ql_log_fatal, vha, 0x0095, 4861 ql_log(ql_log_fatal, vha, 0x0095,
4865 "Firmware data: %08x %08x %08x %08x.\n", 4862 "Firmware data: %08x %08x %08x %08x.\n",
4866 dcode[0], dcode[1], dcode[2], dcode[3]); 4863 dcode[0], dcode[1], dcode[2], dcode[3]);
4867 goto fail_fw_integrity; 4864 goto fail_fw_integrity;
4868 } 4865 }
4869 4866
4870 while (segments && rval == QLA_SUCCESS) { 4867 while (segments && rval == QLA_SUCCESS) {
4871 risc_addr = be32_to_cpu(fwcode[2]); 4868 risc_addr = be32_to_cpu(fwcode[2]);
4872 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 4869 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4873 risc_size = be32_to_cpu(fwcode[3]); 4870 risc_size = be32_to_cpu(fwcode[3]);
4874 4871
4875 /* Validate firmware image size. */ 4872 /* Validate firmware image size. */
4876 fwclen += risc_size * sizeof(uint32_t); 4873 fwclen += risc_size * sizeof(uint32_t);
4877 if (blob->fw->size < fwclen) { 4874 if (blob->fw->size < fwclen) {
4878 ql_log(ql_log_fatal, vha, 0x0096, 4875 ql_log(ql_log_fatal, vha, 0x0096,
4879 "Unable to verify integrity of firmware image " 4876 "Unable to verify integrity of firmware image "
4880 "(%Zd).\n", blob->fw->size); 4877 "(%Zd).\n", blob->fw->size);
4881 4878
4882 goto fail_fw_integrity; 4879 goto fail_fw_integrity;
4883 } 4880 }
4884 4881
4885 fragment = 0; 4882 fragment = 0;
4886 while (risc_size > 0 && rval == QLA_SUCCESS) { 4883 while (risc_size > 0 && rval == QLA_SUCCESS) {
4887 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 4884 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4888 if (dlen > risc_size) 4885 if (dlen > risc_size)
4889 dlen = risc_size; 4886 dlen = risc_size;
4890 4887
4891 ql_dbg(ql_dbg_init, vha, 0x0097, 4888 ql_dbg(ql_dbg_init, vha, 0x0097,
4892 "Loading risc segment@ risc addr %x " 4889 "Loading risc segment@ risc addr %x "
4893 "number of dwords 0x%x.\n", risc_addr, dlen); 4890 "number of dwords 0x%x.\n", risc_addr, dlen);
4894 4891
4895 for (i = 0; i < dlen; i++) 4892 for (i = 0; i < dlen; i++)
4896 dcode[i] = swab32(fwcode[i]); 4893 dcode[i] = swab32(fwcode[i]);
4897 4894
4898 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4895 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4899 dlen); 4896 dlen);
4900 if (rval) { 4897 if (rval) {
4901 ql_log(ql_log_fatal, vha, 0x0098, 4898 ql_log(ql_log_fatal, vha, 0x0098,
4902 "Failed to load segment %d of firmware.\n", 4899 "Failed to load segment %d of firmware.\n",
4903 fragment); 4900 fragment);
4904 break; 4901 break;
4905 } 4902 }
4906 4903
4907 fwcode += dlen; 4904 fwcode += dlen;
4908 risc_addr += dlen; 4905 risc_addr += dlen;
4909 risc_size -= dlen; 4906 risc_size -= dlen;
4910 fragment++; 4907 fragment++;
4911 } 4908 }
4912 4909
4913 /* Next segment. */ 4910 /* Next segment. */
4914 segments--; 4911 segments--;
4915 } 4912 }
4916 return rval; 4913 return rval;
4917 4914
4918 fail_fw_integrity: 4915 fail_fw_integrity:
4919 return QLA_FUNCTION_FAILED; 4916 return QLA_FUNCTION_FAILED;
4920 } 4917 }
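
Note on the integrity checks above: a usable image must be large enough to hold the 8-dword header, and dwords 4..7 (byte-swapped from the big-endian image) must not read back as all-ones or all-zeros, the patterns blank or erased flash returns. A minimal standalone sketch of that header test; fw_header_blank is an illustrative name, not a driver symbol:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors the blank-image test above: dcode[] holds the four
     * CPU-endian ID dwords taken from dwords 4..7 of the image. */
    static bool fw_header_blank(const uint32_t dcode[4])
    {
            bool all_ones = true, all_zeros = true;
            size_t i;

            for (i = 0; i < 4; i++) {
                    if (dcode[i] != 0xffffffffu)
                            all_ones = false;
                    if (dcode[i] != 0)
                            all_zeros = false;
            }
            return all_ones || all_zeros; /* blank/erased flash pattern */
    }
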
4921 4918
4922 int 4919 int
4923 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4920 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4924 { 4921 {
4925 int rval; 4922 int rval;
4926 4923
4927 if (ql2xfwloadbin == 1) 4924 if (ql2xfwloadbin == 1)
4928 return qla81xx_load_risc(vha, srisc_addr); 4925 return qla81xx_load_risc(vha, srisc_addr);
4929 4926
4930 /* 4927 /*
4931 * FW Load priority: 4928 * FW Load priority:
4932 * 1) Firmware via request-firmware interface (.bin file). 4929 * 1) Firmware via request-firmware interface (.bin file).
4933 * 2) Firmware residing in flash. 4930 * 2) Firmware residing in flash.
4934 */ 4931 */
4935 rval = qla24xx_load_risc_blob(vha, srisc_addr); 4932 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4936 if (rval == QLA_SUCCESS) 4933 if (rval == QLA_SUCCESS)
4937 return rval; 4934 return rval;
4938 4935
4939 return qla24xx_load_risc_flash(vha, srisc_addr, 4936 return qla24xx_load_risc_flash(vha, srisc_addr,
4940 vha->hw->flt_region_fw); 4937 vha->hw->flt_region_fw);
4941 } 4938 }
4942 4939
4943 int 4940 int
4944 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4941 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4945 { 4942 {
4946 int rval; 4943 int rval;
4947 struct qla_hw_data *ha = vha->hw; 4944 struct qla_hw_data *ha = vha->hw;
4948 4945
4949 if (ql2xfwloadbin == 2) 4946 if (ql2xfwloadbin == 2)
4950 goto try_blob_fw; 4947 goto try_blob_fw;
4951 4948
4952 /* 4949 /*
4953 * FW Load priority: 4950 * FW Load priority:
4954 * 1) Firmware residing in flash. 4951 * 1) Firmware residing in flash.
4955 * 2) Firmware via request-firmware interface (.bin file). 4952 * 2) Firmware via request-firmware interface (.bin file).
4956 * 3) Golden-Firmware residing in flash -- limited operation. 4953 * 3) Golden-Firmware residing in flash -- limited operation.
4957 */ 4954 */
4958 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); 4955 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4959 if (rval == QLA_SUCCESS) 4956 if (rval == QLA_SUCCESS)
4960 return rval; 4957 return rval;
4961 4958
4962 try_blob_fw: 4959 try_blob_fw:
4963 rval = qla24xx_load_risc_blob(vha, srisc_addr); 4960 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4964 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4961 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4965 return rval; 4962 return rval;
4966 4963
4967 ql_log(ql_log_info, vha, 0x0099, 4964 ql_log(ql_log_info, vha, 0x0099,
4968 "Attempting to fallback to golden firmware.\n"); 4965 "Attempting to fallback to golden firmware.\n");
4969 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4966 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4970 if (rval != QLA_SUCCESS) 4967 if (rval != QLA_SUCCESS)
4971 return rval; 4968 return rval;
4972 4969
4973 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); 4970 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4974 ha->flags.running_gold_fw = 1; 4971 ha->flags.running_gold_fw = 1;
4975 4972
4976 return rval; 4973 return rval;
4977 } 4974 }
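
The two loaders above differ only in ordering: qla24xx_load_risc prefers the request-firmware blob, while qla81xx_load_risc prefers flash and keeps the golden (limited-operation) image as a last resort. A minimal sketch of the qla81xx three-tier fallback, with hypothetical stubs standing in for the real flash and blob loaders:

    #include <stdio.h>

    #define QLA_SUCCESS          0
    #define QLA_FUNCTION_FAILED  1

    /* Hypothetical stand-ins for the real loaders. */
    static int load_from_flash(void)   { return QLA_FUNCTION_FAILED; }
    static int load_from_blob(void)    { return QLA_FUNCTION_FAILED; }
    static int load_golden_flash(void) { return QLA_SUCCESS; }

    /* Mirrors the qla81xx ordering: flash, then .bin blob, then golden. */
    static int load_risc_81xx(int have_golden, int *running_gold)
    {
            int rval = load_from_flash();
            if (rval == QLA_SUCCESS)
                    return rval;

            rval = load_from_blob();
            if (rval == QLA_SUCCESS || !have_golden)
                    return rval;

            rval = load_golden_flash();
            if (rval == QLA_SUCCESS)
                    *running_gold = 1; /* limited operation from here on */
            return rval;
    }

    int main(void)
    {
            int gold = 0;
            int rval = load_risc_81xx(1, &gold);

            printf("rval=%d running_gold=%d\n", rval, gold);
            return 0;
    }
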
4978 4975
4979 void 4976 void
4980 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 4977 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4981 { 4978 {
4982 int ret, retries; 4979 int ret, retries;
4983 struct qla_hw_data *ha = vha->hw; 4980 struct qla_hw_data *ha = vha->hw;
4984 4981
4985 if (ha->flags.pci_channel_io_perm_failure) 4982 if (ha->flags.pci_channel_io_perm_failure)
4986 return; 4983 return;
4987 if (!IS_FWI2_CAPABLE(ha)) 4984 if (!IS_FWI2_CAPABLE(ha))
4988 return; 4985 return;
4989 if (!ha->fw_major_version) 4986 if (!ha->fw_major_version)
4990 return; 4987 return;
4991 4988
4992 ret = qla2x00_stop_firmware(vha); 4989 ret = qla2x00_stop_firmware(vha);
4993 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4990 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4994 ret != QLA_INVALID_COMMAND && retries ; retries--) { 4991 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4995 ha->isp_ops->reset_chip(vha); 4992 ha->isp_ops->reset_chip(vha);
4996 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4993 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4997 continue; 4994 continue;
4998 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4995 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4999 continue; 4996 continue;
5000 ql_log(ql_log_info, vha, 0x8015, 4997 ql_log(ql_log_info, vha, 0x8015,
5001 "Attempting retry of stop-firmware command.\n"); 4998 "Attempting retry of stop-firmware command.\n");
5002 ret = qla2x00_stop_firmware(vha); 4999 ret = qla2x00_stop_firmware(vha);
5003 } 5000 }
5004 } 5001 }
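
qla2x00_try_to_stop_firmware retries the stop-firmware mailbox command at most five times, resetting and re-initializing the chip between attempts, and gives up immediately on success, a timeout, or an invalid-command status. The same bounded retry-with-recovery shape in isolation (names are illustrative, not driver API):

    /* Try op(); on a retryable failure run recover() and try again,
     * at most five times. A failed recovery burns a retry without
     * re-issuing op(), just as the driver loop does. */
    enum { OP_OK, OP_TIMEOUT, OP_INVALID, OP_FAILED };

    static int retry_with_recovery(int (*op)(void), int (*recover)(void))
    {
            int ret = op();
            int retries;

            for (retries = 5;
                 ret != OP_OK && ret != OP_TIMEOUT && ret != OP_INVALID &&
                 retries; retries--) {
                    if (recover() != OP_OK)
                            continue;
                    ret = op();
            }
            return ret;
    }
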
5005 5002
5006 int 5003 int
5007 qla24xx_configure_vhba(scsi_qla_host_t *vha) 5004 qla24xx_configure_vhba(scsi_qla_host_t *vha)
5008 { 5005 {
5009 int rval = QLA_SUCCESS; 5006 int rval = QLA_SUCCESS;
5010 uint16_t mb[MAILBOX_REGISTER_COUNT]; 5007 uint16_t mb[MAILBOX_REGISTER_COUNT];
5011 struct qla_hw_data *ha = vha->hw; 5008 struct qla_hw_data *ha = vha->hw;
5012 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 5009 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5013 struct req_que *req; 5010 struct req_que *req;
5014 struct rsp_que *rsp; 5011 struct rsp_que *rsp;
5015 5012
5016 if (!vha->vp_idx) 5013 if (!vha->vp_idx)
5017 return -EINVAL; 5014 return -EINVAL;
5018 5015
5019 rval = qla2x00_fw_ready(base_vha); 5016 rval = qla2x00_fw_ready(base_vha);
5020 if (ha->flags.cpu_affinity_enabled) 5017 if (ha->flags.cpu_affinity_enabled)
5021 req = ha->req_q_map[0]; 5018 req = ha->req_q_map[0];
5022 else 5019 else
5023 req = vha->req; 5020 req = vha->req;
5024 rsp = req->rsp; 5021 rsp = req->rsp;
5025 5022
5026 if (rval == QLA_SUCCESS) { 5023 if (rval == QLA_SUCCESS) {
5027 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 5024 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5028 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5025 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5029 } 5026 }
5030 5027
5031 vha->flags.management_server_logged_in = 0; 5028 vha->flags.management_server_logged_in = 0;
5032 5029
5033 /* Login to SNS first */ 5030 /* Login to SNS first */
5034 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 5031 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5035 if (mb[0] != MBS_COMMAND_COMPLETE) { 5032 if (mb[0] != MBS_COMMAND_COMPLETE) {
5036 ql_dbg(ql_dbg_init, vha, 0x0103, 5033 ql_dbg(ql_dbg_init, vha, 0x0103,
5037 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " 5034 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
5038 "mb[6]=%x mb[7]=%x.\n", 5035 "mb[6]=%x mb[7]=%x.\n",
5039 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); 5036 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5040 return (QLA_FUNCTION_FAILED); 5037 return (QLA_FUNCTION_FAILED);
5041 } 5038 }
5042 5039
5043 atomic_set(&vha->loop_down_timer, 0); 5040 atomic_set(&vha->loop_down_timer, 0);
5044 atomic_set(&vha->loop_state, LOOP_UP); 5041 atomic_set(&vha->loop_state, LOOP_UP);
5045 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5042 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5046 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5043 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5047 rval = qla2x00_loop_resync(base_vha); 5044 rval = qla2x00_loop_resync(base_vha);
5048 5045
5049 return rval; 5046 return rval;
5050 } 5047 }
5051 5048
5052 /* 84XX Support **************************************************************/ 5049 /* 84XX Support **************************************************************/
5053 5050
5054 static LIST_HEAD(qla_cs84xx_list); 5051 static LIST_HEAD(qla_cs84xx_list);
5055 static DEFINE_MUTEX(qla_cs84xx_mutex); 5052 static DEFINE_MUTEX(qla_cs84xx_mutex);
5056 5053
5057 static struct qla_chip_state_84xx * 5054 static struct qla_chip_state_84xx *
5058 qla84xx_get_chip(struct scsi_qla_host *vha) 5055 qla84xx_get_chip(struct scsi_qla_host *vha)
5059 { 5056 {
5060 struct qla_chip_state_84xx *cs84xx; 5057 struct qla_chip_state_84xx *cs84xx;
5061 struct qla_hw_data *ha = vha->hw; 5058 struct qla_hw_data *ha = vha->hw;
5062 5059
5063 mutex_lock(&qla_cs84xx_mutex); 5060 mutex_lock(&qla_cs84xx_mutex);
5064 5061
5065 /* Find any shared 84xx chip. */ 5062 /* Find any shared 84xx chip. */
5066 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { 5063 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
5067 if (cs84xx->bus == ha->pdev->bus) { 5064 if (cs84xx->bus == ha->pdev->bus) {
5068 kref_get(&cs84xx->kref); 5065 kref_get(&cs84xx->kref);
5069 goto done; 5066 goto done;
5070 } 5067 }
5071 } 5068 }
5072 5069
5073 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); 5070 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
5074 if (!cs84xx) 5071 if (!cs84xx)
5075 goto done; 5072 goto done;
5076 5073
5077 kref_init(&cs84xx->kref); 5074 kref_init(&cs84xx->kref);
5078 spin_lock_init(&cs84xx->access_lock); 5075 spin_lock_init(&cs84xx->access_lock);
5079 mutex_init(&cs84xx->fw_update_mutex); 5076 mutex_init(&cs84xx->fw_update_mutex);
5080 cs84xx->bus = ha->pdev->bus; 5077 cs84xx->bus = ha->pdev->bus;
5081 5078
5082 list_add_tail(&cs84xx->list, &qla_cs84xx_list); 5079 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
5083 done: 5080 done:
5084 mutex_unlock(&qla_cs84xx_mutex); 5081 mutex_unlock(&qla_cs84xx_mutex);
5085 return cs84xx; 5082 return cs84xx;
5086 } 5083 }
5087 5084
5088 static void 5085 static void
5089 __qla84xx_chip_release(struct kref *kref) 5086 __qla84xx_chip_release(struct kref *kref)
5090 { 5087 {
5091 struct qla_chip_state_84xx *cs84xx = 5088 struct qla_chip_state_84xx *cs84xx =
5092 container_of(kref, struct qla_chip_state_84xx, kref); 5089 container_of(kref, struct qla_chip_state_84xx, kref);
5093 5090
5094 mutex_lock(&qla_cs84xx_mutex); 5091 mutex_lock(&qla_cs84xx_mutex);
5095 list_del(&cs84xx->list); 5092 list_del(&cs84xx->list);
5096 mutex_unlock(&qla_cs84xx_mutex); 5093 mutex_unlock(&qla_cs84xx_mutex);
5097 kfree(cs84xx); 5094 kfree(cs84xx);
5098 } 5095 }
5099 5096
5100 void 5097 void
5101 qla84xx_put_chip(struct scsi_qla_host *vha) 5098 qla84xx_put_chip(struct scsi_qla_host *vha)
5102 { 5099 {
5103 struct qla_hw_data *ha = vha->hw; 5100 struct qla_hw_data *ha = vha->hw;
5104 if (ha->cs84xx) 5101 if (ha->cs84xx)
5105 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 5102 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
5106 } 5103 }
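
qla84xx_get_chip and qla84xx_put_chip form a lookup-or-create pair under one global mutex: functions on the same PCI bus share a single qla_chip_state_84xx, found by bus and pinned with kref_get, or freshly allocated with an initial reference and linked into the list; the final put unlinks and frees it. A compact userspace analogue with a plain counter in place of kref and the decrement done under the same mutex, which simplifies the kernel's kref_put/release split (all names hypothetical):

    #include <pthread.h>
    #include <stdlib.h>

    struct shared_state {
            struct shared_state *next;
            int key;  /* stands in for the PCI bus */
            int refs; /* stands in for the kref */
    };

    static struct shared_state *state_list;
    static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Find an existing node for key and take a reference, or allocate,
     * initialize, and link a new one. NULL only on allocation failure. */
    static struct shared_state *state_get(int key)
    {
            struct shared_state *s;

            pthread_mutex_lock(&state_mutex);
            for (s = state_list; s; s = s->next) {
                    if (s->key == key) {
                            s->refs++;
                            goto out;
                    }
            }
            s = calloc(1, sizeof(*s));
            if (s) {
                    s->key = key;
                    s->refs = 1;
                    s->next = state_list;
                    state_list = s;
            }
    out:
            pthread_mutex_unlock(&state_mutex);
            return s;
    }

    /* Drop a reference; unlink and free on the final put. */
    static void state_put(struct shared_state *s)
    {
            struct shared_state **pp;

            pthread_mutex_lock(&state_mutex);
            if (--s->refs == 0) {
                    for (pp = &state_list; *pp; pp = &(*pp)->next) {
                            if (*pp == s) {
                                    *pp = s->next;
                                    break;
                            }
                    }
                    free(s);
            }
            pthread_mutex_unlock(&state_mutex);
    }
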
5107 5104
5108 static int 5105 static int
5109 qla84xx_init_chip(scsi_qla_host_t *vha) 5106 qla84xx_init_chip(scsi_qla_host_t *vha)
5110 { 5107 {
5111 int rval; 5108 int rval;
5112 uint16_t status[2]; 5109 uint16_t status[2];
5113 struct qla_hw_data *ha = vha->hw; 5110 struct qla_hw_data *ha = vha->hw;
5114 5111
5115 mutex_lock(&ha->cs84xx->fw_update_mutex); 5112 mutex_lock(&ha->cs84xx->fw_update_mutex);
5116 5113
5117 rval = qla84xx_verify_chip(vha, status); 5114 rval = qla84xx_verify_chip(vha, status);
5118 5115
5119 mutex_unlock(&ha->cs84xx->fw_update_mutex); 5116 mutex_unlock(&ha->cs84xx->fw_update_mutex);
5120 5117
5121 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 5118 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
5122 QLA_SUCCESS; 5119 QLA_SUCCESS;
5123 } 5120 }
5124 5121
5125 /* 81XX Support **************************************************************/ 5122 /* 81XX Support **************************************************************/
5126 5123
5127 int 5124 int
5128 qla81xx_nvram_config(scsi_qla_host_t *vha) 5125 qla81xx_nvram_config(scsi_qla_host_t *vha)
5129 { 5126 {
5130 int rval; 5127 int rval;
5131 struct init_cb_81xx *icb; 5128 struct init_cb_81xx *icb;
5132 struct nvram_81xx *nv; 5129 struct nvram_81xx *nv;
5133 uint32_t *dptr; 5130 uint32_t *dptr;
5134 uint8_t *dptr1, *dptr2; 5131 uint8_t *dptr1, *dptr2;
5135 uint32_t chksum; 5132 uint32_t chksum;
5136 uint16_t cnt; 5133 uint16_t cnt;
5137 struct qla_hw_data *ha = vha->hw; 5134 struct qla_hw_data *ha = vha->hw;
5138 5135
5139 rval = QLA_SUCCESS; 5136 rval = QLA_SUCCESS;
5140 icb = (struct init_cb_81xx *)ha->init_cb; 5137 icb = (struct init_cb_81xx *)ha->init_cb;
5141 nv = ha->nvram; 5138 nv = ha->nvram;
5142 5139
5143 /* Determine NVRAM starting address. */ 5140 /* Determine NVRAM starting address. */
5144 ha->nvram_size = sizeof(struct nvram_81xx); 5141 ha->nvram_size = sizeof(struct nvram_81xx);
5145 ha->vpd_size = FA_NVRAM_VPD_SIZE; 5142 ha->vpd_size = FA_NVRAM_VPD_SIZE;
5146 5143
5147 /* Get VPD data into cache */ 5144 /* Get VPD data into cache */
5148 ha->vpd = ha->nvram + VPD_OFFSET; 5145 ha->vpd = ha->nvram + VPD_OFFSET;
5149 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 5146 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
5150 ha->vpd_size); 5147 ha->vpd_size);
5151 5148
5152 /* Get NVRAM data into cache and calculate checksum. */ 5149 /* Get NVRAM data into cache and calculate checksum. */
5153 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, 5150 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
5154 ha->nvram_size); 5151 ha->nvram_size);
5155 dptr = (uint32_t *)nv; 5152 dptr = (uint32_t *)nv;
5156 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 5153 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5157 chksum += le32_to_cpu(*dptr++); 5154 chksum += le32_to_cpu(*dptr++);
5158 5155
5159 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, 5156 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5160 "Contents of NVRAM:\n"); 5157 "Contents of NVRAM:\n");
5161 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, 5158 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5162 (uint8_t *)nv, ha->nvram_size); 5159 (uint8_t *)nv, ha->nvram_size);
5163 5160
5164 /* Bad NVRAM data, set default parameters. */ 5161 /* Bad NVRAM data, set default parameters. */
5165 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5162 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5166 || nv->id[3] != ' ' || 5163 || nv->id[3] != ' ' ||
5167 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5164 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5168 /* Reset NVRAM data. */ 5165 /* Reset NVRAM data. */
5169 ql_log(ql_log_info, vha, 0x0073, 5166 ql_log(ql_log_info, vha, 0x0073,
5170 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 5167 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
5171 "version=0x%x.\n", chksum, nv->id[0], 5168 "version=0x%x.\n", chksum, nv->id[0],
5172 le16_to_cpu(nv->nvram_version)); 5169 le16_to_cpu(nv->nvram_version));
5173 ql_log(ql_log_info, vha, 0x0074, 5170 ql_log(ql_log_info, vha, 0x0074,
5174 "Falling back to functioning (yet invalid -- WWPN) " 5171 "Falling back to functioning (yet invalid -- WWPN) "
5175 "defaults.\n"); 5172 "defaults.\n");
5176 5173
5177 /* 5174 /*
5178 * Set default initialization control block. 5175 * Set default initialization control block.
5179 */ 5176 */
5180 memset(nv, 0, ha->nvram_size); 5177 memset(nv, 0, ha->nvram_size);
5181 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 5178 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5182 nv->version = __constant_cpu_to_le16(ICB_VERSION); 5179 nv->version = __constant_cpu_to_le16(ICB_VERSION);
5183 nv->frame_payload_size = __constant_cpu_to_le16(2048); 5180 nv->frame_payload_size = __constant_cpu_to_le16(2048);
5184 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5181 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5185 nv->exchange_count = __constant_cpu_to_le16(0); 5182 nv->exchange_count = __constant_cpu_to_le16(0);
5186 nv->port_name[0] = 0x21; 5183 nv->port_name[0] = 0x21;
5187 nv->port_name[1] = 0x00 + ha->port_no; 5184 nv->port_name[1] = 0x00 + ha->port_no;
5188 nv->port_name[2] = 0x00; 5185 nv->port_name[2] = 0x00;
5189 nv->port_name[3] = 0xe0; 5186 nv->port_name[3] = 0xe0;
5190 nv->port_name[4] = 0x8b; 5187 nv->port_name[4] = 0x8b;
5191 nv->port_name[5] = 0x1c; 5188 nv->port_name[5] = 0x1c;
5192 nv->port_name[6] = 0x55; 5189 nv->port_name[6] = 0x55;
5193 nv->port_name[7] = 0x86; 5190 nv->port_name[7] = 0x86;
5194 nv->node_name[0] = 0x20; 5191 nv->node_name[0] = 0x20;
5195 nv->node_name[1] = 0x00; 5192 nv->node_name[1] = 0x00;
5196 nv->node_name[2] = 0x00; 5193 nv->node_name[2] = 0x00;
5197 nv->node_name[3] = 0xe0; 5194 nv->node_name[3] = 0xe0;
5198 nv->node_name[4] = 0x8b; 5195 nv->node_name[4] = 0x8b;
5199 nv->node_name[5] = 0x1c; 5196 nv->node_name[5] = 0x1c;
5200 nv->node_name[6] = 0x55; 5197 nv->node_name[6] = 0x55;
5201 nv->node_name[7] = 0x86; 5198 nv->node_name[7] = 0x86;
5202 nv->login_retry_count = __constant_cpu_to_le16(8); 5199 nv->login_retry_count = __constant_cpu_to_le16(8);
5203 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 5200 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
5204 nv->login_timeout = __constant_cpu_to_le16(0); 5201 nv->login_timeout = __constant_cpu_to_le16(0);
5205 nv->firmware_options_1 = 5202 nv->firmware_options_1 =
5206 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 5203 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5207 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 5204 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5208 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5205 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5209 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 5206 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5210 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 5207 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5211 nv->efi_parameters = __constant_cpu_to_le32(0); 5208 nv->efi_parameters = __constant_cpu_to_le32(0);
5212 nv->reset_delay = 5; 5209 nv->reset_delay = 5;
5213 nv->max_luns_per_target = __constant_cpu_to_le16(128); 5210 nv->max_luns_per_target = __constant_cpu_to_le16(128);
5214 nv->port_down_retry_count = __constant_cpu_to_le16(30); 5211 nv->port_down_retry_count = __constant_cpu_to_le16(30);
5215 nv->link_down_timeout = __constant_cpu_to_le16(30); 5212 nv->link_down_timeout = __constant_cpu_to_le16(30);
5216 nv->enode_mac[0] = 0x00; 5213 nv->enode_mac[0] = 0x00;
5217 nv->enode_mac[1] = 0x02; 5214 nv->enode_mac[1] = 0x02;
5218 nv->enode_mac[2] = 0x03; 5215 nv->enode_mac[2] = 0x03;
5219 nv->enode_mac[3] = 0x04; 5216 nv->enode_mac[3] = 0x04;
5220 nv->enode_mac[4] = 0x05; 5217 nv->enode_mac[4] = 0x05;
5221 nv->enode_mac[5] = 0x06 + ha->port_no; 5218 nv->enode_mac[5] = 0x06 + ha->port_no;
5222 5219
5223 rval = 1; 5220 rval = 1;
5224 } 5221 }
5225 5222
5226 /* Reset Initialization control block */ 5223 /* Reset Initialization control block */
5227 memset(icb, 0, ha->init_cb_size); 5224 memset(icb, 0, ha->init_cb_size);
5228 5225
5229 /* Copy 1st segment. */ 5226 /* Copy 1st segment. */
5230 dptr1 = (uint8_t *)icb; 5227 dptr1 = (uint8_t *)icb;
5231 dptr2 = (uint8_t *)&nv->version; 5228 dptr2 = (uint8_t *)&nv->version;
5232 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 5229 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5233 while (cnt--) 5230 while (cnt--)
5234 *dptr1++ = *dptr2++; 5231 *dptr1++ = *dptr2++;
5235 5232
5236 icb->login_retry_count = nv->login_retry_count; 5233 icb->login_retry_count = nv->login_retry_count;
5237 5234
5238 /* Copy 2nd segment. */ 5235 /* Copy 2nd segment. */
5239 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 5236 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5240 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 5237 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5241 cnt = (uint8_t *)&icb->reserved_5 - 5238 cnt = (uint8_t *)&icb->reserved_5 -
5242 (uint8_t *)&icb->interrupt_delay_timer; 5239 (uint8_t *)&icb->interrupt_delay_timer;
5243 while (cnt--) 5240 while (cnt--)
5244 *dptr1++ = *dptr2++; 5241 *dptr1++ = *dptr2++;
5245 5242
5246 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 5243 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5247 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 5244 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5248 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 5245 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5249 icb->enode_mac[0] = 0x01; 5246 icb->enode_mac[0] = 0x01;
5250 icb->enode_mac[1] = 0x02; 5247 icb->enode_mac[1] = 0x02;
5251 icb->enode_mac[2] = 0x03; 5248 icb->enode_mac[2] = 0x03;
5252 icb->enode_mac[3] = 0x04; 5249 icb->enode_mac[3] = 0x04;
5253 icb->enode_mac[4] = 0x05; 5250 icb->enode_mac[4] = 0x05;
5254 icb->enode_mac[5] = 0x06 + ha->port_no; 5251 icb->enode_mac[5] = 0x06 + ha->port_no;
5255 } 5252 }
5256 5253
5257 /* Use extended-initialization control block. */ 5254 /* Use extended-initialization control block. */
5258 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 5255 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5259 5256
5260 /* 5257 /*
5261 * Setup driver NVRAM options. 5258 * Setup driver NVRAM options.
5262 */ 5259 */
5263 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 5260 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5264 "QLE8XXX"); 5261 "QLE8XXX");
5265 5262
5266 /* Use alternate WWN? */ 5263 /* Use alternate WWN? */
5267 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 5264 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5268 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 5265 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5269 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 5266 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5270 } 5267 }
5271 5268
5272 /* Prepare nodename */ 5269 /* Prepare nodename */
5273 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 5270 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5274 /* 5271 /*
5275 * Firmware will apply the following mask if the nodename was 5272 * Firmware will apply the following mask if the nodename was
5276 * not provided. 5273 * not provided.
5277 */ 5274 */
5278 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 5275 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5279 icb->node_name[0] &= 0xF0; 5276 icb->node_name[0] &= 0xF0;
5280 } 5277 }
5281 5278
5282 /* Set host adapter parameters. */ 5279 /* Set host adapter parameters. */
5283 ha->flags.disable_risc_code_load = 0; 5280 ha->flags.disable_risc_code_load = 0;
5284 ha->flags.enable_lip_reset = 0; 5281 ha->flags.enable_lip_reset = 0;
5285 ha->flags.enable_lip_full_login = 5282 ha->flags.enable_lip_full_login =
5286 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 5283 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5287 ha->flags.enable_target_reset = 5284 ha->flags.enable_target_reset =
5288 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 5285 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5289 ha->flags.enable_led_scheme = 0; 5286 ha->flags.enable_led_scheme = 0;
5290 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 5287 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5291 5288
5292 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 5289 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5293 (BIT_6 | BIT_5 | BIT_4)) >> 4; 5290 (BIT_6 | BIT_5 | BIT_4)) >> 4;
5294 5291
5295 /* save HBA serial number */ 5292 /* save HBA serial number */
5296 ha->serial0 = icb->port_name[5]; 5293 ha->serial0 = icb->port_name[5];
5297 ha->serial1 = icb->port_name[6]; 5294 ha->serial1 = icb->port_name[6];
5298 ha->serial2 = icb->port_name[7]; 5295 ha->serial2 = icb->port_name[7];
5299 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 5296 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5300 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 5297 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5301 5298
5302 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5299 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5303 5300
5304 ha->retry_count = le16_to_cpu(nv->login_retry_count); 5301 ha->retry_count = le16_to_cpu(nv->login_retry_count);
5305 5302
5306 /* Set minimum login_timeout to 4 seconds. */ 5303 /* Set minimum login_timeout to 4 seconds. */
5307 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 5304 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5308 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 5305 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5309 if (le16_to_cpu(nv->login_timeout) < 4) 5306 if (le16_to_cpu(nv->login_timeout) < 4)
5310 nv->login_timeout = __constant_cpu_to_le16(4); 5307 nv->login_timeout = __constant_cpu_to_le16(4);
5311 ha->login_timeout = le16_to_cpu(nv->login_timeout); 5308 ha->login_timeout = le16_to_cpu(nv->login_timeout);
5312 icb->login_timeout = nv->login_timeout; 5309 icb->login_timeout = nv->login_timeout;
5313 5310
5314 /* Set minimum RATOV to 100 tenths of a second. */ 5311 /* Set minimum RATOV to 100 tenths of a second. */
5315 ha->r_a_tov = 100; 5312 ha->r_a_tov = 100;
5316 5313
5317 ha->loop_reset_delay = nv->reset_delay; 5314 ha->loop_reset_delay = nv->reset_delay;
5318 5315
5319 /* Link Down Timeout = 0: 5316 /* Link Down Timeout = 0:
5320 * 5317 *
5321 * When Port Down timer expires we will start returning 5318 * When Port Down timer expires we will start returning
5322 * I/Os to OS with "DID_NO_CONNECT". 5319 * I/Os to OS with "DID_NO_CONNECT".
5323 * 5320 *
5324 * Link Down Timeout != 0: 5321 * Link Down Timeout != 0:
5325 * 5322 *
5326 * The driver waits for the link to come up after link down 5323 * The driver waits for the link to come up after link down
5327 * before returning I/Os to OS with "DID_NO_CONNECT". 5324 * before returning I/Os to OS with "DID_NO_CONNECT".
5328 */ 5325 */
5329 if (le16_to_cpu(nv->link_down_timeout) == 0) { 5326 if (le16_to_cpu(nv->link_down_timeout) == 0) {
5330 ha->loop_down_abort_time = 5327 ha->loop_down_abort_time =
5331 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 5328 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5332 } else { 5329 } else {
5333 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 5330 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5334 ha->loop_down_abort_time = 5331 ha->loop_down_abort_time =
5335 (LOOP_DOWN_TIME - ha->link_down_timeout); 5332 (LOOP_DOWN_TIME - ha->link_down_timeout);
5336 } 5333 }
5337 5334
5338 /* Need enough time to try and get the port back. */ 5335 /* Need enough time to try and get the port back. */
5339 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 5336 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5340 if (qlport_down_retry) 5337 if (qlport_down_retry)
5341 ha->port_down_retry_count = qlport_down_retry; 5338 ha->port_down_retry_count = qlport_down_retry;
5342 5339
5343 /* Set login_retry_count */ 5340 /* Set login_retry_count */
5344 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 5341 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
5345 if (ha->port_down_retry_count == 5342 if (ha->port_down_retry_count ==
5346 le16_to_cpu(nv->port_down_retry_count) && 5343 le16_to_cpu(nv->port_down_retry_count) &&
5347 ha->port_down_retry_count > 3) 5344 ha->port_down_retry_count > 3)
5348 ha->login_retry_count = ha->port_down_retry_count; 5345 ha->login_retry_count = ha->port_down_retry_count;
5349 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 5346 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5350 ha->login_retry_count = ha->port_down_retry_count; 5347 ha->login_retry_count = ha->port_down_retry_count;
5351 if (ql2xloginretrycount) 5348 if (ql2xloginretrycount)
5352 ha->login_retry_count = ql2xloginretrycount; 5349 ha->login_retry_count = ql2xloginretrycount;
5353 5350
5354 /* Enable ZIO. */ 5351 /* Enable ZIO. */
5355 if (!vha->flags.init_done) { 5352 if (!vha->flags.init_done) {
5356 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 5353 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5357 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 5354 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5358 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 5355 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5359 le16_to_cpu(icb->interrupt_delay_timer): 2; 5356 le16_to_cpu(icb->interrupt_delay_timer): 2;
5360 } 5357 }
5361 icb->firmware_options_2 &= __constant_cpu_to_le32( 5358 icb->firmware_options_2 &= __constant_cpu_to_le32(
5362 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 5359 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5363 vha->flags.process_response_queue = 0; 5360 vha->flags.process_response_queue = 0;
5364 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5361 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5365 ha->zio_mode = QLA_ZIO_MODE_6; 5362 ha->zio_mode = QLA_ZIO_MODE_6;
5366 5363
5367 ql_log(ql_log_info, vha, 0x0075, 5364 ql_log(ql_log_info, vha, 0x0075,
5368 "ZIO mode %d enabled; timer delay (%d us).\n", 5365 "ZIO mode %d enabled; timer delay (%d us).\n",
5369 ha->zio_mode, 5366 ha->zio_mode,
5370 ha->zio_timer * 100); 5367 ha->zio_timer * 100);
5371 5368
5372 icb->firmware_options_2 |= cpu_to_le32( 5369 icb->firmware_options_2 |= cpu_to_le32(
5373 (uint32_t)ha->zio_mode); 5370 (uint32_t)ha->zio_mode);
5374 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 5371 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5375 vha->flags.process_response_queue = 1; 5372 vha->flags.process_response_queue = 1;
5376 } 5373 }
5377 5374
5378 if (rval) { 5375 if (rval) {
5379 ql_log(ql_log_warn, vha, 0x0076, 5376 ql_log(ql_log_warn, vha, 0x0076,
5380 "NVRAM configuration failed.\n"); 5377 "NVRAM configuration failed.\n");
5381 } 5378 }
5382 return (rval); 5379 return (rval);
5383 } 5380 }
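
The NVRAM sanity check in qla81xx_nvram_config relies on an additive checksum: summing every little-endian 32-bit word of the region, stored checksum word included, must give zero modulo 2^32, and the leading ID bytes must spell "ISP ". A minimal sketch over a raw buffer, assuming the ID sits at offset 0 as in struct nvram_81xx and omitting the nvram_version comparison:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sum all LE32 words (checksum word included); valid contents sum
     * to 0 mod 2^32 and begin with the "ISP " signature. Assumes len
     * is a multiple of 4. */
    static bool nvram_ok(const uint8_t *buf, size_t len)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i + 4 <= len; i += 4)
                    sum += (uint32_t)buf[i] |
                           ((uint32_t)buf[i + 1] << 8) |
                           ((uint32_t)buf[i + 2] << 16) |
                           ((uint32_t)buf[i + 3] << 24);

            return sum == 0 && len >= 4 &&
                   buf[0] == 'I' && buf[1] == 'S' &&
                   buf[2] == 'P' && buf[3] == ' ';
    }
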
5384 5381
5385 int 5382 int
5386 qla82xx_restart_isp(scsi_qla_host_t *vha) 5383 qla82xx_restart_isp(scsi_qla_host_t *vha)
5387 { 5384 {
5388 int status, rval; 5385 int status, rval;
5389 uint32_t wait_time; 5386 uint32_t wait_time;
5390 struct qla_hw_data *ha = vha->hw; 5387 struct qla_hw_data *ha = vha->hw;
5391 struct req_que *req = ha->req_q_map[0]; 5388 struct req_que *req = ha->req_q_map[0];
5392 struct rsp_que *rsp = ha->rsp_q_map[0]; 5389 struct rsp_que *rsp = ha->rsp_q_map[0];
5393 struct scsi_qla_host *vp; 5390 struct scsi_qla_host *vp;
5394 unsigned long flags; 5391 unsigned long flags;
5395 5392
5396 status = qla2x00_init_rings(vha); 5393 status = qla2x00_init_rings(vha);
5397 if (!status) { 5394 if (!status) {
5398 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 5395 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5399 ha->flags.chip_reset_done = 1; 5396 ha->flags.chip_reset_done = 1;
5400 5397
5401 status = qla2x00_fw_ready(vha); 5398 status = qla2x00_fw_ready(vha);
5402 if (!status) { 5399 if (!status) {
5403 ql_log(ql_log_info, vha, 0x803c, 5400 ql_log(ql_log_info, vha, 0x803c,
5404 "Start configure loop, status =%d.\n", status); 5401 "Start configure loop, status =%d.\n", status);
5405 5402
5406 /* Issue a marker after FW becomes ready. */ 5403 /* Issue a marker after FW becomes ready. */
5407 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5404 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5408 5405
5409 vha->flags.online = 1; 5406 vha->flags.online = 1;
5410 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 5407 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5411 wait_time = 256; 5408 wait_time = 256;
5412 do { 5409 do {
5413 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5410 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5414 qla2x00_configure_loop(vha); 5411 qla2x00_configure_loop(vha);
5415 wait_time--; 5412 wait_time--;
5416 } while (!atomic_read(&vha->loop_down_timer) && 5413 } while (!atomic_read(&vha->loop_down_timer) &&
5417 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && 5414 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5418 wait_time && 5415 wait_time &&
5419 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); 5416 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5420 } 5417 }
5421 5418
5422 /* if no cable then assume it's good */ 5419 /* if no cable then assume it's good */
5423 if ((vha->device_flags & DFLG_NO_CABLE)) 5420 if ((vha->device_flags & DFLG_NO_CABLE))
5424 status = 0; 5421 status = 0;
5425 5422
5426 ql_log(ql_log_info, vha, 0x803d, 5423 ql_log(ql_log_info, vha, 0x803d,
5427 "Configure loop done, status = 0x%x.\n", status); 5424 "Configure loop done, status = 0x%x.\n", status);
5428 } 5425 }
5429 5426
5430 if (!status) { 5427 if (!status) {
5431 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 5428 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5432 5429
5433 if (!atomic_read(&vha->loop_down_timer)) { 5430 if (!atomic_read(&vha->loop_down_timer)) {
5434 /* 5431 /*
5435 * Issue marker command only when we are going 5432 * Issue marker command only when we are going
5436 * to start the I/O. 5433 * to start the I/O.
5437 */ 5434 */
5438 vha->marker_needed = 1; 5435 vha->marker_needed = 1;
5439 } 5436 }
5440 5437
5441 vha->flags.online = 1; 5438 vha->flags.online = 1;
5442 5439
5443 ha->isp_ops->enable_intrs(ha); 5440 ha->isp_ops->enable_intrs(ha);
5444 5441
5445 ha->isp_abort_cnt = 0; 5442 ha->isp_abort_cnt = 0;
5446 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 5443 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5447 5444
5448 /* Update the firmware version */ 5445 /* Update the firmware version */
5449 qla2x00_get_fw_version(vha, &ha->fw_major_version, 5446 qla2x00_get_fw_version(vha, &ha->fw_major_version,
5450 &ha->fw_minor_version, &ha->fw_subminor_version, 5447 &ha->fw_minor_version, &ha->fw_subminor_version,
5451 &ha->fw_attributes, &ha->fw_memory_size, 5448 &ha->fw_attributes, &ha->fw_memory_size,
5452 ha->mpi_version, &ha->mpi_capabilities, 5449 ha->mpi_version, &ha->mpi_capabilities,
5453 ha->phy_version); 5450 ha->phy_version);
5454 5451
5455 if (ha->fce) { 5452 if (ha->fce) {
5456 ha->flags.fce_enabled = 1; 5453 ha->flags.fce_enabled = 1;
5457 memset(ha->fce, 0, 5454 memset(ha->fce, 0,
5458 fce_calc_size(ha->fce_bufs)); 5455 fce_calc_size(ha->fce_bufs));
5459 rval = qla2x00_enable_fce_trace(vha, 5456 rval = qla2x00_enable_fce_trace(vha,
5460 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5457 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5461 &ha->fce_bufs); 5458 &ha->fce_bufs);
5462 if (rval) { 5459 if (rval) {
5463 ql_log(ql_log_warn, vha, 0x803e, 5460 ql_log(ql_log_warn, vha, 0x803e,
5464 "Unable to reinitialize FCE (%d).\n", 5461 "Unable to reinitialize FCE (%d).\n",
5465 rval); 5462 rval);
5466 ha->flags.fce_enabled = 0; 5463 ha->flags.fce_enabled = 0;
5467 } 5464 }
5468 } 5465 }
5469 5466
5470 if (ha->eft) { 5467 if (ha->eft) {
5471 memset(ha->eft, 0, EFT_SIZE); 5468 memset(ha->eft, 0, EFT_SIZE);
5472 rval = qla2x00_enable_eft_trace(vha, 5469 rval = qla2x00_enable_eft_trace(vha,
5473 ha->eft_dma, EFT_NUM_BUFFERS); 5470 ha->eft_dma, EFT_NUM_BUFFERS);
5474 if (rval) { 5471 if (rval) {
5475 ql_log(ql_log_warn, vha, 0x803f, 5472 ql_log(ql_log_warn, vha, 0x803f,
5476 "Unable to reinitialize EFT (%d).\n", 5473 "Unable to reinitialize EFT (%d).\n",
5477 rval); 5474 rval);
5478 } 5475 }
5479 } 5476 }
5480 } 5477 }
5481 5478
5482 if (!status) { 5479 if (!status) {
5483 ql_dbg(ql_dbg_taskm, vha, 0x8040, 5480 ql_dbg(ql_dbg_taskm, vha, 0x8040,
5484 "qla82xx_restart_isp succeeded.\n"); 5481 "qla82xx_restart_isp succeeded.\n");
5485 5482
5486 spin_lock_irqsave(&ha->vport_slock, flags); 5483 spin_lock_irqsave(&ha->vport_slock, flags);
5487 list_for_each_entry(vp, &ha->vp_list, list) { 5484 list_for_each_entry(vp, &ha->vp_list, list) {
5488 if (vp->vp_idx) { 5485 if (vp->vp_idx) {
5489 atomic_inc(&vp->vref_count); 5486 atomic_inc(&vp->vref_count);
5490 spin_unlock_irqrestore(&ha->vport_slock, flags); 5487 spin_unlock_irqrestore(&ha->vport_slock, flags);
5491 5488
5492 qla2x00_vp_abort_isp(vp); 5489 qla2x00_vp_abort_isp(vp);
5493 5490
5494 spin_lock_irqsave(&ha->vport_slock, flags); 5491 spin_lock_irqsave(&ha->vport_slock, flags);
5495 atomic_dec(&vp->vref_count); 5492 atomic_dec(&vp->vref_count);
5496 } 5493 }
5497 } 5494 }
5498 spin_unlock_irqrestore(&ha->vport_slock, flags); 5495 spin_unlock_irqrestore(&ha->vport_slock, flags);
5499 5496
5500 } else { 5497 } else {
5501 ql_log(ql_log_warn, vha, 0x8041, 5498 ql_log(ql_log_warn, vha, 0x8041,
5502 "qla82xx_restart_isp **** FAILED ****.\n"); 5499 "qla82xx_restart_isp **** FAILED ****.\n");
5503 } 5500 }
5504 5501
5505 return status; 5502 return status;
5506 } 5503 }
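
The vport walk at the end of qla82xx_restart_isp shows the standard pattern for running a slow operation on each member of a spinlock-protected list: bump a per-element reference so the entry cannot vanish, drop the lock, do the work, re-take the lock, and drop the reference before advancing. A userspace analogue with a mutex standing in for the spinlock and a plain counter for the atomic vref_count (names hypothetical; real deletion paths must also wait for the pin to drop):

    #include <pthread.h>

    struct vport {
            struct vport *next;
            int idx;  /* zero for the physical port */
            int vref; /* pin count, as with vref_count above */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void slow_op(struct vport *vp)
    {
            (void)vp; /* e.g. abort and reinitialize the vport */
    }

    /* Pin each element before releasing the lock so slow_op() can run
     * outside the critical section without the node being freed. */
    static void for_each_vport(struct vport *head)
    {
            struct vport *vp;

            pthread_mutex_lock(&list_lock);
            for (vp = head; vp; vp = vp->next) {
                    if (!vp->idx)
                            continue;
                    vp->vref++;
                    pthread_mutex_unlock(&list_lock);

                    slow_op(vp);

                    pthread_mutex_lock(&list_lock);
                    vp->vref--;
            }
            pthread_mutex_unlock(&list_lock);
    }
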
5507 5504
5508 void 5505 void
5509 qla81xx_update_fw_options(scsi_qla_host_t *vha) 5506 qla81xx_update_fw_options(scsi_qla_host_t *vha)
5510 { 5507 {
5511 struct qla_hw_data *ha = vha->hw; 5508 struct qla_hw_data *ha = vha->hw;
5512 5509
5513 if (!ql2xetsenable) 5510 if (!ql2xetsenable)
5514 return; 5511 return;
5515 5512
5516 /* Enable ETS Burst. */ 5513 /* Enable ETS Burst. */
5517 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 5514 memset(ha->fw_options, 0, sizeof(ha->fw_options));
5518 ha->fw_options[2] |= BIT_9; 5515 ha->fw_options[2] |= BIT_9;
5519 qla2x00_set_fw_options(vha, ha->fw_options); 5516 qla2x00_set_fw_options(vha, ha->fw_options);
5520 } 5517 }
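
qla81xx_update_fw_options is a guarded one-bit toggle: when the ql2xetsenable module parameter is set, it zeroes the cached firmware options, sets BIT_9 of word 2 (ETS burst), and pushes the array to firmware. The same shape in isolation; the array length is an assumption here, not taken from qla_hw_data:

    #include <stdint.h>
    #include <string.h>

    #define BIT_9 (1u << 9)

    /* Leave the options untouched when ETS is disabled; otherwise clear
     * them and set the ETS-burst bit in word 2, ready to be handed to a
     * set-options call. */
    static void build_ets_fw_options(uint16_t *fw_options, size_t nwords,
        int ets_enable)
    {
            if (!ets_enable || nwords < 3)
                    return;
            memset(fw_options, 0, nwords * sizeof(*fw_options));
            fw_options[2] |= (uint16_t)BIT_9;
    }
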
5521 5518
5522 /* 5519 /*
5523 * qla24xx_get_fcp_prio 5520 * qla24xx_get_fcp_prio
5524 * Gets the fcp cmd priority value for the logged in port. 5521 * Gets the fcp cmd priority value for the logged in port.
5525 * Looks for a match of the port descriptors within 5522 * Looks for a match of the port descriptors within
5526 * each of the fcp prio config entries. If a match is found, 5523 * each of the fcp prio config entries. If a match is found,
5527 * the tag (priority) value is returned. 5524 * the tag (priority) value is returned.
5528 * 5525 *
5529 * Input: 5526 * Input:
5530 * vha = scsi host structure pointer. 5527 * vha = scsi host structure pointer.
5531 * fcport = port structure pointer. 5528 * fcport = port structure pointer.
5532 * 5529 *
5533 * Return: 5530 * Return:
5534 * non-zero (if found) 5531 * non-zero (if found)
5535 * -1 (if not found) 5532 * -1 (if not found)
5536 * 5533 *
5537 * Context: 5534 * Context:
5538 * Kernel context 5535 * Kernel context
5539 */ 5536 */
5540 static int 5537 static int
5541 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5538 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5542 { 5539 {
5543 int i, entries; 5540 int i, entries;
5544 uint8_t pid_match, wwn_match; 5541 uint8_t pid_match, wwn_match;
5545 int priority; 5542 int priority;
5546 uint32_t pid1, pid2; 5543 uint32_t pid1, pid2;
5547 uint64_t wwn1, wwn2; 5544 uint64_t wwn1, wwn2;
5548 struct qla_fcp_prio_entry *pri_entry; 5545 struct qla_fcp_prio_entry *pri_entry;
5549 struct qla_hw_data *ha = vha->hw; 5546 struct qla_hw_data *ha = vha->hw;
5550 5547
5551 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) 5548 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5552 return -1; 5549 return -1;
5553 5550
5554 priority = -1; 5551 priority = -1;
5555 entries = ha->fcp_prio_cfg->num_entries; 5552 entries = ha->fcp_prio_cfg->num_entries;
5556 pri_entry = &ha->fcp_prio_cfg->entry[0]; 5553 pri_entry = &ha->fcp_prio_cfg->entry[0];
5557 5554
5558 for (i = 0; i < entries; i++) { 5555 for (i = 0; i < entries; i++) {
5559 pid_match = wwn_match = 0; 5556 pid_match = wwn_match = 0;
5560 5557
5561 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { 5558 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5562 pri_entry++; 5559 pri_entry++;
5563 continue; 5560 continue;
5564 } 5561 }
5565 5562
5566 /* check source pid for a match */ 5563 /* check source pid for a match */
5567 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { 5564 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5568 pid1 = pri_entry->src_pid & INVALID_PORT_ID; 5565 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5569 pid2 = vha->d_id.b24 & INVALID_PORT_ID; 5566 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5570 if (pid1 == INVALID_PORT_ID) 5567 if (pid1 == INVALID_PORT_ID)
5571 pid_match++; 5568 pid_match++;
5572 else if (pid1 == pid2) 5569 else if (pid1 == pid2)
5573 pid_match++; 5570 pid_match++;
5574 } 5571 }
5575 5572
5576 /* check destination pid for a match */ 5573 /* check destination pid for a match */
5577 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { 5574 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5578 pid1 = pri_entry->dst_pid & INVALID_PORT_ID; 5575 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5579 pid2 = fcport->d_id.b24 & INVALID_PORT_ID; 5576 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5580 if (pid1 == INVALID_PORT_ID) 5577 if (pid1 == INVALID_PORT_ID)
5581 pid_match++; 5578 pid_match++;
5582 else if (pid1 == pid2) 5579 else if (pid1 == pid2)
5583 pid_match++; 5580 pid_match++;
5584 } 5581 }
5585 5582
5586 /* check source WWN for a match */ 5583 /* check source WWN for a match */
5587 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { 5584 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5588 wwn1 = wwn_to_u64(vha->port_name); 5585 wwn1 = wwn_to_u64(vha->port_name);
5589 wwn2 = wwn_to_u64(pri_entry->src_wwpn); 5586 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5590 if (wwn2 == (uint64_t)-1) 5587 if (wwn2 == (uint64_t)-1)
5591 wwn_match++; 5588 wwn_match++;
5592 else if (wwn1 == wwn2) 5589 else if (wwn1 == wwn2)
5593 wwn_match++; 5590 wwn_match++;
5594 } 5591 }
5595 5592
5596 /* check destination WWN for a match */ 5593 /* check destination WWN for a match */
5597 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { 5594 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5598 wwn1 = wwn_to_u64(fcport->port_name); 5595 wwn1 = wwn_to_u64(fcport->port_name);
5599 wwn2 = wwn_to_u64(pri_entry->dst_wwpn); 5596 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5600 if (wwn2 == (uint64_t)-1) 5597 if (wwn2 == (uint64_t)-1)
5601 wwn_match++; 5598 wwn_match++;
5602 else if (wwn1 == wwn2) 5599 else if (wwn1 == wwn2)
5603 wwn_match++; 5600 wwn_match++;
5604 } 5601 }
5605 5602
5606 if (pid_match == 2 || wwn_match == 2) { 5603 if (pid_match == 2 || wwn_match == 2) {
5607 /* Found a matching entry */ 5604 /* Found a matching entry */
5608 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 5605 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5609 priority = pri_entry->tag; 5606 priority = pri_entry->tag;
5610 break; 5607 break;
5611 } 5608 }
5612 5609
5613 pri_entry++; 5610 pri_entry++;
5614 } 5611 }
5615 5612
5616 return priority; 5613 return priority;
5617 } 5614 }
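
The per-entry test above counts hits per descriptor pair: a source or destination PID matches when the entry's value is the all-ones wildcard (assuming INVALID_PORT_ID is the 24-bit all-ones mask) or equals the observed ID, likewise for WWNs against an all-ones 64-bit wildcard, and an entry applies only when both halves of one pair hit (pid_match == 2 || wwn_match == 2). A condensed sketch of that rule; it folds the per-field VALID flags into wildcards, which the real table keeps separate:

    #include <stdbool.h>
    #include <stdint.h>

    /* One field matches if the entry wildcards it or it equals the
     * observed value. */
    static bool field_match(uint64_t entry, uint64_t seen, uint64_t wild)
    {
            return entry == wild || entry == seen;
    }

    /* An entry applies when BOTH halves of either pair match: source and
     * destination PID, or source and destination WWN. */
    static bool entry_applies(uint32_t spid, uint32_t dpid,
        uint32_t my_pid, uint32_t peer_pid,
        uint64_t swwn, uint64_t dwwn,
        uint64_t my_wwn, uint64_t peer_wwn)
    {
            bool pid_ok = field_match(spid, my_pid, 0xffffffu) &&
                field_match(dpid, peer_pid, 0xffffffu);
            bool wwn_ok = field_match(swwn, my_wwn, (uint64_t)-1) &&
                field_match(dwwn, peer_wwn, (uint64_t)-1);

            return pid_ok || wwn_ok;
    }
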
5618 5615
5619 /* 5616 /*
5620 * qla24xx_update_fcport_fcp_prio 5617 * qla24xx_update_fcport_fcp_prio
5621 * Activates fcp priority for the logged in fc port 5618 * Activates fcp priority for the logged in fc port
5622 * 5619 *
5623 * Input: 5620 * Input:
5624 * vha = scsi host structure pointer. 5621 * vha = scsi host structure pointer.
5625 * fcp = port structure pointer. 5622 * fcp = port structure pointer.
5626 * 5623 *
5627 * Return: 5624 * Return:
5628 * QLA_SUCCESS or QLA_FUNCTION_FAILED 5625 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5629 * 5626 *
5630 * Context: 5627 * Context:
5631 * Kernel context. 5628 * Kernel context.
5632 */ 5629 */
5633 int 5630 int
5634 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) 5631 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5635 { 5632 {
5636 int ret; 5633 int ret;
5637 int priority; 5634 int priority;
5638 uint16_t mb[5]; 5635 uint16_t mb[5];
5639 5636
5640 if (fcport->port_type != FCT_TARGET || 5637 if (fcport->port_type != FCT_TARGET ||
5641 fcport->loop_id == FC_NO_LOOP_ID) 5638 fcport->loop_id == FC_NO_LOOP_ID)
5642 return QLA_FUNCTION_FAILED; 5639 return QLA_FUNCTION_FAILED;
5643 5640
5644 priority = qla24xx_get_fcp_prio(vha, fcport); 5641 priority = qla24xx_get_fcp_prio(vha, fcport);
5645 if (priority < 0) 5642 if (priority < 0)
5646 return QLA_FUNCTION_FAILED; 5643 return QLA_FUNCTION_FAILED;
5647 5644
5648 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); 5645 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5649 if (ret == QLA_SUCCESS) 5646 if (ret == QLA_SUCCESS)
5650 fcport->fcp_prio = priority; 5647 fcport->fcp_prio = priority;
5651 else 5648 else
5652 ql_dbg(ql_dbg_user, vha, 0x704f, 5649 ql_dbg(ql_dbg_user, vha, 0x704f,
5653 "Unable to activate fcp priority, ret=0x%x.\n", ret); 5650 "Unable to activate fcp priority, ret=0x%x.\n", ret);
5654 5651
5655 return ret; 5652 return ret;
5656 } 5653 }
5657 5654
5658 /* 5655 /*
5659 * qla24xx_update_all_fcp_prio 5656 * qla24xx_update_all_fcp_prio
5660 * Activates fcp priority for all the logged in ports 5657 * Activates fcp priority for all the logged in ports
5661 * 5658 *
5662 * Input: 5659 * Input:
5663 * ha = adapter block pointer. 5660 * ha = adapter block pointer.
5664 * 5661 *
5665 * Return: 5662 * Return:
5666 * QLA_SUCCESS or QLA_FUNCTION_FAILED 5663 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5667 * 5664 *
5668 * Context: 5665 * Context:
5669 * Kernel context. 5666 * Kernel context.
5670 */ 5667 */
5671 int 5668 int
5672 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) 5669 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5673 { 5670 {
5674 int ret; 5671 int ret;
5675 fc_port_t *fcport; 5672 fc_port_t *fcport;
5676 5673
5677 ret = QLA_FUNCTION_FAILED; 5674 ret = QLA_FUNCTION_FAILED;
5678 /* We need to set priority for all logged in ports */ 5675 /* We need to set priority for all logged in ports */
5679 list_for_each_entry(fcport, &vha->vp_fcports, list) 5676 list_for_each_entry(fcport, &vha->vp_fcports, list)
5680 ret = qla24xx_update_fcport_fcp_prio(vha, fcport); 5677 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
5681 5678
5682 return ret; 5679 return ret;
5683 } 5680 }
5684 5681
drivers/scsi/qla2xxx/qla_isr.c
1 /* 1 /*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 #include "qla_def.h" 7 #include "qla_def.h"
8 8
9 #include <linux/delay.h> 9 #include <linux/delay.h>
10 #include <linux/slab.h> 10 #include <linux/slab.h>
11 #include <scsi/scsi_tcq.h> 11 #include <scsi/scsi_tcq.h>
12 #include <scsi/scsi_bsg_fc.h> 12 #include <scsi/scsi_bsg_fc.h>
13 #include <scsi/scsi_eh.h> 13 #include <scsi/scsi_eh.h>
14 14
15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
16 static void qla2x00_process_completed_request(struct scsi_qla_host *, 16 static void qla2x00_process_completed_request(struct scsi_qla_host *,
17 struct req_que *, uint32_t); 17 struct req_que *, uint32_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21 sts_entry_t *); 21 sts_entry_t *);
22 22
23 /** 23 /**
24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25 * @irq: 25 * @irq:
26 * @dev_id: SCSI driver HA context 26 * @dev_id: SCSI driver HA context
27 * 27 *
28 * Called by system whenever the host adapter generates an interrupt. 28 * Called by system whenever the host adapter generates an interrupt.
29 * 29 *
30 * Returns handled flag. 30 * Returns handled flag.
31 */ 31 */
32 irqreturn_t 32 irqreturn_t
33 qla2100_intr_handler(int irq, void *dev_id) 33 qla2100_intr_handler(int irq, void *dev_id)
34 { 34 {
35 scsi_qla_host_t *vha; 35 scsi_qla_host_t *vha;
36 struct qla_hw_data *ha; 36 struct qla_hw_data *ha;
37 struct device_reg_2xxx __iomem *reg; 37 struct device_reg_2xxx __iomem *reg;
38 int status; 38 int status;
39 unsigned long iter; 39 unsigned long iter;
40 uint16_t hccr; 40 uint16_t hccr;
41 uint16_t mb[4]; 41 uint16_t mb[4];
42 struct rsp_que *rsp; 42 struct rsp_que *rsp;
43 unsigned long flags; 43 unsigned long flags;
44 44
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 printk(KERN_INFO
48 "%s(): NULL response queue pointer.\n", __func__); 48 "%s(): NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
52 ha = rsp->hw; 52 ha = rsp->hw;
53 reg = &ha->iobase->isp; 53 reg = &ha->iobase->isp;
54 status = 0; 54 status = 0;
55 55
56 spin_lock_irqsave(&ha->hardware_lock, flags); 56 spin_lock_irqsave(&ha->hardware_lock, flags);
57 vha = pci_get_drvdata(ha->pdev); 57 vha = pci_get_drvdata(ha->pdev);
58 for (iter = 50; iter--; ) { 58 for (iter = 50; iter--; ) {
59 hccr = RD_REG_WORD(&reg->hccr); 59 hccr = RD_REG_WORD(&reg->hccr);
60 if (hccr & HCCR_RISC_PAUSE) { 60 if (hccr & HCCR_RISC_PAUSE) {
61 if (pci_channel_offline(ha->pdev)) 61 if (pci_channel_offline(ha->pdev))
62 break; 62 break;
63 63
64 /* 64 /*
65 * Issue a "HARD" reset in order for the RISC interrupt 65 * Issue a "HARD" reset in order for the RISC interrupt
66 * bit to be cleared. Schedule a big hammer to get 66 * bit to be cleared. Schedule a big hammer to get
67 * out of the RISC PAUSED state. 67 * out of the RISC PAUSED state.
68 */ 68 */
69 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 69 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70 RD_REG_WORD(&reg->hccr); 70 RD_REG_WORD(&reg->hccr);
71 71
72 ha->isp_ops->fw_dump(vha, 1); 72 ha->isp_ops->fw_dump(vha, 1);
73 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 73 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74 break; 74 break;
75 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) 75 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
76 break; 76 break;
77 77
78 if (RD_REG_WORD(&reg->semaphore) & BIT_0) { 78 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 79 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80 RD_REG_WORD(&reg->hccr); 80 RD_REG_WORD(&reg->hccr);
81 81
82 /* Get mailbox data. */ 82 /* Get mailbox data. */
83 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 83 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
84 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 84 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
85 qla2x00_mbx_completion(vha, mb[0]); 85 qla2x00_mbx_completion(vha, mb[0]);
86 status |= MBX_INTERRUPT; 86 status |= MBX_INTERRUPT;
87 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 87 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 88 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 89 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 90 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
91 qla2x00_async_event(vha, rsp, mb); 91 qla2x00_async_event(vha, rsp, mb);
92 } else { 92 } else {
93 /*EMPTY*/ 93 /*EMPTY*/
94 ql_dbg(ql_dbg_async, vha, 0x5025, 94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "Unrecognized interrupt type (%d).\n", 95 "Unrecognized interrupt type (%d).\n",
96 mb[0]); 96 mb[0]);
97 } 97 }
98 /* Release mailbox registers. */ 98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0); 99 WRT_REG_WORD(&reg->semaphore, 0);
100 RD_REG_WORD(&reg->semaphore); 100 RD_REG_WORD(&reg->semaphore);
101 } else { 101 } else {
102 qla2x00_process_response_queue(rsp); 102 qla2x00_process_response_queue(rsp);
103 103
104 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 104 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105 RD_REG_WORD(&reg->hccr); 105 RD_REG_WORD(&reg->hccr);
106 } 106 }
107 } 107 }
108 spin_unlock_irqrestore(&ha->hardware_lock, flags); 108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 109
110 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 110 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 111 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
112 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 112 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
113 complete(&ha->mbx_intr_comp); 113 complete(&ha->mbx_intr_comp);
114 } 114 }
115 115
116 return (IRQ_HANDLED); 116 return (IRQ_HANDLED);
117 } 117 }
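
The semaphore branch in qla2100_intr_handler decodes mailbox 0 purely by numeric range: 0x4000..0x7fff is a mailbox-command completion status and 0x8000..0xbfff an asynchronous event code; anything else is logged as unrecognized. The classification on its own (hypothetical helper, not driver code):

    #include <stdint.h>

    enum mb0_kind { MB0_MBX_COMPLETION, MB0_ASYNC_EVENT, MB0_UNKNOWN };

    /* Same range tests as the handler above. */
    static enum mb0_kind classify_mb0(uint16_t mb0)
    {
            if (mb0 > 0x3fff && mb0 < 0x8000)
                    return MB0_MBX_COMPLETION; /* 0x4000..0x7fff */
            if (mb0 > 0x7fff && mb0 < 0xc000)
                    return MB0_ASYNC_EVENT;    /* 0x8000..0xbfff */
            return MB0_UNKNOWN;
    }
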
118 118
119 /** 119 /**
120 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 120 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121 * @irq: 121 * @irq:
122 * @dev_id: SCSI driver HA context 122 * @dev_id: SCSI driver HA context
123 * 123 *
124 * Called by system whenever the host adapter generates an interrupt. 124 * Called by system whenever the host adapter generates an interrupt.
125 * 125 *
126 * Returns handled flag. 126 * Returns handled flag.
127 */ 127 */
128 irqreturn_t 128 irqreturn_t
129 qla2300_intr_handler(int irq, void *dev_id) 129 qla2300_intr_handler(int irq, void *dev_id)
130 { 130 {
131 scsi_qla_host_t *vha; 131 scsi_qla_host_t *vha;
132 struct device_reg_2xxx __iomem *reg; 132 struct device_reg_2xxx __iomem *reg;
133 int status; 133 int status;
134 unsigned long iter; 134 unsigned long iter;
135 uint32_t stat; 135 uint32_t stat;
136 uint16_t hccr; 136 uint16_t hccr;
137 uint16_t mb[4]; 137 uint16_t mb[4];
138 struct rsp_que *rsp; 138 struct rsp_que *rsp;
139 struct qla_hw_data *ha; 139 struct qla_hw_data *ha;
140 unsigned long flags; 140 unsigned long flags;
141 141
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s(): NULL response queue pointer.\n", __func__); 145 "%s(): NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
149 ha = rsp->hw; 149 ha = rsp->hw;
150 reg = &ha->iobase->isp; 150 reg = &ha->iobase->isp;
151 status = 0; 151 status = 0;
152 152
153 spin_lock_irqsave(&ha->hardware_lock, flags); 153 spin_lock_irqsave(&ha->hardware_lock, flags);
154 vha = pci_get_drvdata(ha->pdev); 154 vha = pci_get_drvdata(ha->pdev);
155 for (iter = 50; iter--; ) { 155 for (iter = 50; iter--; ) {
156 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 156 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157 if (stat & HSR_RISC_PAUSED) { 157 if (stat & HSR_RISC_PAUSED) {
158 if (unlikely(pci_channel_offline(ha->pdev))) 158 if (unlikely(pci_channel_offline(ha->pdev)))
159 break; 159 break;
160 160
161 hccr = RD_REG_WORD(&reg->hccr); 161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163 ql_log(ql_log_warn, vha, 0x5026, 163 ql_log(ql_log_warn, vha, 0x5026,
164 "Parity error -- HCCR=%x, Dumping " 164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr); 165 "firmware.\n", hccr);
166 else 166 else
167 ql_log(ql_log_warn, vha, 0x5027, 167 ql_log(ql_log_warn, vha, 0x5027,
168 "RISC paused -- HCCR=%x, Dumping " 168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr); 169 "firmware.\n", hccr);
170 170
171 /* 171 /*
172 * Issue a "HARD" reset in order for the RISC 172 * Issue a "HARD" reset in order for the RISC
173 * interrupt bit to be cleared. Schedule a big 173 * interrupt bit to be cleared. Schedule a big
174 * hammer to get out of the RISC PAUSED state. 174 * hammer to get out of the RISC PAUSED state.
175 */ 175 */
176 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 176 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
177 RD_REG_WORD(&reg->hccr); 177 RD_REG_WORD(&reg->hccr);
178 178
179 ha->isp_ops->fw_dump(vha, 1); 179 ha->isp_ops->fw_dump(vha, 1);
180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
181 break; 181 break;
182 } else if ((stat & HSR_RISC_INT) == 0) 182 } else if ((stat & HSR_RISC_INT) == 0)
183 break; 183 break;
184 184
185 switch (stat & 0xff) { 185 switch (stat & 0xff) {
186 case 0x1: 186 case 0x1:
187 case 0x2: 187 case 0x2:
188 case 0x10: 188 case 0x10:
189 case 0x11: 189 case 0x11:
190 qla2x00_mbx_completion(vha, MSW(stat)); 190 qla2x00_mbx_completion(vha, MSW(stat));
191 status |= MBX_INTERRUPT; 191 status |= MBX_INTERRUPT;
192 192
193 /* Release mailbox registers. */ 193 /* Release mailbox registers. */
194 WRT_REG_WORD(&reg->semaphore, 0); 194 WRT_REG_WORD(&reg->semaphore, 0);
195 break; 195 break;
196 case 0x12: 196 case 0x12:
197 mb[0] = MSW(stat); 197 mb[0] = MSW(stat);
198 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 198 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
199 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 199 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
200 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 200 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
201 qla2x00_async_event(vha, rsp, mb); 201 qla2x00_async_event(vha, rsp, mb);
202 break; 202 break;
203 case 0x13: 203 case 0x13:
204 qla2x00_process_response_queue(rsp); 204 qla2x00_process_response_queue(rsp);
205 break; 205 break;
206 case 0x15: 206 case 0x15:
207 mb[0] = MBA_CMPLT_1_16BIT; 207 mb[0] = MBA_CMPLT_1_16BIT;
208 mb[1] = MSW(stat); 208 mb[1] = MSW(stat);
209 qla2x00_async_event(vha, rsp, mb); 209 qla2x00_async_event(vha, rsp, mb);
210 break; 210 break;
211 case 0x16: 211 case 0x16:
212 mb[0] = MBA_SCSI_COMPLETION; 212 mb[0] = MBA_SCSI_COMPLETION;
213 mb[1] = MSW(stat); 213 mb[1] = MSW(stat);
214 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 214 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
215 qla2x00_async_event(vha, rsp, mb); 215 qla2x00_async_event(vha, rsp, mb);
216 break; 216 break;
217 default: 217 default:
218 ql_dbg(ql_dbg_async, vha, 0x5028, 218 ql_dbg(ql_dbg_async, vha, 0x5028,
219 "Unrecognized interrupt type (%d).\n", stat & 0xff); 219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
220 break; 220 break;
221 } 221 }
222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
223 RD_REG_WORD_RELAXED(&reg->hccr); 223 RD_REG_WORD_RELAXED(&reg->hccr);
224 } 224 }
225 spin_unlock_irqrestore(&ha->hardware_lock, flags); 225 spin_unlock_irqrestore(&ha->hardware_lock, flags);
226 226
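/*
 * If a mailbox command is sleeping on mbx_intr_comp and this pass
 * saw its completion interrupt, record that fact and wake the waiter.
 */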
227 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 227 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
228 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 228 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
229 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 229 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
230 complete(&ha->mbx_intr_comp); 230 complete(&ha->mbx_intr_comp);
231 } 231 }
232 232
233 return (IRQ_HANDLED); 233 return (IRQ_HANDLED);
234 } 234 }
235 235
236 /** 236 /**
237 * qla2x00_mbx_completion() - Process mailbox command completions. 237 * qla2x00_mbx_completion() - Process mailbox command completions.
238 * @vha: SCSI driver HA context 238 * @vha: SCSI driver HA context
239 * @mb0: Mailbox0 register 239 * @mb0: Mailbox0 register
240 */ 240 */
241 static void 241 static void
242 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 242 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
243 { 243 {
244 uint16_t cnt; 244 uint16_t cnt;
245 uint16_t __iomem *wptr; 245 uint16_t __iomem *wptr;
246 struct qla_hw_data *ha = vha->hw; 246 struct qla_hw_data *ha = vha->hw;
247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
248 248
249 /* Load return mailbox registers. */ 249 /* Load return mailbox registers. */
250 ha->flags.mbox_int = 1; 250 ha->flags.mbox_int = 1;
251 ha->mailbox_out[0] = mb0; 251 ha->mailbox_out[0] = mb0;
252 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 252 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
253 253
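/*
 * Mailboxes 4 and 5 are read through the debounce helper (presumably
 * re-read until consecutive reads agree); on ISP2200 the out-mailboxes
 * from 8 onward apparently live at a different register offset, hence
 * the re-seek of wptr at cnt == 8.
 */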
254 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 254 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
255 if (IS_QLA2200(ha) && cnt == 8) 255 if (IS_QLA2200(ha) && cnt == 8)
256 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 256 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
257 if (cnt == 4 || cnt == 5) 257 if (cnt == 4 || cnt == 5)
258 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 258 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
259 else 259 else
260 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 260 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
261 261
262 wptr++; 262 wptr++;
263 } 263 }
264 264
265 if (ha->mcp) { 265 if (ha->mcp) {
266 ql_dbg(ql_dbg_async, vha, 0x5000, 266 ql_dbg(ql_dbg_async, vha, 0x5000,
267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]); 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
268 } else { 268 } else {
269 ql_dbg(ql_dbg_async, vha, 0x5001, 269 ql_dbg(ql_dbg_async, vha, 0x5001,
270 "MBX pointer ERROR.\n"); 270 "MBX pointer ERROR.\n");
271 } 271 }
272 } 272 }
273 273
274 static void 274 static void
275 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 275 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
276 { 276 {
277 static char *event[] = 277 static char *event[] =
278 { "Complete", "Request Notification", "Time Extension" }; 278 { "Complete", "Request Notification", "Time Extension" };
279 int rval; 279 int rval;
280 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 280 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
281 uint16_t __iomem *wptr; 281 uint16_t __iomem *wptr;
282 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 282 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
283 283
284 /* Seed data -- mailbox1 -> mailbox7. */ 284 /* Seed data -- mailbox1 -> mailbox7. */
285 wptr = (uint16_t __iomem *)&reg24->mailbox1; 285 wptr = (uint16_t __iomem *)&reg24->mailbox1;
286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
287 mb[cnt] = RD_REG_WORD(wptr); 287 mb[cnt] = RD_REG_WORD(wptr);
288 288
289 ql_dbg(ql_dbg_async, vha, 0x5021, 289 ql_dbg(ql_dbg_async, vha, 0x5021,
290 "Inter-Driver Communication %s -- " 290 "Inter-Driver Communication %s -- "
291 "%04x %04x %04x %04x %04x %04x %04x.\n", 291 "%04x %04x %04x %04x %04x %04x %04x.\n",
292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
293 mb[4], mb[5], mb[6]); 293 mb[4], mb[5], mb[6]);
294 294
295 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
296 timeout = (descr >> 8) & 0xf; 296 timeout = (descr >> 8) & 0xf;
297 if (aen != MBA_IDC_NOTIFY || !timeout) 297 if (aen != MBA_IDC_NOTIFY || !timeout)
298 return; 298 return;
299 299
300 ql_dbg(ql_dbg_async, vha, 0x5022, 300 ql_dbg(ql_dbg_async, vha, 0x5022,
301 "Inter-Driver Communication %s -- ACK timeout=%d.\n", 301 "Inter-Driver Communication %s -- ACK timeout=%d.\n",
302 event[aen & 0xff], timeout); 302 event[aen & 0xff], timeout);
303 303
304 rval = qla2x00_post_idc_ack_work(vha, mb); 304 rval = qla2x00_post_idc_ack_work(vha, mb);
305 if (rval != QLA_SUCCESS) 305 if (rval != QLA_SUCCESS)
306 ql_log(ql_log_warn, vha, 0x5023, 306 ql_log(ql_log_warn, vha, 0x5023,
307 "IDC failed to post ACK.\n"); 307 "IDC failed to post ACK.\n");
308 } 308 }
309 309
310 /** 310 /**
311 * qla2x00_async_event() - Process asynchronous events. 311 * qla2x00_async_event() - Process asynchronous events.
312 * @vha: SCSI driver HA context 312 * @vha: SCSI driver HA context
313 * @mb: Mailbox registers (0 - 3) 313 * @mb: Mailbox registers (0 - 3)
314 */ 314 */
315 void 315 void
316 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 316 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
317 { 317 {
318 #define LS_UNKNOWN 2 318 #define LS_UNKNOWN 2
319 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 319 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
320 char *link_speed; 320 char *link_speed;
321 uint16_t handle_cnt; 321 uint16_t handle_cnt;
322 uint16_t cnt, mbx; 322 uint16_t cnt, mbx;
323 uint32_t handles[5]; 323 uint32_t handles[5];
324 struct qla_hw_data *ha = vha->hw; 324 struct qla_hw_data *ha = vha->hw;
325 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 325 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
326 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 326 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
327 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 327 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
328 uint32_t rscn_entry, host_pid; 328 uint32_t rscn_entry, host_pid;
329 uint8_t rscn_queue_index; 329 uint8_t rscn_queue_index;
330 unsigned long flags; 330 unsigned long flags;
331 331
332 /* Setup to process RIO completion. */ 332 /* Setup to process RIO completion. */
333 handle_cnt = 0; 333 handle_cnt = 0;
334 if (IS_QLA8XXX_TYPE(ha)) 334 if (IS_QLA8XXX_TYPE(ha))
335 goto skip_rio; 335 goto skip_rio;
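/*
 * RIO (reduced interrupt operation) lets the firmware report several
 * command completions in a single event; the switch below gathers the
 * 16/32-bit handles into handles[] and normalizes mb[0] to
 * MBA_SCSI_COMPLETION for the common fast-post path.
 */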
336 switch (mb[0]) { 336 switch (mb[0]) {
337 case MBA_SCSI_COMPLETION: 337 case MBA_SCSI_COMPLETION:
338 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 338 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
339 handle_cnt = 1; 339 handle_cnt = 1;
340 break; 340 break;
341 case MBA_CMPLT_1_16BIT: 341 case MBA_CMPLT_1_16BIT:
342 handles[0] = mb[1]; 342 handles[0] = mb[1];
343 handle_cnt = 1; 343 handle_cnt = 1;
344 mb[0] = MBA_SCSI_COMPLETION; 344 mb[0] = MBA_SCSI_COMPLETION;
345 break; 345 break;
346 case MBA_CMPLT_2_16BIT: 346 case MBA_CMPLT_2_16BIT:
347 handles[0] = mb[1]; 347 handles[0] = mb[1];
348 handles[1] = mb[2]; 348 handles[1] = mb[2];
349 handle_cnt = 2; 349 handle_cnt = 2;
350 mb[0] = MBA_SCSI_COMPLETION; 350 mb[0] = MBA_SCSI_COMPLETION;
351 break; 351 break;
352 case MBA_CMPLT_3_16BIT: 352 case MBA_CMPLT_3_16BIT:
353 handles[0] = mb[1]; 353 handles[0] = mb[1];
354 handles[1] = mb[2]; 354 handles[1] = mb[2];
355 handles[2] = mb[3]; 355 handles[2] = mb[3];
356 handle_cnt = 3; 356 handle_cnt = 3;
357 mb[0] = MBA_SCSI_COMPLETION; 357 mb[0] = MBA_SCSI_COMPLETION;
358 break; 358 break;
359 case MBA_CMPLT_4_16BIT: 359 case MBA_CMPLT_4_16BIT:
360 handles[0] = mb[1]; 360 handles[0] = mb[1];
361 handles[1] = mb[2]; 361 handles[1] = mb[2];
362 handles[2] = mb[3]; 362 handles[2] = mb[3];
363 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 363 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
364 handle_cnt = 4; 364 handle_cnt = 4;
365 mb[0] = MBA_SCSI_COMPLETION; 365 mb[0] = MBA_SCSI_COMPLETION;
366 break; 366 break;
367 case MBA_CMPLT_5_16BIT: 367 case MBA_CMPLT_5_16BIT:
368 handles[0] = mb[1]; 368 handles[0] = mb[1];
369 handles[1] = mb[2]; 369 handles[1] = mb[2];
370 handles[2] = mb[3]; 370 handles[2] = mb[3];
371 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 371 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
372 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 372 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
373 handle_cnt = 5; 373 handle_cnt = 5;
374 mb[0] = MBA_SCSI_COMPLETION; 374 mb[0] = MBA_SCSI_COMPLETION;
375 break; 375 break;
376 case MBA_CMPLT_2_32BIT: 376 case MBA_CMPLT_2_32BIT:
377 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 377 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
378 handles[1] = le32_to_cpu( 378 handles[1] = le32_to_cpu(
379 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 379 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
380 RD_MAILBOX_REG(ha, reg, 6)); 380 RD_MAILBOX_REG(ha, reg, 6));
381 handle_cnt = 2; 381 handle_cnt = 2;
382 mb[0] = MBA_SCSI_COMPLETION; 382 mb[0] = MBA_SCSI_COMPLETION;
383 break; 383 break;
384 default: 384 default:
385 break; 385 break;
386 } 386 }
387 skip_rio: 387 skip_rio:
388 switch (mb[0]) { 388 switch (mb[0]) {
389 case MBA_SCSI_COMPLETION: /* Fast Post */ 389 case MBA_SCSI_COMPLETION: /* Fast Post */
390 if (!vha->flags.online) 390 if (!vha->flags.online)
391 break; 391 break;
392 392
393 for (cnt = 0; cnt < handle_cnt; cnt++) 393 for (cnt = 0; cnt < handle_cnt; cnt++)
394 qla2x00_process_completed_request(vha, rsp->req, 394 qla2x00_process_completed_request(vha, rsp->req,
395 handles[cnt]); 395 handles[cnt]);
396 break; 396 break;
397 397
398 case MBA_RESET: /* Reset */ 398 case MBA_RESET: /* Reset */
399 ql_dbg(ql_dbg_async, vha, 0x5002, 399 ql_dbg(ql_dbg_async, vha, 0x5002,
400 "Asynchronous RESET.\n"); 400 "Asynchronous RESET.\n");
401 401
402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
403 break; 403 break;
404 404
405 case MBA_SYSTEM_ERR: /* System Error */ 405 case MBA_SYSTEM_ERR: /* System Error */
406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
407 ql_log(ql_log_warn, vha, 0x5003, 407 ql_log(ql_log_warn, vha, 0x5003,
408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
410 410
411 ha->isp_ops->fw_dump(vha, 1); 411 ha->isp_ops->fw_dump(vha, 1);
412 412
413 if (IS_FWI2_CAPABLE(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
414 if (mb[1] == 0 && mb[2] == 0) { 414 if (mb[1] == 0 && mb[2] == 0) {
415 ql_log(ql_log_fatal, vha, 0x5004, 415 ql_log(ql_log_fatal, vha, 0x5004,
416 "Unrecoverable Hardware Error: adapter " 416 "Unrecoverable Hardware Error: adapter "
417 "marked OFFLINE!\n"); 417 "marked OFFLINE!\n");
418 vha->flags.online = 0; 418 vha->flags.online = 0;
419 } else { 419 } else {
420 /* Check to see if MPI timeout occurred */ 420 /* Check to see if MPI timeout occurred */
421 if ((mbx & MBX_3) && (ha->flags.port0)) 421 if ((mbx & MBX_3) && (ha->flags.port0))
422 set_bit(MPI_RESET_NEEDED, 422 set_bit(MPI_RESET_NEEDED,
423 &vha->dpc_flags); 423 &vha->dpc_flags);
424 424
425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
426 } 426 }
427 } else if (mb[1] == 0) { 427 } else if (mb[1] == 0) {
428 ql_log(ql_log_fatal, vha, 0x5005, 428 ql_log(ql_log_fatal, vha, 0x5005,
429 "Unrecoverable Hardware Error: adapter marked " 429 "Unrecoverable Hardware Error: adapter marked "
430 "OFFLINE!\n"); 430 "OFFLINE!\n");
431 vha->flags.online = 0; 431 vha->flags.online = 0;
432 } else 432 } else
433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
434 break; 434 break;
435 435
436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
437 ql_log(ql_log_warn, vha, 0x5006, 437 ql_log(ql_log_warn, vha, 0x5006,
438 "ISP Request Transfer Error (%x).\n", mb[1]); 438 "ISP Request Transfer Error (%x).\n", mb[1]);
439 439
440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
441 break; 441 break;
442 442
443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
444 ql_log(ql_log_warn, vha, 0x5007, 444 ql_log(ql_log_warn, vha, 0x5007,
445 "ISP Response Transfer Error.\n"); 445 "ISP Response Transfer Error.\n");
446 446
447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448 break; 448 break;
449 449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
451 ql_dbg(ql_dbg_async, vha, 0x5008, 451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 "Asynchronous WAKEUP_THRES.\n"); 452 "Asynchronous WAKEUP_THRES.\n");
453 break; 453 break;
454 454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 ql_log(ql_log_info, vha, 0x5009, 456 ql_log(ql_log_info, vha, 0x5009,
457 "LIP occurred (%x).\n", mb[1]); 457 "LIP occurred (%x).\n", mb[1]);
458 458
459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
460 atomic_set(&vha->loop_state, LOOP_DOWN); 460 atomic_set(&vha->loop_state, LOOP_DOWN);
461 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 461 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
462 qla2x00_mark_all_devices_lost(vha, 1); 462 qla2x00_mark_all_devices_lost(vha, 1);
463 } 463 }
464 464
465 if (vha->vp_idx) { 465 if (vha->vp_idx) {
466 atomic_set(&vha->vp_state, VP_FAILED); 466 atomic_set(&vha->vp_state, VP_FAILED);
467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
468 } 468 }
469 469
470 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 470 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
471 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 471 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
472 472
473 vha->flags.management_server_logged_in = 0; 473 vha->flags.management_server_logged_in = 0;
474 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 474 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
475 break; 475 break;
476 476
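/*
 * For ISP2100/2200 the link rate is fixed at 1 Gbps; otherwise mb[1]
 * carries the firmware link-rate code, which indexes link_speeds[]
 * directly for codes 0-4, with 0x13 denoting 10 Gbps.
 */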
477 case MBA_LOOP_UP: /* Loop Up Event */ 477 case MBA_LOOP_UP: /* Loop Up Event */
478 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 478 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
479 link_speed = link_speeds[0]; 479 link_speed = link_speeds[0];
480 ha->link_data_rate = PORT_SPEED_1GB; 480 ha->link_data_rate = PORT_SPEED_1GB;
481 } else { 481 } else {
482 link_speed = link_speeds[LS_UNKNOWN]; 482 link_speed = link_speeds[LS_UNKNOWN];
483 if (mb[1] < 5) 483 if (mb[1] < 5)
484 link_speed = link_speeds[mb[1]]; 484 link_speed = link_speeds[mb[1]];
485 else if (mb[1] == 0x13) 485 else if (mb[1] == 0x13)
486 link_speed = link_speeds[5]; 486 link_speed = link_speeds[5];
487 ha->link_data_rate = mb[1]; 487 ha->link_data_rate = mb[1];
488 } 488 }
489 489
490 ql_log(ql_log_info, vha, 0x500a, 490 ql_log(ql_log_info, vha, 0x500a,
491 "LOOP UP detected (%s Gbps).\n", link_speed); 491 "LOOP UP detected (%s Gbps).\n", link_speed);
492 492
493 vha->flags.management_server_logged_in = 0; 493 vha->flags.management_server_logged_in = 0;
494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
495 break; 495 break;
496 496
497 case MBA_LOOP_DOWN: /* Loop Down Event */ 497 case MBA_LOOP_DOWN: /* Loop Down Event */
498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
500 ql_log(ql_log_info, vha, 0x500b, 500 ql_log(ql_log_info, vha, 0x500b,
501 "LOOP DOWN detected (%x %x %x %x).\n", 501 "LOOP DOWN detected (%x %x %x %x).\n",
502 mb[1], mb[2], mb[3], mbx); 502 mb[1], mb[2], mb[3], mbx);
503 503
504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
505 atomic_set(&vha->loop_state, LOOP_DOWN); 505 atomic_set(&vha->loop_state, LOOP_DOWN);
506 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 506 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
507 vha->device_flags |= DFLG_NO_CABLE; 507 vha->device_flags |= DFLG_NO_CABLE;
508 qla2x00_mark_all_devices_lost(vha, 1); 508 qla2x00_mark_all_devices_lost(vha, 1);
509 } 509 }
510 510
511 if (vha->vp_idx) { 511 if (vha->vp_idx) {
512 atomic_set(&vha->vp_state, VP_FAILED); 512 atomic_set(&vha->vp_state, VP_FAILED);
513 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 513 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
514 } 514 }
515 515
516 vha->flags.management_server_logged_in = 0; 516 vha->flags.management_server_logged_in = 0;
517 ha->link_data_rate = PORT_SPEED_UNKNOWN; 517 ha->link_data_rate = PORT_SPEED_UNKNOWN;
518 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 518 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
519 break; 519 break;
520 520
521 case MBA_LIP_RESET: /* LIP reset occurred */ 521 case MBA_LIP_RESET: /* LIP reset occurred */
522 ql_log(ql_log_info, vha, 0x500c, 522 ql_log(ql_log_info, vha, 0x500c,
523 "LIP reset occurred (%x).\n", mb[1]); 523 "LIP reset occurred (%x).\n", mb[1]);
524 524
525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
526 atomic_set(&vha->loop_state, LOOP_DOWN); 526 atomic_set(&vha->loop_state, LOOP_DOWN);
527 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 527 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
528 qla2x00_mark_all_devices_lost(vha, 1); 528 qla2x00_mark_all_devices_lost(vha, 1);
529 } 529 }
530 530
531 if (vha->vp_idx) { 531 if (vha->vp_idx) {
532 atomic_set(&vha->vp_state, VP_FAILED); 532 atomic_set(&vha->vp_state, VP_FAILED);
533 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 533 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
534 } 534 }
535 535
536 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 536 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
537 537
538 ha->operating_mode = LOOP; 538 ha->operating_mode = LOOP;
539 vha->flags.management_server_logged_in = 0; 539 vha->flags.management_server_logged_in = 0;
540 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 540 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
541 break; 541 break;
542 542
543 /* case MBA_DCBX_COMPLETE: */ 543 /* case MBA_DCBX_COMPLETE: */
544 case MBA_POINT_TO_POINT: /* Point-to-Point */ 544 case MBA_POINT_TO_POINT: /* Point-to-Point */
545 if (IS_QLA2100(ha)) 545 if (IS_QLA2100(ha))
546 break; 546 break;
547 547
548 if (IS_QLA8XXX_TYPE(ha)) { 548 if (IS_QLA8XXX_TYPE(ha)) {
549 ql_dbg(ql_dbg_async, vha, 0x500d, 549 ql_dbg(ql_dbg_async, vha, 0x500d,
550 "DCBX Completed -- %04x %04x %04x.\n", 550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]); 551 mb[1], mb[2], mb[3]);
552 if (ha->notify_dcbx_comp) 552 if (ha->notify_dcbx_comp)
553 complete(&ha->dcbx_comp); 553 complete(&ha->dcbx_comp);
554 554
555 } else 555 } else
556 ql_dbg(ql_dbg_async, vha, 0x500e, 556 ql_dbg(ql_dbg_async, vha, 0x500e,
557 "Asynchronous P2P MODE received.\n"); 557 "Asynchronous P2P MODE received.\n");
558 558
559 /* 559 /*
560 * Until there's a transition from loop down to loop up, treat 560 * Until there's a transition from loop down to loop up, treat
561 * this as loop down only. 561 * this as loop down only.
562 */ 562 */
563 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 563 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
564 atomic_set(&vha->loop_state, LOOP_DOWN); 564 atomic_set(&vha->loop_state, LOOP_DOWN);
565 if (!atomic_read(&vha->loop_down_timer)) 565 if (!atomic_read(&vha->loop_down_timer))
566 atomic_set(&vha->loop_down_timer, 566 atomic_set(&vha->loop_down_timer,
567 LOOP_DOWN_TIME); 567 LOOP_DOWN_TIME);
568 qla2x00_mark_all_devices_lost(vha, 1); 568 qla2x00_mark_all_devices_lost(vha, 1);
569 } 569 }
570 570
571 if (vha->vp_idx) { 571 if (vha->vp_idx) {
572 atomic_set(&vha->vp_state, VP_FAILED); 572 atomic_set(&vha->vp_state, VP_FAILED);
573 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 573 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
574 } 574 }
575 575
576 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 576 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
577 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 577 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
578 578
579 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 579 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
580 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 580 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
581 581
582 ha->flags.gpsc_supported = 1; 582 ha->flags.gpsc_supported = 1;
583 vha->flags.management_server_logged_in = 0; 583 vha->flags.management_server_logged_in = 0;
584 break; 584 break;
585 585
586 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 586 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
587 if (IS_QLA2100(ha)) 587 if (IS_QLA2100(ha))
588 break; 588 break;
589 589
590 ql_log(ql_log_info, vha, 0x500f, 590 ql_log(ql_log_info, vha, 0x500f,
591 "Configuration change detected: value=%x.\n", mb[1]); 591 "Configuration change detected: value=%x.\n", mb[1]);
592 592
593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
594 atomic_set(&vha->loop_state, LOOP_DOWN); 594 atomic_set(&vha->loop_state, LOOP_DOWN);
595 if (!atomic_read(&vha->loop_down_timer)) 595 if (!atomic_read(&vha->loop_down_timer))
596 atomic_set(&vha->loop_down_timer, 596 atomic_set(&vha->loop_down_timer,
597 LOOP_DOWN_TIME); 597 LOOP_DOWN_TIME);
598 qla2x00_mark_all_devices_lost(vha, 1); 598 qla2x00_mark_all_devices_lost(vha, 1);
599 } 599 }
600 600
601 if (vha->vp_idx) { 601 if (vha->vp_idx) {
602 atomic_set(&vha->vp_state, VP_FAILED); 602 atomic_set(&vha->vp_state, VP_FAILED);
603 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 603 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
604 } 604 }
605 605
606 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 606 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
607 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 607 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
608 break; 608 break;
609 609
610 case MBA_PORT_UPDATE: /* Port database update */ 610 case MBA_PORT_UPDATE: /* Port database update */
611 /* 611 /*
612 * Handle only global and vn-port update events 612 * Handle only global and vn-port update events
613 * 613 *
614 * Relevant inputs: 614 * Relevant inputs:
615 * mb[1] = N_Port handle of changed port 615 * mb[1] = N_Port handle of changed port
616 * OR 0xffff for global event 616 * OR 0xffff for global event
617 * mb[2] = New login state 617 * mb[2] = New login state
618 * 7 = Port logged out 618 * 7 = Port logged out
619 * mb[3] = LSB is vp_idx, 0xff = all vps 619 * mb[3] = LSB is vp_idx, 0xff = all vps
620 * 620 *
621 * Skip processing if: 621 * Skip processing if:
622 * Event is global, vp_idx is NOT all vps, 622 * Event is global, vp_idx is NOT all vps,
623 * vp_idx does not match 623 * vp_idx does not match
624 * Event is not global, vp_idx does not match 624 * Event is not global, vp_idx does not match
625 */ 625 */
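/*
 * The test below applies the skip rules above on NPIV-capable
 * (mid-type) adapters only; legacy adapters process every event.
 */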
626 if (IS_QLA2XXX_MIDTYPE(ha) && 626 if (IS_QLA2XXX_MIDTYPE(ha) &&
627 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 627 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
628 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 628 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
629 break; 629 break;
630 630
631 /* Global event -- port logout or port unavailable. */ 631 /* Global event -- port logout or port unavailable. */
632 if (mb[1] == 0xffff && mb[2] == 0x7) { 632 if (mb[1] == 0xffff && mb[2] == 0x7) {
633 ql_dbg(ql_dbg_async, vha, 0x5010, 633 ql_dbg(ql_dbg_async, vha, 0x5010,
634 "Port unavailable %04x %04x %04x.\n", 634 "Port unavailable %04x %04x %04x.\n",
635 mb[1], mb[2], mb[3]); 635 mb[1], mb[2], mb[3]);
636 636
637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
638 atomic_set(&vha->loop_state, LOOP_DOWN); 638 atomic_set(&vha->loop_state, LOOP_DOWN);
639 atomic_set(&vha->loop_down_timer, 639 atomic_set(&vha->loop_down_timer,
640 LOOP_DOWN_TIME); 640 LOOP_DOWN_TIME);
641 vha->device_flags |= DFLG_NO_CABLE; 641 vha->device_flags |= DFLG_NO_CABLE;
642 qla2x00_mark_all_devices_lost(vha, 1); 642 qla2x00_mark_all_devices_lost(vha, 1);
643 } 643 }
644 644
645 if (vha->vp_idx) { 645 if (vha->vp_idx) {
646 atomic_set(&vha->vp_state, VP_FAILED); 646 atomic_set(&vha->vp_state, VP_FAILED);
647 fc_vport_set_state(vha->fc_vport, 647 fc_vport_set_state(vha->fc_vport,
648 FC_VPORT_FAILED); 648 FC_VPORT_FAILED);
649 qla2x00_mark_all_devices_lost(vha, 1); 649 qla2x00_mark_all_devices_lost(vha, 1);
650 } 650 }
651 651
652 vha->flags.management_server_logged_in = 0; 652 vha->flags.management_server_logged_in = 0;
653 ha->link_data_rate = PORT_SPEED_UNKNOWN; 653 ha->link_data_rate = PORT_SPEED_UNKNOWN;
654 break; 654 break;
655 } 655 }
656 656
657 /* 657 /*
658 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 658 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
659 * event etc. earlier indicating loop is down) then process 659 * event etc. earlier indicating loop is down) then process
660 * it. Otherwise ignore it and wait for an RSCN to come in. 660 * it. Otherwise ignore it and wait for an RSCN to come in.
661 */ 661 */
662 atomic_set(&vha->loop_down_timer, 0); 662 atomic_set(&vha->loop_down_timer, 0);
663 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
664 atomic_read(&vha->loop_state) != LOOP_DEAD) { 664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
665 ql_dbg(ql_dbg_async, vha, 0x5011, 665 ql_dbg(ql_dbg_async, vha, 0x5011,
666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
667 mb[1], mb[2], mb[3]); 667 mb[1], mb[2], mb[3]);
668 break; 668 break;
669 } 669 }
670 670
671 ql_dbg(ql_dbg_async, vha, 0x5012, 671 ql_dbg(ql_dbg_async, vha, 0x5012,
672 "Port database changed %04x %04x %04x.\n", 672 "Port database changed %04x %04x %04x.\n",
673 mb[1], mb[2], mb[3]); 673 mb[1], mb[2], mb[3]);
674 674
675 /* 675 /*
676 * Mark all devices as missing so we will log in again. 676 * Mark all devices as missing so we will log in again.
677 */ 677 */
678 atomic_set(&vha->loop_state, LOOP_UP); 678 atomic_set(&vha->loop_state, LOOP_UP);
679 679
680 qla2x00_mark_all_devices_lost(vha, 1); 680 qla2x00_mark_all_devices_lost(vha, 1);
681 681
682 vha->flags.rscn_queue_overflow = 1; 682 vha->flags.rscn_queue_overflow = 1;
683 683
684 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 684 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
685 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 685 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
686 break; 686 break;
687 687
688 case MBA_RSCN_UPDATE: /* State Change Registration */ 688 case MBA_RSCN_UPDATE: /* State Change Registration */
689 /* Check if the Vport has issued a SCR */ 689 /* Check if the Vport has issued a SCR */
690 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 690 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
691 break; 691 break;
692 /* Only handle SCNs for our Vport index. */ 692 /* Only handle SCNs for our Vport index. */
693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
694 break; 694 break;
695 695
696 ql_dbg(ql_dbg_async, vha, 0x5013, 696 ql_dbg(ql_dbg_async, vha, 0x5013,
697 "RSCN database changed -- %04x %04x %04x.\n", 697 "RSCN database changed -- %04x %04x %04x.\n",
698 mb[1], mb[2], mb[3]); 698 mb[1], mb[2], mb[3]);
699 699
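/*
 * The affected port ID is assembled from the low byte of mb[1]
 * (domain) and mb[2] (area/al_pa); an RSCN naming our own port ID is
 * ignored, anything else is queued for fabric rediscovery.
 */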
700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
702 | vha->d_id.b.al_pa; 702 | vha->d_id.b.al_pa;
703 if (rscn_entry == host_pid) { 703 if (rscn_entry == host_pid) {
704 ql_dbg(ql_dbg_async, vha, 0x5014, 704 ql_dbg(ql_dbg_async, vha, 0x5014,
705 "Ignoring RSCN update to local host " 705 "Ignoring RSCN update to local host "
706 "port ID (%06x).\n", host_pid); 706 "port ID (%06x).\n", host_pid);
707 break; 707 break;
708 } 708 }
709 709
710 /* Ignore reserved bits from RSCN-payload. */ 710 /* Ignore reserved bits from RSCN-payload. */
711 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 711 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
712 rscn_queue_index = vha->rscn_in_ptr + 1; 712 rscn_queue_index = vha->rscn_in_ptr + 1;
713 if (rscn_queue_index == MAX_RSCN_COUNT) 713 if (rscn_queue_index == MAX_RSCN_COUNT)
714 rscn_queue_index = 0; 714 rscn_queue_index = 0;
715 if (rscn_queue_index != vha->rscn_out_ptr) { 715 if (rscn_queue_index != vha->rscn_out_ptr) {
716 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; 716 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
717 vha->rscn_in_ptr = rscn_queue_index; 717 vha->rscn_in_ptr = rscn_queue_index;
718 } else { 718 } else {
719 vha->flags.rscn_queue_overflow = 1; 719 vha->flags.rscn_queue_overflow = 1;
720 } 720 }
721 721
722 atomic_set(&vha->loop_state, LOOP_UPDATE);
723 atomic_set(&vha->loop_down_timer, 0); 722 atomic_set(&vha->loop_down_timer, 0);
724 vha->flags.management_server_logged_in = 0; 723 vha->flags.management_server_logged_in = 0;
725 724
726 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 725 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
727 set_bit(RSCN_UPDATE, &vha->dpc_flags); 726 set_bit(RSCN_UPDATE, &vha->dpc_flags);
728 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 727 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
729 break; 728 break;
730 729
731 /* case MBA_RIO_RESPONSE: */ 730 /* case MBA_RIO_RESPONSE: */
732 case MBA_ZIO_RESPONSE: 731 case MBA_ZIO_RESPONSE:
733 ql_dbg(ql_dbg_async, vha, 0x5015, 732 ql_dbg(ql_dbg_async, vha, 0x5015,
734 "[R|Z]IO update completion.\n"); 733 "[R|Z]IO update completion.\n");
735 734
736 if (IS_FWI2_CAPABLE(ha)) 735 if (IS_FWI2_CAPABLE(ha))
737 qla24xx_process_response_queue(vha, rsp); 736 qla24xx_process_response_queue(vha, rsp);
738 else 737 else
739 qla2x00_process_response_queue(rsp); 738 qla2x00_process_response_queue(rsp);
740 break; 739 break;
741 740
742 case MBA_DISCARD_RND_FRAME: 741 case MBA_DISCARD_RND_FRAME:
743 ql_dbg(ql_dbg_async, vha, 0x5016, 742 ql_dbg(ql_dbg_async, vha, 0x5016,
744 "Discard RND Frame -- %04x %04x %04x.\n", 743 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]); 744 mb[1], mb[2], mb[3]);
746 break; 745 break;
747 746
748 case MBA_TRACE_NOTIFICATION: 747 case MBA_TRACE_NOTIFICATION:
749 ql_dbg(ql_dbg_async, vha, 0x5017, 748 ql_dbg(ql_dbg_async, vha, 0x5017,
750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 749 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
751 break; 750 break;
752 751
753 case MBA_ISP84XX_ALERT: 752 case MBA_ISP84XX_ALERT:
754 ql_dbg(ql_dbg_async, vha, 0x5018, 753 ql_dbg(ql_dbg_async, vha, 0x5018,
755 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 754 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]); 755 mb[1], mb[2], mb[3]);
757 756
758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 757 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
759 switch (mb[1]) { 758 switch (mb[1]) {
760 case A84_PANIC_RECOVERY: 759 case A84_PANIC_RECOVERY:
761 ql_log(ql_log_info, vha, 0x5019, 760 ql_log(ql_log_info, vha, 0x5019,
762 "Alert 84XX: panic recovery %04x %04x.\n", 761 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]); 762 mb[2], mb[3]);
764 break; 763 break;
765 case A84_OP_LOGIN_COMPLETE: 764 case A84_OP_LOGIN_COMPLETE:
766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 765 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
767 ql_log(ql_log_info, vha, 0x501a, 766 ql_log(ql_log_info, vha, 0x501a,
768 "Alert 84XX: firmware version %x.\n", 767 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version); 768 ha->cs84xx->op_fw_version);
770 break; 769 break;
771 case A84_DIAG_LOGIN_COMPLETE: 770 case A84_DIAG_LOGIN_COMPLETE:
772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 771 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
773 ql_log(ql_log_info, vha, 0x501b, 772 ql_log(ql_log_info, vha, 0x501b,
774 "Alert 84XX: diagnostic firmware version %x.\n", 773 "Alert 84XX: diagnostic firmware version %x.\n",
775 ha->cs84xx->diag_fw_version); 774 ha->cs84xx->diag_fw_version);
776 break; 775 break;
777 case A84_GOLD_LOGIN_COMPLETE: 776 case A84_GOLD_LOGIN_COMPLETE:
778 ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2]; 777 ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
779 ha->cs84xx->fw_update = 1; 778 ha->cs84xx->fw_update = 1;
780 ql_log(ql_log_info, vha, 0x501c, 779 ql_log(ql_log_info, vha, 0x501c,
781 "Alert 84XX: gold firmware version %x.\n", 780 "Alert 84XX: gold firmware version %x.\n",
782 ha->cs84xx->gold_fw_version); 781 ha->cs84xx->gold_fw_version);
783 break; 782 break;
784 default: 783 default:
785 ql_log(ql_log_warn, vha, 0x501d, 784 ql_log(ql_log_warn, vha, 0x501d,
786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 785 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
787 mb[1], mb[2], mb[3]); 786 mb[1], mb[2], mb[3]);
788 } 787 }
789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 788 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
790 break; 789 break;
791 case MBA_DCBX_START: 790 case MBA_DCBX_START:
792 ql_dbg(ql_dbg_async, vha, 0x501e, 791 ql_dbg(ql_dbg_async, vha, 0x501e,
793 "DCBX Started -- %04x %04x %04x.\n", 792 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]); 793 mb[1], mb[2], mb[3]);
795 break; 794 break;
796 case MBA_DCBX_PARAM_UPDATE: 795 case MBA_DCBX_PARAM_UPDATE:
797 ql_dbg(ql_dbg_async, vha, 0x501f, 796 ql_dbg(ql_dbg_async, vha, 0x501f,
798 "DCBX Parameters Updated -- %04x %04x %04x.\n", 797 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]); 798 mb[1], mb[2], mb[3]);
800 break; 799 break;
801 case MBA_FCF_CONF_ERR: 800 case MBA_FCF_CONF_ERR:
802 ql_dbg(ql_dbg_async, vha, 0x5020, 801 ql_dbg(ql_dbg_async, vha, 0x5020,
803 "FCF Configuration Error -- %04x %04x %04x.\n", 802 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]); 803 mb[1], mb[2], mb[3]);
805 break; 804 break;
806 case MBA_IDC_COMPLETE: 805 case MBA_IDC_COMPLETE:
807 case MBA_IDC_NOTIFY: 806 case MBA_IDC_NOTIFY:
808 case MBA_IDC_TIME_EXT: 807 case MBA_IDC_TIME_EXT:
809 qla81xx_idc_event(vha, mb[0], mb[1]); 808 qla81xx_idc_event(vha, mb[0], mb[1]);
810 break; 809 break;
811 } 810 }
812 811
813 if (!vha->vp_idx && ha->num_vhosts) 812 if (!vha->vp_idx && ha->num_vhosts)
814 qla2x00_alert_all_vps(rsp, mb); 813 qla2x00_alert_all_vps(rsp, mb);
815 } 814 }
816 815
817 /** 816 /**
818 * qla2x00_process_completed_request() - Process a Fast Post response. 817 * qla2x00_process_completed_request() - Process a Fast Post response.
818 * @vha: SCSI driver HA context 817 * @vha: SCSI driver HA context
820 * @index: SRB index 819 * @index: SRB index
821 */ 820 */
822 static void 821 static void
823 qla2x00_process_completed_request(struct scsi_qla_host *vha, 822 qla2x00_process_completed_request(struct scsi_qla_host *vha,
824 struct req_que *req, uint32_t index) 823 struct req_que *req, uint32_t index)
825 { 824 {
826 srb_t *sp; 825 srb_t *sp;
827 struct qla_hw_data *ha = vha->hw; 826 struct qla_hw_data *ha = vha->hw;
828 827
829 /* Validate handle. */ 828 /* Validate handle. */
830 if (index >= MAX_OUTSTANDING_COMMANDS) { 829 if (index >= MAX_OUTSTANDING_COMMANDS) {
831 ql_log(ql_log_warn, vha, 0x3014, 830 ql_log(ql_log_warn, vha, 0x3014,
832 "Invalid SCSI command index (%x).\n", index); 831 "Invalid SCSI command index (%x).\n", index);
833 832
834 if (IS_QLA82XX(ha)) 833 if (IS_QLA82XX(ha))
835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 834 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
836 else 835 else
837 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 836 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
838 return; 837 return;
839 } 838 }
840 839
841 sp = req->outstanding_cmds[index]; 840 sp = req->outstanding_cmds[index];
842 if (sp) { 841 if (sp) {
843 /* Free outstanding command slot. */ 842 /* Free outstanding command slot. */
844 req->outstanding_cmds[index] = NULL; 843 req->outstanding_cmds[index] = NULL;
845 844
846 /* Save ISP completion status */ 845 /* Save ISP completion status */
847 sp->cmd->result = DID_OK << 16; 846 sp->cmd->result = DID_OK << 16;
848 qla2x00_sp_compl(ha, sp); 847 qla2x00_sp_compl(ha, sp);
849 } else { 848 } else {
850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 849 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
851 850
852 if (IS_QLA82XX(ha)) 851 if (IS_QLA82XX(ha))
853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 852 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
854 else 853 else
855 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 854 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
856 } 855 }
857 } 856 }
858 857
859 static srb_t * 858 static srb_t *
860 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 859 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
861 struct req_que *req, void *iocb) 860 struct req_que *req, void *iocb)
862 { 861 {
863 struct qla_hw_data *ha = vha->hw; 862 struct qla_hw_data *ha = vha->hw;
864 sts_entry_t *pkt = iocb; 863 sts_entry_t *pkt = iocb;
865 srb_t *sp = NULL; 864 srb_t *sp = NULL;
866 uint16_t index; 865 uint16_t index;
867 866
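/*
 * The low word of the IOCB handle indexes outstanding_cmds[]: an
 * out-of-range index forces an ISP (or FCoE-context) reset, a missing
 * entry is treated as an already timed-out command, and a
 * handle/index mismatch discards the completion.
 */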
868 index = LSW(pkt->handle); 867 index = LSW(pkt->handle);
869 if (index >= MAX_OUTSTANDING_COMMANDS) { 868 if (index >= MAX_OUTSTANDING_COMMANDS) {
870 ql_log(ql_log_warn, vha, 0x5031, 869 ql_log(ql_log_warn, vha, 0x5031,
871 "Invalid command index (%x).\n", index); 870 "Invalid command index (%x).\n", index);
872 if (IS_QLA82XX(ha)) 871 if (IS_QLA82XX(ha))
873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 872 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
874 else 873 else
875 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 874 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
876 goto done; 875 goto done;
877 } 876 }
878 sp = req->outstanding_cmds[index]; 877 sp = req->outstanding_cmds[index];
879 if (!sp) { 878 if (!sp) {
880 ql_log(ql_log_warn, vha, 0x5032, 879 ql_log(ql_log_warn, vha, 0x5032,
881 "Invalid completion handle (%x) -- timed-out.\n", index); 880 "Invalid completion handle (%x) -- timed-out.\n", index);
882 return sp; 881 return sp;
883 } 882 }
884 if (sp->handle != index) { 883 if (sp->handle != index) {
885 ql_log(ql_log_warn, vha, 0x5033, 884 ql_log(ql_log_warn, vha, 0x5033,
886 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 885 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
887 return NULL; 886 return NULL;
888 } 887 }
889 888
890 req->outstanding_cmds[index] = NULL; 889 req->outstanding_cmds[index] = NULL;
891 890
892 done: 891 done:
893 return sp; 892 return sp;
894 } 893 }
895 894
896 static void 895 static void
897 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 896 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
898 struct mbx_entry *mbx) 897 struct mbx_entry *mbx)
899 { 898 {
900 const char func[] = "MBX-IOCB"; 899 const char func[] = "MBX-IOCB";
901 const char *type; 900 const char *type;
902 fc_port_t *fcport; 901 fc_port_t *fcport;
903 srb_t *sp; 902 srb_t *sp;
904 struct srb_iocb *lio; 903 struct srb_iocb *lio;
905 struct srb_ctx *ctx; 904 struct srb_ctx *ctx;
906 uint16_t *data; 905 uint16_t *data;
907 uint16_t status; 906 uint16_t status;
908 907
909 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 908 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
910 if (!sp) 909 if (!sp)
911 return; 910 return;
912 911
913 ctx = sp->ctx; 912 ctx = sp->ctx;
914 lio = ctx->u.iocb_cmd; 913 lio = ctx->u.iocb_cmd;
915 type = ctx->name; 914 type = ctx->name;
916 fcport = sp->fcport; 915 fcport = sp->fcport;
917 data = lio->u.logio.data; 916 data = lio->u.logio.data;
918 917
919 data[0] = MBS_COMMAND_ERROR; 918 data[0] = MBS_COMMAND_ERROR;
920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 919 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
921 QLA_LOGIO_LOGIN_RETRIED : 0; 920 QLA_LOGIO_LOGIN_RETRIED : 0;
922 if (mbx->entry_status) { 921 if (mbx->entry_status) {
923 ql_dbg(ql_dbg_async, vha, 0x5043, 922 ql_dbg(ql_dbg_async, vha, 0x5043,
924 "Async-%s error entry - portid=%02x%02x%02x " 923 "Async-%s error entry - portid=%02x%02x%02x "
925 "entry-status=%x status=%x state-flag=%x " 924 "entry-status=%x status=%x state-flag=%x "
926 "status-flags=%x.\n", 925 "status-flags=%x.\n",
927 type, fcport->d_id.b.domain, fcport->d_id.b.area, 926 type, fcport->d_id.b.domain, fcport->d_id.b.area,
928 fcport->d_id.b.al_pa, mbx->entry_status, 927 fcport->d_id.b.al_pa, mbx->entry_status,
929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 928 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
930 le16_to_cpu(mbx->status_flags)); 929 le16_to_cpu(mbx->status_flags));
931 930
932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057, 931 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx)); 932 (uint8_t *)mbx, sizeof(*mbx));
934 933
935 goto logio_done; 934 goto logio_done;
936 } 935 }
937 936
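/*
 * A login MBX-IOCB may report status 0x30 while mailbox0 still says
 * MBS_COMMAND_COMPLETE; that combination is treated as success.
 */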
938 status = le16_to_cpu(mbx->status); 937 status = le16_to_cpu(mbx->status);
939 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && 938 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 939 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
941 status = 0; 940 status = 0;
942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 941 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
943 ql_dbg(ql_dbg_async, vha, 0x5045, 942 ql_dbg(ql_dbg_async, vha, 0x5045,
944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n", 943 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
945 type, fcport->d_id.b.domain, fcport->d_id.b.area, 944 type, fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); 945 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
947 946
948 data[0] = MBS_COMMAND_COMPLETE; 947 data[0] = MBS_COMMAND_COMPLETE;
949 if (ctx->type == SRB_LOGIN_CMD) { 948 if (ctx->type == SRB_LOGIN_CMD) {
950 fcport->port_type = FCT_TARGET; 949 fcport->port_type = FCT_TARGET;
951 if (le16_to_cpu(mbx->mb1) & BIT_0) 950 if (le16_to_cpu(mbx->mb1) & BIT_0)
952 fcport->port_type = FCT_INITIATOR; 951 fcport->port_type = FCT_INITIATOR;
953 else if (le16_to_cpu(mbx->mb1) & BIT_1) 952 else if (le16_to_cpu(mbx->mb1) & BIT_1)
954 fcport->flags |= FCF_FCP2_DEVICE; 953 fcport->flags |= FCF_FCP2_DEVICE;
955 } 954 }
956 goto logio_done; 955 goto logio_done;
957 } 956 }
958 957
959 data[0] = le16_to_cpu(mbx->mb0); 958 data[0] = le16_to_cpu(mbx->mb0);
960 switch (data[0]) { 959 switch (data[0]) {
961 case MBS_PORT_ID_USED: 960 case MBS_PORT_ID_USED:
962 data[1] = le16_to_cpu(mbx->mb1); 961 data[1] = le16_to_cpu(mbx->mb1);
963 break; 962 break;
964 case MBS_LOOP_ID_USED: 963 case MBS_LOOP_ID_USED:
965 break; 964 break;
966 default: 965 default:
967 data[0] = MBS_COMMAND_ERROR; 966 data[0] = MBS_COMMAND_ERROR;
968 break; 967 break;
969 } 968 }
970 969
971 ql_log(ql_log_warn, vha, 0x5046, 970 ql_log(ql_log_warn, vha, 0x5046,
972 "Async-%s failed - portid=%02x%02x%02x status=%x " 971 "Async-%s failed - portid=%02x%02x%02x status=%x "
973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 972 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
974 type, fcport->d_id.b.domain, 973 type, fcport->d_id.b.domain,
975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 974 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 975 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 976 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
978 le16_to_cpu(mbx->mb7)); 977 le16_to_cpu(mbx->mb7));
979 978
980 logio_done: 979 logio_done:
981 lio->done(sp); 980 lio->done(sp);
982 } 981 }
983 982
984 static void 983 static void
985 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 984 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
986 sts_entry_t *pkt, int iocb_type) 985 sts_entry_t *pkt, int iocb_type)
987 { 986 {
988 const char func[] = "CT_IOCB"; 987 const char func[] = "CT_IOCB";
989 const char *type; 988 const char *type;
990 struct qla_hw_data *ha = vha->hw; 989 struct qla_hw_data *ha = vha->hw;
991 srb_t *sp; 990 srb_t *sp;
992 struct srb_ctx *sp_bsg; 991 struct srb_ctx *sp_bsg;
993 struct fc_bsg_job *bsg_job; 992 struct fc_bsg_job *bsg_job;
994 uint16_t comp_status; 993 uint16_t comp_status;
995 994
996 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 995 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
997 if (!sp) 996 if (!sp)
998 return; 997 return;
999 998
1000 sp_bsg = sp->ctx; 999 sp_bsg = sp->ctx;
1001 bsg_job = sp_bsg->u.bsg_job; 1000 bsg_job = sp_bsg->u.bsg_job;
1002 1001
1003 type = NULL; 1002 type = NULL;
1004 switch (sp_bsg->type) { 1003 switch (sp_bsg->type) {
1005 case SRB_CT_CMD: 1004 case SRB_CT_CMD:
1006 type = "ct pass-through"; 1005 type = "ct pass-through";
1007 break; 1006 break;
1008 default: 1007 default:
1009 ql_log(ql_log_warn, vha, 0x5047, 1008 ql_log(ql_log_warn, vha, 0x5047,
1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1009 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1011 return; 1010 return;
1012 } 1011 }
1013 1012
1014 comp_status = le16_to_cpu(pkt->comp_status); 1013 comp_status = le16_to_cpu(pkt->comp_status);
1015 1014
1016 /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1015 /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1017 * FC payload to the caller. 1016 * FC payload to the caller.
1018 */ 1017 */
1019 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1018 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1020 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1019 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1021 1020
1022 if (comp_status != CS_COMPLETE) { 1021 if (comp_status != CS_COMPLETE) {
1023 if (comp_status == CS_DATA_UNDERRUN) { 1022 if (comp_status == CS_DATA_UNDERRUN) {
1024 bsg_job->reply->result = DID_OK << 16; 1023 bsg_job->reply->result = DID_OK << 16;
1025 bsg_job->reply->reply_payload_rcv_len = 1024 bsg_job->reply->reply_payload_rcv_len =
1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1025 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1027 1026
1028 ql_log(ql_log_warn, vha, 0x5048, 1027 ql_log(ql_log_warn, vha, 0x5048,
1029 "CT pass-through-%s error " 1028 "CT pass-through-%s error "
1030 "comp_status-status=0x%x total_byte = 0x%x.\n", 1029 "comp_status-status=0x%x total_byte = 0x%x.\n",
1031 type, comp_status, 1030 type, comp_status,
1032 bsg_job->reply->reply_payload_rcv_len); 1031 bsg_job->reply->reply_payload_rcv_len);
1033 } else { 1032 } else {
1034 ql_log(ql_log_warn, vha, 0x5049, 1033 ql_log(ql_log_warn, vha, 0x5049,
1035 "CT pass-through-%s error " 1034 "CT pass-through-%s error "
1036 "comp_status-status=0x%x.\n", type, comp_status); 1035 "comp_status-status=0x%x.\n", type, comp_status);
1037 bsg_job->reply->result = DID_ERROR << 16; 1036 bsg_job->reply->result = DID_ERROR << 16;
1038 bsg_job->reply->reply_payload_rcv_len = 0; 1037 bsg_job->reply->reply_payload_rcv_len = 0;
1039 } 1038 }
1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058, 1039 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt)); 1040 (uint8_t *)pkt, sizeof(*pkt));
1042 } else { 1041 } else {
1043 bsg_job->reply->result = DID_OK << 16; 1042 bsg_job->reply->result = DID_OK << 16;
1044 bsg_job->reply->reply_payload_rcv_len = 1043 bsg_job->reply->reply_payload_rcv_len =
1045 bsg_job->reply_payload.payload_len; 1044 bsg_job->reply_payload.payload_len;
1046 bsg_job->reply_len = 0; 1045 bsg_job->reply_len = 0;
1047 } 1046 }
1048 1047
1049 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1048 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1050 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1049 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1051 1050
1052 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1051 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1053 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1052 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1054 1053
1055 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD) 1054 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1056 kfree(sp->fcport); 1055 kfree(sp->fcport);
1057 1056
1058 kfree(sp->ctx); 1057 kfree(sp->ctx);
1059 mempool_free(sp, ha->srb_mempool); 1058 mempool_free(sp, ha->srb_mempool);
1060 bsg_job->job_done(bsg_job); 1059 bsg_job->job_done(bsg_job);
1061 } 1060 }
1062 1061
1063 static void 1062 static void
1064 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1063 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1065 struct sts_entry_24xx *pkt, int iocb_type) 1064 struct sts_entry_24xx *pkt, int iocb_type)
1066 { 1065 {
1067 const char func[] = "ELS_CT_IOCB"; 1066 const char func[] = "ELS_CT_IOCB";
1068 const char *type; 1067 const char *type;
1069 struct qla_hw_data *ha = vha->hw; 1068 struct qla_hw_data *ha = vha->hw;
1070 srb_t *sp; 1069 srb_t *sp;
1071 struct srb_ctx *sp_bsg; 1070 struct srb_ctx *sp_bsg;
1072 struct fc_bsg_job *bsg_job; 1071 struct fc_bsg_job *bsg_job;
1073 uint16_t comp_status; 1072 uint16_t comp_status;
1074 uint32_t fw_status[3]; 1073 uint32_t fw_status[3];
1075 uint8_t* fw_sts_ptr; 1074 uint8_t* fw_sts_ptr;
1076 1075
1077 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1076 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1078 if (!sp) 1077 if (!sp)
1079 return; 1078 return;
1080 sp_bsg = sp->ctx; 1079 sp_bsg = sp->ctx;
1081 bsg_job = sp_bsg->u.bsg_job; 1080 bsg_job = sp_bsg->u.bsg_job;
1082 1081
1083 type = NULL; 1082 type = NULL;
1084 switch (sp_bsg->type) { 1083 switch (sp_bsg->type) {
1085 case SRB_ELS_CMD_RPT: 1084 case SRB_ELS_CMD_RPT:
1086 case SRB_ELS_CMD_HST: 1085 case SRB_ELS_CMD_HST:
1087 type = "els"; 1086 type = "els";
1088 break; 1087 break;
1089 case SRB_CT_CMD: 1088 case SRB_CT_CMD:
1090 type = "ct pass-through"; 1089 type = "ct pass-through";
1091 break; 1090 break;
1092 default: 1091 default:
1093 ql_log(ql_log_warn, vha, 0x503e, 1092 ql_log(ql_log_warn, vha, 0x503e,
1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1093 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1095 return; 1094 return;
1096 } 1095 }
1097 1096
1098 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1097 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1099 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1098 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1100 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1099 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1101 1100
1102 /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1101 /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1103 * FC payload to the caller. 1102 * FC payload to the caller.
1104 */ 1103 */
1105 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1104 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1106 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1105 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1107 1106
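/*
 * On error the three firmware status words are copied past the
 * fc_bsg_reply in the job's sense buffer, which is why reply_len
 * above reserves sizeof(fw_status) extra bytes.
 */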
1108 if (comp_status != CS_COMPLETE) { 1107 if (comp_status != CS_COMPLETE) {
1109 if (comp_status == CS_DATA_UNDERRUN) { 1108 if (comp_status == CS_DATA_UNDERRUN) {
1110 bsg_job->reply->result = DID_OK << 16; 1109 bsg_job->reply->result = DID_OK << 16;
1111 bsg_job->reply->reply_payload_rcv_len = 1110 bsg_job->reply->reply_payload_rcv_len =
1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1111 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1113 1112
1114 ql_log(ql_log_info, vha, 0x503f, 1113 ql_log(ql_log_info, vha, 0x503f,
1115 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1114 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1115 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1117 type, comp_status, fw_status[1], fw_status[2], 1116 type, comp_status, fw_status[1], fw_status[2],
1118 le16_to_cpu(((struct els_sts_entry_24xx *) 1117 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count)); 1118 pkt)->total_byte_count));
1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1119 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1121 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status)); 1120 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1122 } 1121 }
1123 else { 1122 else {
1124 ql_log(ql_log_info, vha, 0x5040, 1123 ql_log(ql_log_info, vha, 0x5040,
1125 "ELS-CT pass-through-%s error comp_status-status=0x%x " 1124 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1126 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1125 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1127 type, comp_status, 1126 type, comp_status,
1128 le16_to_cpu(((struct els_sts_entry_24xx *) 1127 le16_to_cpu(((struct els_sts_entry_24xx *)
1129 pkt)->error_subcode_1), 1128 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *) 1129 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2)); 1130 pkt)->error_subcode_2));
1132 bsg_job->reply->result = DID_ERROR << 16; 1131 bsg_job->reply->result = DID_ERROR << 16;
1133 bsg_job->reply->reply_payload_rcv_len = 0; 1132 bsg_job->reply->reply_payload_rcv_len = 0;
1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1133 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1135 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status)); 1134 memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1136 } 1135 }
1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, 1136 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt)); 1137 (uint8_t *)pkt, sizeof(*pkt));
1139 } 1138 }
1140 else { 1139 else {
1141 bsg_job->reply->result = DID_OK << 16; 1140 bsg_job->reply->result = DID_OK << 16;
1142 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1141 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1143 bsg_job->reply_len = 0; 1142 bsg_job->reply_len = 0;
1144 } 1143 }
1145 1144
1146 dma_unmap_sg(&ha->pdev->dev, 1145 dma_unmap_sg(&ha->pdev->dev,
1147 bsg_job->request_payload.sg_list, 1146 bsg_job->request_payload.sg_list,
1148 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1147 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1149 dma_unmap_sg(&ha->pdev->dev, 1148 dma_unmap_sg(&ha->pdev->dev,
1150 bsg_job->reply_payload.sg_list, 1149 bsg_job->reply_payload.sg_list,
1151 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1150 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1152 if ((sp_bsg->type == SRB_ELS_CMD_HST) || 1151 if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1153 (sp_bsg->type == SRB_CT_CMD)) 1152 (sp_bsg->type == SRB_CT_CMD))
1154 kfree(sp->fcport); 1153 kfree(sp->fcport);
1155 kfree(sp->ctx); 1154 kfree(sp->ctx);
1156 mempool_free(sp, ha->srb_mempool); 1155 mempool_free(sp, ha->srb_mempool);
1157 bsg_job->job_done(bsg_job); 1156 bsg_job->job_done(bsg_job);
1158 } 1157 }
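Both error paths above append the three firmware status words (comp_status and the two error subcodes) immediately after the struct fc_bsg_reply inside the job's sense buffer, which is how user space recovers them from a failed pass-through. A minimal user-space sketch of that layout follows; the structure and values are illustrative stand-ins, not the real fc_bsg_reply definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel structure; the size is illustrative only. */
struct fake_fc_bsg_reply {
        uint32_t result;
        uint32_t reply_payload_rcv_len;
};

int main(void)
{
        uint8_t sense[96];
        struct fake_fc_bsg_reply *reply = (struct fake_fc_bsg_reply *)sense;
        uint16_t fw_status[3] = { 0x0001, 0x0002, 0x0003 }; /* comp, sub1, sub2 */

        /* Mirror of the driver's layout: fw_status lands right after the reply. */
        reply->result = 0x00070000; /* DID_ERROR << 16 */
        memcpy(sense + sizeof(*reply), fw_status, sizeof(fw_status));

        /* A consumer walks the same offsets to pull the words back out. */
        uint16_t decoded[3];
        memcpy(decoded, sense + sizeof(*reply), sizeof(decoded));
        printf("comp=0x%x sub1=0x%x sub2=0x%x\n", decoded[0], decoded[1], decoded[2]);
        return 0;
}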
1159 1158
1160 static void 1159 static void
1161 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1160 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1162 struct logio_entry_24xx *logio) 1161 struct logio_entry_24xx *logio)
1163 { 1162 {
1164 const char func[] = "LOGIO-IOCB"; 1163 const char func[] = "LOGIO-IOCB";
1165 const char *type; 1164 const char *type;
1166 fc_port_t *fcport; 1165 fc_port_t *fcport;
1167 srb_t *sp; 1166 srb_t *sp;
1168 struct srb_iocb *lio; 1167 struct srb_iocb *lio;
1169 struct srb_ctx *ctx; 1168 struct srb_ctx *ctx;
1170 uint16_t *data; 1169 uint16_t *data;
1171 uint32_t iop[2]; 1170 uint32_t iop[2];
1172 1171
1173 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1172 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1174 if (!sp) 1173 if (!sp)
1175 return; 1174 return;
1176 1175
1177 ctx = sp->ctx; 1176 ctx = sp->ctx;
1178 lio = ctx->u.iocb_cmd; 1177 lio = ctx->u.iocb_cmd;
1179 type = ctx->name; 1178 type = ctx->name;
1180 fcport = sp->fcport; 1179 fcport = sp->fcport;
1181 data = lio->u.logio.data; 1180 data = lio->u.logio.data;
1182 1181
1183 data[0] = MBS_COMMAND_ERROR; 1182 data[0] = MBS_COMMAND_ERROR;
1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1183 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1185 QLA_LOGIO_LOGIN_RETRIED : 0; 1184 QLA_LOGIO_LOGIN_RETRIED : 0;
1186 if (logio->entry_status) { 1185 if (logio->entry_status) {
1187 ql_log(ql_log_warn, vha, 0x5034, 1186 ql_log(ql_log_warn, vha, 0x5034,
1188 "Async-%s error entry - " 1187 "Async-%s error entry - "
1189 "portid=%02x%02x%02x entry-status=%x.\n", 1188 "portid=%02x%02x%02x entry-status=%x.\n",
1190 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1189 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1191 fcport->d_id.b.al_pa, logio->entry_status); 1190 fcport->d_id.b.al_pa, logio->entry_status);
1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059, 1191 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1193 (uint8_t *)logio, sizeof(*logio)); 1192 (uint8_t *)logio, sizeof(*logio));
1194 1193
1195 goto logio_done; 1194 goto logio_done;
1196 } 1195 }
1197 1196
1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1197 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1199 ql_dbg(ql_dbg_async, vha, 0x5036, 1198 ql_dbg(ql_dbg_async, vha, 0x5036,
1200 "Async-%s complete - portid=%02x%02x%02x " 1199 "Async-%s complete - portid=%02x%02x%02x "
1201 "iop0=%x.\n", 1200 "iop0=%x.\n",
1202 type, fcport->d_id.b.domain, fcport->d_id.b.area, 1201 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1203 fcport->d_id.b.al_pa, 1202 fcport->d_id.b.al_pa,
1204 le32_to_cpu(logio->io_parameter[0])); 1203 le32_to_cpu(logio->io_parameter[0]));
1205 1204
1206 data[0] = MBS_COMMAND_COMPLETE; 1205 data[0] = MBS_COMMAND_COMPLETE;
1207 if (ctx->type != SRB_LOGIN_CMD) 1206 if (ctx->type != SRB_LOGIN_CMD)
1208 goto logio_done; 1207 goto logio_done;
1209 1208
1210 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1209 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1211 if (iop[0] & BIT_4) { 1210 if (iop[0] & BIT_4) {
1212 fcport->port_type = FCT_TARGET; 1211 fcport->port_type = FCT_TARGET;
1213 if (iop[0] & BIT_8) 1212 if (iop[0] & BIT_8)
1214 fcport->flags |= FCF_FCP2_DEVICE; 1213 fcport->flags |= FCF_FCP2_DEVICE;
1215 } else if (iop[0] & BIT_5) 1214 } else if (iop[0] & BIT_5)
1216 fcport->port_type = FCT_INITIATOR; 1215 fcport->port_type = FCT_INITIATOR;
1217 1216
1218 if (logio->io_parameter[7] || logio->io_parameter[8]) 1217 if (logio->io_parameter[7] || logio->io_parameter[8])
1219 fcport->supported_classes |= FC_COS_CLASS2; 1218 fcport->supported_classes |= FC_COS_CLASS2;
1220 if (logio->io_parameter[9] || logio->io_parameter[10]) 1219 if (logio->io_parameter[9] || logio->io_parameter[10])
1221 fcport->supported_classes |= FC_COS_CLASS3; 1220 fcport->supported_classes |= FC_COS_CLASS3;
1222 1221
1223 goto logio_done; 1222 goto logio_done;
1224 } 1223 }
1225 1224
1226 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1225 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1227 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1226 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1228 switch (iop[0]) { 1227 switch (iop[0]) {
1229 case LSC_SCODE_PORTID_USED: 1228 case LSC_SCODE_PORTID_USED:
1230 data[0] = MBS_PORT_ID_USED; 1229 data[0] = MBS_PORT_ID_USED;
1231 data[1] = LSW(iop[1]); 1230 data[1] = LSW(iop[1]);
1232 break; 1231 break;
1233 case LSC_SCODE_NPORT_USED: 1232 case LSC_SCODE_NPORT_USED:
1234 data[0] = MBS_LOOP_ID_USED; 1233 data[0] = MBS_LOOP_ID_USED;
1235 break; 1234 break;
1236 default: 1235 default:
1237 data[0] = MBS_COMMAND_ERROR; 1236 data[0] = MBS_COMMAND_ERROR;
1238 break; 1237 break;
1239 } 1238 }
1240 1239
1241 ql_dbg(ql_dbg_async, vha, 0x5037, 1240 ql_dbg(ql_dbg_async, vha, 0x5037,
1242 "Async-%s failed - portid=%02x%02x%02x comp=%x " 1241 "Async-%s failed - portid=%02x%02x%02x comp=%x "
1243 "iop0=%x iop1=%x.\n", 1242 "iop0=%x iop1=%x.\n",
1244 type, fcport->d_id.b.domain, 1243 type, fcport->d_id.b.domain,
1245 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1244 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1246 le16_to_cpu(logio->comp_status), 1245 le16_to_cpu(logio->comp_status),
1247 le32_to_cpu(logio->io_parameter[0]), 1246 le32_to_cpu(logio->io_parameter[0]),
1248 le32_to_cpu(logio->io_parameter[1])); 1247 le32_to_cpu(logio->io_parameter[1]));
1249 1248
1250 logio_done: 1249 logio_done:
1251 lio->done(sp); 1250 lio->done(sp);
1252 } 1251 }
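On the success path the handler classifies the remote port from io_parameter[0]: BIT_4 marks a target (with BIT_8 additionally flagging an FCP-2, i.e. tape-style, device) and BIT_5 an initiator. A sketch of that decode in isolation; the bit positions follow the code above, the remaining names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define BIT_4 (1u << 4)
#define BIT_5 (1u << 5)
#define BIT_8 (1u << 8)

enum port_type { PORT_UNKNOWN, PORT_TARGET, PORT_INITIATOR };

static enum port_type classify(uint32_t iop0, int *fcp2)
{
        *fcp2 = 0;
        if (iop0 & BIT_4) {
                *fcp2 = !!(iop0 & BIT_8); /* FCP-2 (tape-style) device */
                return PORT_TARGET;
        }
        if (iop0 & BIT_5)
                return PORT_INITIATOR;
        return PORT_UNKNOWN;
}

int main(void)
{
        int fcp2;
        enum port_type t = classify(BIT_4 | BIT_8, &fcp2);
        printf("type=%d fcp2=%d\n", t, fcp2); /* type=1 fcp2=1 */
        return 0;
}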
1253 1252
1254 static void 1253 static void
1255 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1254 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 struct tsk_mgmt_entry *tsk) 1255 struct tsk_mgmt_entry *tsk)
1257 { 1256 {
1258 const char func[] = "TMF-IOCB"; 1257 const char func[] = "TMF-IOCB";
1259 const char *type; 1258 const char *type;
1260 fc_port_t *fcport; 1259 fc_port_t *fcport;
1261 srb_t *sp; 1260 srb_t *sp;
1262 struct srb_iocb *iocb; 1261 struct srb_iocb *iocb;
1263 struct srb_ctx *ctx; 1262 struct srb_ctx *ctx;
1264 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1263 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1265 int error = 1; 1264 int error = 1;
1266 1265
1267 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1266 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1268 if (!sp) 1267 if (!sp)
1269 return; 1268 return;
1270 1269
1271 ctx = sp->ctx; 1270 ctx = sp->ctx;
1272 iocb = ctx->u.iocb_cmd; 1271 iocb = ctx->u.iocb_cmd;
1273 type = ctx->name; 1272 type = ctx->name;
1274 fcport = sp->fcport; 1273 fcport = sp->fcport;
1275 1274
1276 if (sts->entry_status) { 1275 if (sts->entry_status) {
1277 ql_log(ql_log_warn, vha, 0x5038, 1276 ql_log(ql_log_warn, vha, 0x5038,
1278 "Async-%s error - entry-status(%x).\n", 1277 "Async-%s error - entry-status(%x).\n",
1279 type, sts->entry_status); 1278 type, sts->entry_status);
1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1279 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1281 ql_log(ql_log_warn, vha, 0x5039, 1280 ql_log(ql_log_warn, vha, 0x5039,
1282 "Async-%s error - completion status(%x).\n", 1281 "Async-%s error - completion status(%x).\n",
1283 type, sts->comp_status); 1282 type, sts->comp_status);
1284 } else if (!(le16_to_cpu(sts->scsi_status) & 1283 } else if (!(le16_to_cpu(sts->scsi_status) &
1285 SS_RESPONSE_INFO_LEN_VALID)) { 1284 SS_RESPONSE_INFO_LEN_VALID)) {
1286 ql_log(ql_log_warn, vha, 0x503a, 1285 ql_log(ql_log_warn, vha, 0x503a,
1287 "Async-%s error - no response info(%x).\n", 1286 "Async-%s error - no response info(%x).\n",
1288 type, sts->scsi_status); 1287 type, sts->scsi_status);
1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1288 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1290 ql_log(ql_log_warn, vha, 0x503b, 1289 ql_log(ql_log_warn, vha, 0x503b,
1291 "Async-%s error - not enough response(%d).\n", 1290 "Async-%s error - not enough response(%d).\n",
1292 type, sts->rsp_data_len); 1291 type, sts->rsp_data_len);
1293 } else if (sts->data[3]) { 1292 } else if (sts->data[3]) {
1294 ql_log(ql_log_warn, vha, 0x503c, 1293 ql_log(ql_log_warn, vha, 0x503c,
1295 "Async-%s error - response(%x).\n", 1294 "Async-%s error - response(%x).\n",
1296 type, sts->data[3]); 1295 type, sts->data[3]);
1297 } else { 1296 } else {
1298 error = 0; 1297 error = 0;
1299 } 1298 }
1300 1299
1301 if (error) { 1300 if (error) {
1302 iocb->u.tmf.data = error; 1301 iocb->u.tmf.data = error;
1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1302 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts)); 1303 (uint8_t *)sts, sizeof(*sts));
1305 } 1304 }
1306 1305
1307 iocb->done(sp); 1306 iocb->done(sp);
1308 } 1307 }
1309 1308
1310 /** 1309 /**
1311 * qla2x00_process_response_queue() - Process response queue entries. 1310 * qla2x00_process_response_queue() - Process response queue entries.
1312 * @rsp: response queue 1311 * @rsp: response queue
1313 */ 1312 */
1314 void 1313 void
1315 qla2x00_process_response_queue(struct rsp_que *rsp) 1314 qla2x00_process_response_queue(struct rsp_que *rsp)
1316 { 1315 {
1317 struct scsi_qla_host *vha; 1316 struct scsi_qla_host *vha;
1318 struct qla_hw_data *ha = rsp->hw; 1317 struct qla_hw_data *ha = rsp->hw;
1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1318 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1320 sts_entry_t *pkt; 1319 sts_entry_t *pkt;
1321 uint16_t handle_cnt; 1320 uint16_t handle_cnt;
1322 uint16_t cnt; 1321 uint16_t cnt;
1323 1322
1324 vha = pci_get_drvdata(ha->pdev); 1323 vha = pci_get_drvdata(ha->pdev);
1325 1324
1326 if (!vha->flags.online) 1325 if (!vha->flags.online)
1327 return; 1326 return;
1328 1327
1329 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1328 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1330 pkt = (sts_entry_t *)rsp->ring_ptr; 1329 pkt = (sts_entry_t *)rsp->ring_ptr;
1331 1330
1332 rsp->ring_index++; 1331 rsp->ring_index++;
1333 if (rsp->ring_index == rsp->length) { 1332 if (rsp->ring_index == rsp->length) {
1334 rsp->ring_index = 0; 1333 rsp->ring_index = 0;
1335 rsp->ring_ptr = rsp->ring; 1334 rsp->ring_ptr = rsp->ring;
1336 } else { 1335 } else {
1337 rsp->ring_ptr++; 1336 rsp->ring_ptr++;
1338 } 1337 }
1339 1338
1340 if (pkt->entry_status != 0) { 1339 if (pkt->entry_status != 0) {
1341 ql_log(ql_log_warn, vha, 0x5035, 1340 ql_log(ql_log_warn, vha, 0x5035,
1342 "Process error entry.\n"); 1341 "Process error entry.\n");
1343 1342
1344 qla2x00_error_entry(vha, rsp, pkt); 1343 qla2x00_error_entry(vha, rsp, pkt);
1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1344 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1346 wmb(); 1345 wmb();
1347 continue; 1346 continue;
1348 } 1347 }
1349 1348
1350 switch (pkt->entry_type) { 1349 switch (pkt->entry_type) {
1351 case STATUS_TYPE: 1350 case STATUS_TYPE:
1352 qla2x00_status_entry(vha, rsp, pkt); 1351 qla2x00_status_entry(vha, rsp, pkt);
1353 break; 1352 break;
1354 case STATUS_TYPE_21: 1353 case STATUS_TYPE_21:
1355 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1354 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1356 for (cnt = 0; cnt < handle_cnt; cnt++) { 1355 for (cnt = 0; cnt < handle_cnt; cnt++) {
1357 qla2x00_process_completed_request(vha, rsp->req, 1356 qla2x00_process_completed_request(vha, rsp->req,
1358 ((sts21_entry_t *)pkt)->handle[cnt]); 1357 ((sts21_entry_t *)pkt)->handle[cnt]);
1359 } 1358 }
1360 break; 1359 break;
1361 case STATUS_TYPE_22: 1360 case STATUS_TYPE_22:
1362 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1361 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1363 for (cnt = 0; cnt < handle_cnt; cnt++) { 1362 for (cnt = 0; cnt < handle_cnt; cnt++) {
1364 qla2x00_process_completed_request(vha, rsp->req, 1363 qla2x00_process_completed_request(vha, rsp->req,
1365 ((sts22_entry_t *)pkt)->handle[cnt]); 1364 ((sts22_entry_t *)pkt)->handle[cnt]);
1366 } 1365 }
1367 break; 1366 break;
1368 case STATUS_CONT_TYPE: 1367 case STATUS_CONT_TYPE:
1369 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1368 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1370 break; 1369 break;
1371 case MBX_IOCB_TYPE: 1370 case MBX_IOCB_TYPE:
1372 qla2x00_mbx_iocb_entry(vha, rsp->req, 1371 qla2x00_mbx_iocb_entry(vha, rsp->req,
1373 (struct mbx_entry *)pkt); 1372 (struct mbx_entry *)pkt);
1374 break; 1373 break;
1375 case CT_IOCB_TYPE: 1374 case CT_IOCB_TYPE:
1376 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1375 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1377 break; 1376 break;
1378 default: 1377 default:
1379 /* Type Not Supported. */ 1378 /* Type Not Supported. */
1380 ql_log(ql_log_warn, vha, 0x504a, 1379 ql_log(ql_log_warn, vha, 0x504a,
1381 "Received unknown response pkt type %x " 1380 "Received unknown response pkt type %x "
1382 "entry status=%x.\n", 1381 "entry status=%x.\n",
1383 pkt->entry_type, pkt->entry_status); 1382 pkt->entry_type, pkt->entry_status);
1384 break; 1383 break;
1385 } 1384 }
1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1385 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1387 wmb(); 1386 wmb();
1388 } 1387 }
1389 1388
1390 /* Adjust ring index */ 1389 /* Adjust ring index */
1391 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1390 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1392 } 1391 }
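The loop above is a standard single-consumer ring walk: keep consuming entries until one still carries the RESPONSE_PROCESSED signature, wrap ring_index at rsp->length, stamp each consumed entry processed behind a write barrier, then publish the final index to the chip's out-pointer register. A minimal user-space sketch of the same pattern, with a software producer standing in for the firmware and all names invented:

#include <stdint.h>
#include <stdio.h>

#define RING_LEN  8
#define PROCESSED 0xDEADBEEFu

struct entry { uint32_t signature; uint32_t payload; };

static struct entry ring[RING_LEN];
static unsigned int ring_index; /* consumer position */

static void process_ring(void)
{
        while (ring[ring_index].signature != PROCESSED) {
                struct entry *e = &ring[ring_index];

                /* Advance with wrap, mirroring the rsp->ring_index handling. */
                ring_index = (ring_index + 1) % RING_LEN;

                printf("consumed payload %u\n", e->payload);
                e->signature = PROCESSED; /* hand the slot back */
        }
        /* Here the driver would write ring_index to the out-pointer register. */
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                ring[i] = (struct entry){ .signature = 0, .payload = i };
        for (int i = 3; i < RING_LEN; i++)
                ring[i].signature = PROCESSED;
        process_ring();
        return 0;
}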
1393 1392
1394 static inline void 1393 static inline void
1396 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1395 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1397 uint32_t sense_len, struct rsp_que *rsp) 1396 uint32_t sense_len, struct rsp_que *rsp)
1398 { 1397 {
1399 struct scsi_qla_host *vha = sp->fcport->vha; 1398 struct scsi_qla_host *vha = sp->fcport->vha;
1400 struct scsi_cmnd *cp = sp->cmd; 1399 struct scsi_cmnd *cp = sp->cmd;
1401 1400
1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1401 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1403 sense_len = SCSI_SENSE_BUFFERSIZE; 1402 sense_len = SCSI_SENSE_BUFFERSIZE;
1404 1403
1405 sp->request_sense_length = sense_len; 1404 sp->request_sense_length = sense_len;
1406 sp->request_sense_ptr = cp->sense_buffer; 1405 sp->request_sense_ptr = cp->sense_buffer;
1407 if (sp->request_sense_length > par_sense_len) 1406 if (sp->request_sense_length > par_sense_len)
1408 sense_len = par_sense_len; 1407 sense_len = par_sense_len;
1409 1408
1410 memcpy(cp->sense_buffer, sense_data, sense_len); 1409 memcpy(cp->sense_buffer, sense_data, sense_len);
1411 1410
1412 sp->request_sense_ptr += sense_len; 1411 sp->request_sense_ptr += sense_len;
1413 sp->request_sense_length -= sense_len; 1412 sp->request_sense_length -= sense_len;
1414 if (sp->request_sense_length != 0) 1413 if (sp->request_sense_length != 0)
1415 rsp->status_srb = sp; 1414 rsp->status_srb = sp;
1416 1415
1417 ql_dbg(ql_dbg_io, vha, 0x301c, 1416 ql_dbg(ql_dbg_io, vha, 0x301c,
1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n", 1417 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id, 1418 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp); 1419 cp->device->lun, cp);
1421 if (sense_len) 1420 if (sense_len)
1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1421 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len); 1422 cp->sense_buffer, sense_len);
1424 } 1423 }
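qla2x00_handle_sense() copies whatever fits in the status IOCB now and, if sense bytes remain, parks the srb on rsp->status_srb so that qla2x00_status_cont_entry() further down can drain the rest from continuation IOCBs. A sketch of the producer-side bookkeeping, assuming a simple cursor structure in place of the sp->request_sense_* fields:

#include <stdio.h>
#include <string.h>

struct sense_cursor {
        unsigned char *ptr;       /* next byte to fill, like sp->request_sense_ptr */
        unsigned int   remaining; /* like sp->request_sense_length */
};

/* Copy what fits in this IOCB; report whether a continuation is pending. */
static int copy_first_chunk(struct sense_cursor *c, unsigned char *dst,
                            const unsigned char *src, unsigned int total,
                            unsigned int chunk_max)
{
        unsigned int n = total < chunk_max ? total : chunk_max;

        memcpy(dst, src, n);
        c->ptr = dst + n;
        c->remaining = total - n;
        return c->remaining != 0; /* nonzero: park sp on rsp->status_srb */
}

int main(void)
{
        unsigned char buf[64], data[40];
        struct sense_cursor c;

        memset(data, 0x70, sizeof(data));
        int pending = copy_first_chunk(&c, buf, data, sizeof(data), 28);
        printf("pending=%d remaining=%u\n", pending, c.remaining);
        return 0;
}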
1425 1424
1426 struct scsi_dif_tuple { 1425 struct scsi_dif_tuple {
1427 __be16 guard; /* Checksum */ 1426 __be16 guard; /* Checksum */
1428 __be16 app_tag; /* APPL identifier */ 1427 __be16 app_tag; /* APPL identifier */
1429 __be32 ref_tag; /* Target LBA or indirect LBA */ 1428 __be32 ref_tag; /* Target LBA or indirect LBA */
1430 }; 1429 };
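This is the 8-byte T10 protection-information tuple that trails each protected data block: a 16-bit guard (a checksum of the block), a 16-bit application tag, and a 32-bit reference tag that, for type 1 and type 2 protection, carries the low 32 bits of the LBA. A small sketch of the layout and of the expected-ref-tag rule; the helper names are invented:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct dif_tuple {
        uint16_t guard;
        uint16_t app_tag;
        uint32_t ref_tag;
};

/* The wire format is exactly 8 bytes per protected block (holds on common ABIs). */
static_assert(sizeof(struct dif_tuple) == 8, "DIF tuple must be 8 bytes");

/* Type 1/2: the expected ref tag of block i is the low 32 bits of (lba + i). */
static uint32_t expected_ref_tag(uint64_t lba, uint32_t block)
{
        return (uint32_t)(lba + block);
}

int main(void)
{
        printf("0x%08x\n", expected_ref_tag(0xFFFFFFFEull, 3)); /* wraps to 0x00000001 */
        return 0;
}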
1431 1430
1432 /* 1431 /*
1433 * Checks the guard or meta-data for the type of error 1432 * Checks the guard or meta-data for the type of error
1434 * detected by the HBA. In case of errors, we set the 1433 * detected by the HBA. In case of errors, we set the
1435 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1434 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1436 * to indicate to the kernel that the HBA detected the error. 1435 * to indicate to the kernel that the HBA detected the error.
1437 */ 1436 */
1438 static inline int 1437 static inline int
1439 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1438 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1440 { 1439 {
1441 struct scsi_qla_host *vha = sp->fcport->vha; 1440 struct scsi_qla_host *vha = sp->fcport->vha;
1442 struct scsi_cmnd *cmd = sp->cmd; 1441 struct scsi_cmnd *cmd = sp->cmd;
1443 uint8_t *ap = &sts24->data[12]; 1442 uint8_t *ap = &sts24->data[12];
1444 uint8_t *ep = &sts24->data[20]; 1443 uint8_t *ep = &sts24->data[20];
1445 uint32_t e_ref_tag, a_ref_tag; 1444 uint32_t e_ref_tag, a_ref_tag;
1446 uint16_t e_app_tag, a_app_tag; 1445 uint16_t e_app_tag, a_app_tag;
1447 uint16_t e_guard, a_guard; 1446 uint16_t e_guard, a_guard;
1448 1447
1449 /* 1448 /*
1450 * the swab32 of the "data" field at the beginning of qla2x00_status_entry() 1449 * the swab32 of the "data" field at the beginning of qla2x00_status_entry()
1451 * makes the guard field appear at offset 2 1450 * makes the guard field appear at offset 2
1452 */ 1451 */
1453 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1452 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1454 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1453 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1455 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1454 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1456 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1455 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1457 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1456 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1458 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1457 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1459 1458
1460 ql_dbg(ql_dbg_io, vha, 0x3023, 1459 ql_dbg(ql_dbg_io, vha, 0x3023,
1461 "iocb(s) %p Returned STATUS.\n", sts24); 1460 "iocb(s) %p Returned STATUS.\n", sts24);
1462 1461
1463 ql_dbg(ql_dbg_io, vha, 0x3024, 1462 ql_dbg(ql_dbg_io, vha, 0x3024,
1464 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1463 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1465 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1464 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1466 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1465 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1467 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1466 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1468 a_app_tag, e_app_tag, a_guard, e_guard); 1467 a_app_tag, e_app_tag, a_guard, e_guard);
1469 1468
1470 /* 1469 /*
1471 * Ignore sector if: 1470 * Ignore sector if:
1472 * For type 3: ref & app tag is all 'f's 1471 * For type 3: ref & app tag is all 'f's
1473 * For type 0,1,2: app tag is all 'f's 1472 * For type 0,1,2: app tag is all 'f's
1474 */ 1473 */
1475 if ((a_app_tag == 0xffff) && 1474 if ((a_app_tag == 0xffff) &&
1476 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1475 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1477 (a_ref_tag == 0xffffffff))) { 1476 (a_ref_tag == 0xffffffff))) {
1478 uint32_t blocks_done, resid; 1477 uint32_t blocks_done, resid;
1479 sector_t lba_s = scsi_get_lba(cmd); 1478 sector_t lba_s = scsi_get_lba(cmd);
1480 1479
1481 /* 2TB boundary case covered automatically with this; see the worked example below */ 1480 /* 2TB boundary case covered automatically with this; see the worked example below */
1482 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1481 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1483 1482
1484 resid = scsi_bufflen(cmd) - (blocks_done * 1483 resid = scsi_bufflen(cmd) - (blocks_done *
1485 cmd->device->sector_size); 1484 cmd->device->sector_size);
1486 1485
1487 scsi_set_resid(cmd, resid); 1486 scsi_set_resid(cmd, resid);
1488 cmd->result = DID_OK << 16; 1487 cmd->result = DID_OK << 16;
1489 1488
1490 /* Update protection tag */ 1489 /* Update protection tag */
1491 if (scsi_prot_sg_count(cmd)) { 1490 if (scsi_prot_sg_count(cmd)) {
1492 uint32_t i, j = 0, k = 0, num_ent; 1491 uint32_t i, j = 0, k = 0, num_ent;
1493 struct scatterlist *sg; 1492 struct scatterlist *sg;
1494 struct sd_dif_tuple *spt; 1493 struct sd_dif_tuple *spt;
1495 1494
1496 /* Patch the corresponding protection tags */ 1495 /* Patch the corresponding protection tags */
1497 scsi_for_each_prot_sg(cmd, sg, 1496 scsi_for_each_prot_sg(cmd, sg,
1498 scsi_prot_sg_count(cmd), i) { 1497 scsi_prot_sg_count(cmd), i) {
1499 num_ent = sg_dma_len(sg) / 8; 1498 num_ent = sg_dma_len(sg) / 8;
1500 if (k + num_ent < blocks_done) { 1499 if (k + num_ent < blocks_done) {
1501 k += num_ent; 1500 k += num_ent;
1502 continue; 1501 continue;
1503 } 1502 }
1504 j = blocks_done - k - 1; 1503 j = blocks_done - k - 1;
1505 k = blocks_done; 1504 k = blocks_done;
1506 break; 1505 break;
1507 } 1506 }
1508 1507
1509 if (k != blocks_done) { 1508 if (k != blocks_done) {
1510 qla_printk(KERN_WARNING, sp->fcport->vha->hw, 1509 qla_printk(KERN_WARNING, sp->fcport->vha->hw,
1511 "unexpected tag values tag:lba=%x:%lx)\n", 1510 "unexpected tag values tag:lba=%x:%lx)\n",
1512 e_ref_tag, lba_s); 1511 e_ref_tag, lba_s);
1513 return 1; 1512 return 1;
1514 } 1513 }
1515 1514
1516 spt = page_address(sg_page(sg)) + sg->offset; 1515 spt = page_address(sg_page(sg)) + sg->offset;
1517 spt += j; 1516 spt += j;
1518 1517
1519 spt->app_tag = 0xffff; 1518 spt->app_tag = 0xffff;
1520 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1519 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1521 spt->ref_tag = 0xffffffff; 1520 spt->ref_tag = 0xffffffff;
1522 } 1521 }
1523 1522
1524 return 0; 1523 return 0;
1525 } 1524 }
1526 1525
1527 /* check guard */ 1526 /* check guard */
1528 if (e_guard != a_guard) { 1527 if (e_guard != a_guard) {
1529 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1528 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1530 0x10, 0x1); 1529 0x10, 0x1);
1531 set_driver_byte(cmd, DRIVER_SENSE); 1530 set_driver_byte(cmd, DRIVER_SENSE);
1532 set_host_byte(cmd, DID_ABORT); 1531 set_host_byte(cmd, DID_ABORT);
1533 cmd->result |= SAM_STAT_CHECK_CONDITION; 1532 cmd->result |= SAM_STAT_CHECK_CONDITION;
1534 return 1; 1533 return 1;
1535 } 1534 }
1536 1535
1537 /* check ref tag */ 1536 /* check ref tag */
1538 if (e_ref_tag != a_ref_tag) { 1537 if (e_ref_tag != a_ref_tag) {
1539 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1538 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1540 0x10, 0x3); 1539 0x10, 0x3);
1541 set_driver_byte(cmd, DRIVER_SENSE); 1540 set_driver_byte(cmd, DRIVER_SENSE);
1542 set_host_byte(cmd, DID_ABORT); 1541 set_host_byte(cmd, DID_ABORT);
1543 cmd->result |= SAM_STAT_CHECK_CONDITION; 1542 cmd->result |= SAM_STAT_CHECK_CONDITION;
1544 return 1; 1543 return 1;
1545 } 1544 }
1546 1545
1547 /* check appl tag */ 1546 /* check appl tag */
1548 if (e_app_tag != a_app_tag) { 1547 if (e_app_tag != a_app_tag) {
1549 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1548 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1550 0x10, 0x2); 1549 0x10, 0x2);
1551 set_driver_byte(cmd, DRIVER_SENSE); 1550 set_driver_byte(cmd, DRIVER_SENSE);
1552 set_host_byte(cmd, DID_ABORT); 1551 set_host_byte(cmd, DID_ABORT);
1553 cmd->result |= SAM_STAT_CHECK_CONDITION; 1552 cmd->result |= SAM_STAT_CHECK_CONDITION;
1554 return 1; 1553 return 1;
1555 } 1554 }
1556 1555
1557 return 1; 1556 return 1;
1558 } 1557 }
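The "2TB boundary case covered automatically" comment inside the function refers to the unsigned arithmetic in blocks_done = e_ref_tag - (uint32_t)lba_s + 1. The reference tag holds only the low 32 bits of the LBA, so a transfer crossing the 2^32-block boundary (2 TB at 512-byte sectors) makes e_ref_tag numerically smaller than the truncated start LBA; the 32-bit subtraction then wraps modulo 2^32 and still yields the correct block count. A worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lba_s = 0xFFFFFFFEull;  /* start two blocks below the 2TB line */
        uint32_t e_ref_tag = 0x00000001; /* ref tag of the 4th block, post-wrap */

        /* Same expression as the driver: wraps modulo 2^32 to the true count. */
        uint32_t blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

        printf("blocks_done = %u\n", blocks_done); /* prints 4 */
        return 0;
}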
1559 1558
1560 /** 1559 /**
1561 * qla2x00_status_entry() - Process a Status IOCB entry. 1560 * qla2x00_status_entry() - Process a Status IOCB entry.
1562 * @vha: SCSI driver HA context 1561 * @vha: SCSI driver HA context
1563 * @rsp: response queue 1562 * @rsp: response queue
1564 * @pkt: Entry pointer 1563 * @pkt: Entry pointer
1564 */ 1563 */
1565 static void 1564 static void
1566 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1565 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1567 { 1566 {
1568 srb_t *sp; 1567 srb_t *sp;
1569 fc_port_t *fcport; 1568 fc_port_t *fcport;
1570 struct scsi_cmnd *cp; 1569 struct scsi_cmnd *cp;
1571 sts_entry_t *sts; 1570 sts_entry_t *sts;
1572 struct sts_entry_24xx *sts24; 1571 struct sts_entry_24xx *sts24;
1573 uint16_t comp_status; 1572 uint16_t comp_status;
1574 uint16_t scsi_status; 1573 uint16_t scsi_status;
1575 uint16_t ox_id; 1574 uint16_t ox_id;
1576 uint8_t lscsi_status; 1575 uint8_t lscsi_status;
1577 int32_t resid; 1576 int32_t resid;
1578 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1577 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1579 fw_resid_len; 1578 fw_resid_len;
1580 uint8_t *rsp_info, *sense_data; 1579 uint8_t *rsp_info, *sense_data;
1581 struct qla_hw_data *ha = vha->hw; 1580 struct qla_hw_data *ha = vha->hw;
1582 uint32_t handle; 1581 uint32_t handle;
1583 uint16_t que; 1582 uint16_t que;
1584 struct req_que *req; 1583 struct req_que *req;
1585 int logit = 1; 1584 int logit = 1;
1586 1585
1587 sts = (sts_entry_t *) pkt; 1586 sts = (sts_entry_t *) pkt;
1588 sts24 = (struct sts_entry_24xx *) pkt; 1587 sts24 = (struct sts_entry_24xx *) pkt;
1589 if (IS_FWI2_CAPABLE(ha)) { 1588 if (IS_FWI2_CAPABLE(ha)) {
1590 comp_status = le16_to_cpu(sts24->comp_status); 1589 comp_status = le16_to_cpu(sts24->comp_status);
1591 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1590 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1592 } else { 1591 } else {
1593 comp_status = le16_to_cpu(sts->comp_status); 1592 comp_status = le16_to_cpu(sts->comp_status);
1594 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1593 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1595 } 1594 }
1596 handle = (uint32_t) LSW(sts->handle); 1595 handle = (uint32_t) LSW(sts->handle);
1597 que = MSW(sts->handle); 1596 que = MSW(sts->handle);
1598 req = ha->req_q_map[que]; 1597 req = ha->req_q_map[que];
1599 1598
1600 /* Fast path completion. */ 1599 /* Fast path completion. */
1601 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1600 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1602 qla2x00_process_completed_request(vha, req, handle); 1601 qla2x00_process_completed_request(vha, req, handle);
1603 1602
1604 return; 1603 return;
1605 } 1604 }
1606 1605
1607 /* Validate handle. */ 1606 /* Validate handle. */
1608 if (handle < MAX_OUTSTANDING_COMMANDS) { 1607 if (handle < MAX_OUTSTANDING_COMMANDS) {
1609 sp = req->outstanding_cmds[handle]; 1608 sp = req->outstanding_cmds[handle];
1610 req->outstanding_cmds[handle] = NULL; 1609 req->outstanding_cmds[handle] = NULL;
1611 } else 1610 } else
1612 sp = NULL; 1611 sp = NULL;
1613 1612
1614 if (sp == NULL) { 1613 if (sp == NULL) {
1615 ql_log(ql_log_warn, vha, 0x3017, 1614 ql_log(ql_log_warn, vha, 0x3017,
1616 "Invalid status handle (0x%x).\n", sts->handle); 1615 "Invalid status handle (0x%x).\n", sts->handle);
1617 1616
1618 if (IS_QLA82XX(ha)) 1617 if (IS_QLA82XX(ha))
1619 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1618 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1620 else 1619 else
1621 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1620 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1622 qla2xxx_wake_dpc(vha); 1621 qla2xxx_wake_dpc(vha);
1623 return; 1622 return;
1624 } 1623 }
1625 cp = sp->cmd; 1624 cp = sp->cmd;
1626 if (cp == NULL) { 1625 if (cp == NULL) {
1627 ql_log(ql_log_warn, vha, 0x3018, 1626 ql_log(ql_log_warn, vha, 0x3018,
1628 "Command already returned (0x%x/%p).\n", 1627 "Command already returned (0x%x/%p).\n",
1629 sts->handle, sp); 1628 sts->handle, sp);
1630 1629
1631 return; 1630 return;
1632 } 1631 }
1633 1632
1634 lscsi_status = scsi_status & STATUS_MASK; 1633 lscsi_status = scsi_status & STATUS_MASK;
1635 1634
1636 fcport = sp->fcport; 1635 fcport = sp->fcport;
1637 1636
1638 ox_id = 0; 1637 ox_id = 0;
1639 sense_len = par_sense_len = rsp_info_len = resid_len = 1638 sense_len = par_sense_len = rsp_info_len = resid_len =
1640 fw_resid_len = 0; 1639 fw_resid_len = 0;
1641 if (IS_FWI2_CAPABLE(ha)) { 1640 if (IS_FWI2_CAPABLE(ha)) {
1642 if (scsi_status & SS_SENSE_LEN_VALID) 1641 if (scsi_status & SS_SENSE_LEN_VALID)
1643 sense_len = le32_to_cpu(sts24->sense_len); 1642 sense_len = le32_to_cpu(sts24->sense_len);
1644 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1643 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1645 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 1644 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1646 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 1645 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1647 resid_len = le32_to_cpu(sts24->rsp_residual_count); 1646 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1648 if (comp_status == CS_DATA_UNDERRUN) 1647 if (comp_status == CS_DATA_UNDERRUN)
1649 fw_resid_len = le32_to_cpu(sts24->residual_len); 1648 fw_resid_len = le32_to_cpu(sts24->residual_len);
1650 rsp_info = sts24->data; 1649 rsp_info = sts24->data;
1651 sense_data = sts24->data; 1650 sense_data = sts24->data;
1652 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1651 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1653 ox_id = le16_to_cpu(sts24->ox_id); 1652 ox_id = le16_to_cpu(sts24->ox_id);
1654 par_sense_len = sizeof(sts24->data); 1653 par_sense_len = sizeof(sts24->data);
1655 } else { 1654 } else {
1656 if (scsi_status & SS_SENSE_LEN_VALID) 1655 if (scsi_status & SS_SENSE_LEN_VALID)
1657 sense_len = le16_to_cpu(sts->req_sense_length); 1656 sense_len = le16_to_cpu(sts->req_sense_length);
1658 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 1657 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1659 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 1658 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1660 resid_len = le32_to_cpu(sts->residual_length); 1659 resid_len = le32_to_cpu(sts->residual_length);
1661 rsp_info = sts->rsp_info; 1660 rsp_info = sts->rsp_info;
1662 sense_data = sts->req_sense_data; 1661 sense_data = sts->req_sense_data;
1663 par_sense_len = sizeof(sts->req_sense_data); 1662 par_sense_len = sizeof(sts->req_sense_data);
1664 } 1663 }
1665 1664
1666 /* Check for any FCP transport errors. */ 1665 /* Check for any FCP transport errors. */
1667 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 1666 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1668 /* Sense data lies beyond any FCP RESPONSE data. */ 1667 /* Sense data lies beyond any FCP RESPONSE data. */
1669 if (IS_FWI2_CAPABLE(ha)) { 1668 if (IS_FWI2_CAPABLE(ha)) {
1670 sense_data += rsp_info_len; 1669 sense_data += rsp_info_len;
1671 par_sense_len -= rsp_info_len; 1670 par_sense_len -= rsp_info_len;
1672 } 1671 }
1673 if (rsp_info_len > 3 && rsp_info[3]) { 1672 if (rsp_info_len > 3 && rsp_info[3]) {
1674 ql_log(ql_log_warn, vha, 0x3019, 1673 ql_log(ql_log_warn, vha, 0x3019,
1675 "FCP I/O protocol failure (0x%x/0x%x).\n", 1674 "FCP I/O protocol failure (0x%x/0x%x).\n",
1676 rsp_info_len, rsp_info[3]); 1675 rsp_info_len, rsp_info[3]);
1677 1676
1678 cp->result = DID_BUS_BUSY << 16; 1677 cp->result = DID_BUS_BUSY << 16;
1679 goto out; 1678 goto out;
1680 } 1679 }
1681 } 1680 }
1682 1681
1683 /* Check for overrun. */ 1682 /* Check for overrun. */
1684 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 1683 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1685 scsi_status & SS_RESIDUAL_OVER) 1684 scsi_status & SS_RESIDUAL_OVER)
1686 comp_status = CS_DATA_OVERRUN; 1685 comp_status = CS_DATA_OVERRUN;
1687 1686
1688 /* 1687 /*
1689 * Based on the host and SCSI status, generate a status code for Linux. 1688 * Based on the host and SCSI status, generate a status code for Linux.
1690 */ 1689 */
1691 switch (comp_status) { 1690 switch (comp_status) {
1692 case CS_COMPLETE: 1691 case CS_COMPLETE:
1693 case CS_QUEUE_FULL: 1692 case CS_QUEUE_FULL:
1694 if (scsi_status == 0) { 1693 if (scsi_status == 0) {
1695 cp->result = DID_OK << 16; 1694 cp->result = DID_OK << 16;
1696 break; 1695 break;
1697 } 1696 }
1698 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1697 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1699 resid = resid_len; 1698 resid = resid_len;
1700 scsi_set_resid(cp, resid); 1699 scsi_set_resid(cp, resid);
1701 1700
1702 if (!lscsi_status && 1701 if (!lscsi_status &&
1703 ((unsigned)(scsi_bufflen(cp) - resid) < 1702 ((unsigned)(scsi_bufflen(cp) - resid) <
1704 cp->underflow)) { 1703 cp->underflow)) {
1705 ql_log(ql_log_warn, vha, 0x301a, 1704 ql_log(ql_log_warn, vha, 0x301a,
1706 "Mid-layer underflow " 1705 "Mid-layer underflow "
1707 "detected (0x%x of 0x%x bytes).\n", 1706 "detected (0x%x of 0x%x bytes).\n",
1708 resid, scsi_bufflen(cp)); 1707 resid, scsi_bufflen(cp));
1709 1708
1710 cp->result = DID_ERROR << 16; 1709 cp->result = DID_ERROR << 16;
1711 break; 1710 break;
1712 } 1711 }
1713 } 1712 }
1714 cp->result = DID_OK << 16 | lscsi_status; 1713 cp->result = DID_OK << 16 | lscsi_status;
1715 1714
1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1715 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1717 ql_log(ql_log_warn, vha, 0x301b, 1716 ql_log(ql_log_warn, vha, 0x301b,
1718 "QUEUE FULL detected.\n"); 1717 "QUEUE FULL detected.\n");
1719 break; 1718 break;
1720 } 1719 }
1721 logit = 0; 1720 logit = 0;
1722 if (lscsi_status != SS_CHECK_CONDITION) 1721 if (lscsi_status != SS_CHECK_CONDITION)
1723 break; 1722 break;
1724 1723
1725 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1724 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1726 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1725 if (!(scsi_status & SS_SENSE_LEN_VALID))
1727 break; 1726 break;
1728 1727
1729 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 1728 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1730 rsp); 1729 rsp);
1731 break; 1730 break;
1732 1731
1733 case CS_DATA_UNDERRUN: 1732 case CS_DATA_UNDERRUN:
1734 /* Use F/W calculated residual length. */ 1733 /* Use F/W calculated residual length. */
1735 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 1734 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1736 scsi_set_resid(cp, resid); 1735 scsi_set_resid(cp, resid);
1737 if (scsi_status & SS_RESIDUAL_UNDER) { 1736 if (scsi_status & SS_RESIDUAL_UNDER) {
1738 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1737 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1739 ql_log(ql_log_warn, vha, 0x301d, 1738 ql_log(ql_log_warn, vha, 0x301d,
1740 "Dropped frame(s) detected " 1739 "Dropped frame(s) detected "
1741 "(0x%x of 0x%x bytes).\n", 1740 "(0x%x of 0x%x bytes).\n",
1742 resid, scsi_bufflen(cp)); 1741 resid, scsi_bufflen(cp));
1743 1742
1744 cp->result = DID_ERROR << 16 | lscsi_status; 1743 cp->result = DID_ERROR << 16 | lscsi_status;
1745 break; 1744 break;
1746 } 1745 }
1747 1746
1748 if (!lscsi_status && 1747 if (!lscsi_status &&
1749 ((unsigned)(scsi_bufflen(cp) - resid) < 1748 ((unsigned)(scsi_bufflen(cp) - resid) <
1750 cp->underflow)) { 1749 cp->underflow)) {
1751 ql_log(ql_log_warn, vha, 0x301e, 1750 ql_log(ql_log_warn, vha, 0x301e,
1752 "Mid-layer underflow " 1751 "Mid-layer underflow "
1753 "detected (0x%x of 0x%x bytes).\n", 1752 "detected (0x%x of 0x%x bytes).\n",
1754 resid, scsi_bufflen(cp)); 1753 resid, scsi_bufflen(cp));
1755 1754
1756 cp->result = DID_ERROR << 16; 1755 cp->result = DID_ERROR << 16;
1757 break; 1756 break;
1758 } 1757 }
1759 } else { 1758 } else {
1760 ql_log(ql_log_warn, vha, 0x301f, 1759 ql_log(ql_log_warn, vha, 0x301f,
1761 "Dropped frame(s) detected (0x%x " 1760 "Dropped frame(s) detected (0x%x "
1762 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1761 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1763 1762
1764 cp->result = DID_ERROR << 16 | lscsi_status; 1763 cp->result = DID_ERROR << 16 | lscsi_status;
1765 goto check_scsi_status; 1764 goto check_scsi_status;
1766 } 1765 }
1767 1766
1768 cp->result = DID_OK << 16 | lscsi_status; 1767 cp->result = DID_OK << 16 | lscsi_status;
1769 logit = 0; 1768 logit = 0;
1770 1769
1771 check_scsi_status: 1770 check_scsi_status:
1772 /* 1771 /*
1773 * Check to see if SCSI Status is non-zero. If so, report SCSI 1772 * Check to see if SCSI Status is non-zero. If so, report SCSI
1774 * Status. 1773 * Status.
1775 */ 1774 */
1776 if (lscsi_status != 0) { 1775 if (lscsi_status != 0) {
1777 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1776 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1778 ql_log(ql_log_warn, vha, 0x3020, 1777 ql_log(ql_log_warn, vha, 0x3020,
1779 "QUEUE FULL detected.\n"); 1778 "QUEUE FULL detected.\n");
1780 logit = 1; 1779 logit = 1;
1781 break; 1780 break;
1782 } 1781 }
1783 if (lscsi_status != SS_CHECK_CONDITION) 1782 if (lscsi_status != SS_CHECK_CONDITION)
1784 break; 1783 break;
1785 1784
1786 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1785 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1787 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1786 if (!(scsi_status & SS_SENSE_LEN_VALID))
1788 break; 1787 break;
1789 1788
1790 qla2x00_handle_sense(sp, sense_data, par_sense_len, 1789 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1791 sense_len, rsp); 1790 sense_len, rsp);
1792 } 1791 }
1793 break; 1792 break;
1794 1793
1795 case CS_PORT_LOGGED_OUT: 1794 case CS_PORT_LOGGED_OUT:
1796 case CS_PORT_CONFIG_CHG: 1795 case CS_PORT_CONFIG_CHG:
1797 case CS_PORT_BUSY: 1796 case CS_PORT_BUSY:
1798 case CS_INCOMPLETE: 1797 case CS_INCOMPLETE:
1799 case CS_PORT_UNAVAILABLE: 1798 case CS_PORT_UNAVAILABLE:
1800 case CS_TIMEOUT: 1799 case CS_TIMEOUT:
1801 case CS_RESET: 1800 case CS_RESET:
1802 1801
1803 /* 1802 /*
1804 * We are going to have the fc class block the rport 1803 * We are going to have the fc class block the rport
1805 * while we try to recover, so instruct the mid-layer 1804 * while we try to recover, so instruct the mid-layer
1806 * to requeue until the class decides how to handle this. 1805 * to requeue until the class decides how to handle this.
1807 */ 1806 */
1808 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1807 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1809 1808
1810 if (comp_status == CS_TIMEOUT) { 1809 if (comp_status == CS_TIMEOUT) {
1811 if (IS_FWI2_CAPABLE(ha)) 1810 if (IS_FWI2_CAPABLE(ha))
1812 break; 1811 break;
1813 else if ((le16_to_cpu(sts->status_flags) & 1812 else if ((le16_to_cpu(sts->status_flags) &
1814 SF_LOGOUT_SENT) == 0) 1813 SF_LOGOUT_SENT) == 0)
1815 break; 1814 break;
1816 } 1815 }
1817 1816
1818 ql_dbg(ql_dbg_io, vha, 0x3021, 1817 ql_dbg(ql_dbg_io, vha, 0x3021,
1819 "Port down status: port-state=0x%x.\n", 1818 "Port down status: port-state=0x%x.\n",
1820 atomic_read(&fcport->state)); 1819 atomic_read(&fcport->state));
1821 1820
1822 if (atomic_read(&fcport->state) == FCS_ONLINE) 1821 if (atomic_read(&fcport->state) == FCS_ONLINE)
1823 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1822 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1824 break; 1823 break;
1825 1824
1826 case CS_ABORTED: 1825 case CS_ABORTED:
1827 cp->result = DID_RESET << 16; 1826 cp->result = DID_RESET << 16;
1828 break; 1827 break;
1829 1828
1830 case CS_DIF_ERROR: 1829 case CS_DIF_ERROR:
1831 logit = qla2x00_handle_dif_error(sp, sts24); 1830 logit = qla2x00_handle_dif_error(sp, sts24);
1832 break; 1831 break;
1833 default: 1832 default:
1834 cp->result = DID_ERROR << 16; 1833 cp->result = DID_ERROR << 16;
1835 break; 1834 break;
1836 } 1835 }
1837 1836
1838 out: 1837 out:
1839 if (logit) 1838 if (logit)
1840 ql_dbg(ql_dbg_io, vha, 0x3022, 1839 ql_dbg(ql_dbg_io, vha, 0x3022,
1841 "FCP command status: 0x%x-0x%x (0x%x) " 1840 "FCP command status: 0x%x-0x%x (0x%x) "
1842 "oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1841 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1843 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1842 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1844 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0], 1843 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1845 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1844 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1846 resid_len, fw_resid_len); 1845 resid_len, fw_resid_len);
1847 1846
1848 if (rsp->status_srb == NULL) 1847 if (rsp->status_srb == NULL)
1849 qla2x00_sp_compl(ha, sp); 1848 qla2x00_sp_compl(ha, sp);
1850 } 1849 }
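Every branch of the switch above finishes by packing a Linux result word: the host byte (a DID_* value) in bits 23:16 and the SCSI status byte in bits 7:0, as in cp->result = DID_OK << 16 | lscsi_status. A sketch of packing and unpacking that word; the DID_* and SAM_STAT_* values follow their usual scsi.h definitions, while the two macros are local stand-ins:

#include <stdio.h>

#define DID_OK                   0x00
#define DID_ERROR                0x07
#define SAM_STAT_CHECK_CONDITION 0x02

#define RESULT_HOST(result)   (((result) >> 16) & 0xff)
#define RESULT_STATUS(result) ((result) & 0xff)

int main(void)
{
        /* What the handler builds on a CHECK CONDITION completion. */
        int result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;

        printf("host=0x%02x status=0x%02x\n",
            RESULT_HOST(result), RESULT_STATUS(result));
        return 0;
}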
1851 1850
1852 /** 1851 /**
1853 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 1852 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1854 * @rsp: response queue 1853 * @rsp: response queue
1855 * @pkt: Entry pointer 1854 * @pkt: Entry pointer
1856 * 1855 *
1857 * Extended sense data. 1856 * Extended sense data.
1858 */ 1857 */
1859 static void 1858 static void
1860 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 1859 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1861 { 1860 {
1862 uint8_t sense_sz = 0; 1861 uint8_t sense_sz = 0;
1863 struct qla_hw_data *ha = rsp->hw; 1862 struct qla_hw_data *ha = rsp->hw;
1864 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 1863 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1865 srb_t *sp = rsp->status_srb; 1864 srb_t *sp = rsp->status_srb;
1866 struct scsi_cmnd *cp; 1865 struct scsi_cmnd *cp;
1867 1866
1868 if (sp != NULL && sp->request_sense_length != 0) { 1867 if (sp != NULL && sp->request_sense_length != 0) {
1869 cp = sp->cmd; 1868 cp = sp->cmd;
1870 if (cp == NULL) { 1869 if (cp == NULL) {
1871 ql_log(ql_log_warn, vha, 0x3025, 1870 ql_log(ql_log_warn, vha, 0x3025,
1872 "cmd is NULL: already returned to OS (sp=%p).\n", 1871 "cmd is NULL: already returned to OS (sp=%p).\n",
1873 sp); 1872 sp);
1874 1873
1875 rsp->status_srb = NULL; 1874 rsp->status_srb = NULL;
1876 return; 1875 return;
1877 } 1876 }
1878 1877
1879 if (sp->request_sense_length > sizeof(pkt->data)) { 1878 if (sp->request_sense_length > sizeof(pkt->data)) {
1880 sense_sz = sizeof(pkt->data); 1879 sense_sz = sizeof(pkt->data);
1881 } else { 1880 } else {
1882 sense_sz = sp->request_sense_length; 1881 sense_sz = sp->request_sense_length;
1883 } 1882 }
1884 1883
1885 /* Move sense data. */ 1884 /* Move sense data. */
1886 if (IS_FWI2_CAPABLE(ha)) 1885 if (IS_FWI2_CAPABLE(ha))
1887 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1886 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1888 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1887 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1889 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 1888 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1890 sp->request_sense_ptr, sense_sz); 1889 sp->request_sense_ptr, sense_sz);
1891 1890
1892 sp->request_sense_ptr += sense_sz; 1891 sp->request_sense_ptr += sense_sz;
1893 sp->request_sense_length -= sense_sz; 1892 sp->request_sense_length -= sense_sz;
1894 1893
1895 /* Place command on done queue. */ 1894 /* Place command on done queue. */
1896 if (sp->request_sense_length == 0) { 1895 if (sp->request_sense_length == 0) {
1897 rsp->status_srb = NULL; 1896 rsp->status_srb = NULL;
1898 qla2x00_sp_compl(ha, sp); 1897 qla2x00_sp_compl(ha, sp);
1899 } 1898 }
1900 } 1899 }
1901 } 1900 }
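This is the consumer half of the arrangement set up in qla2x00_handle_sense(): each continuation IOCB delivers up to sizeof(pkt->data) more sense bytes, the cursor advances, and the command is completed only once the remaining length reaches zero. A sketch of the drain loop, reusing the illustrative cursor shape from the earlier sketch:

#include <stdio.h>
#include <string.h>

struct sense_cursor { unsigned char *ptr; unsigned int remaining; };

/* One continuation entry's worth of sense data; returns 1 when done. */
static int consume_chunk(struct sense_cursor *c, const unsigned char *data,
                         unsigned int chunk_max)
{
        unsigned int n = c->remaining < chunk_max ? c->remaining : chunk_max;

        memcpy(c->ptr, data, n);
        c->ptr += n;
        c->remaining -= n;
        return c->remaining == 0; /* 1: clear rsp->status_srb, complete sp */
}

int main(void)
{
        unsigned char buf[64], chunk[32];
        struct sense_cursor c = { .ptr = buf, .remaining = 40 };

        memset(chunk, 0xAA, sizeof(chunk));
        while (!consume_chunk(&c, chunk, sizeof(chunk)))
                printf("still pending: %u bytes\n", c.remaining);
        printf("sense complete\n");
        return 0;
}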
1902 1901
1903 /** 1902 /**
1904 * qla2x00_error_entry() - Process an error entry. 1903 * qla2x00_error_entry() - Process an error entry.
1905 * @vha: SCSI driver HA context 1904 * @vha: SCSI driver HA context
1906 * @rsp: response queue 1905 * @rsp: response queue
1907 * @pkt: Entry pointer 1906 * @pkt: Entry pointer
1907 */ 1906 */
1908 static void 1907 static void
1909 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 1908 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1910 { 1909 {
1911 srb_t *sp; 1910 srb_t *sp;
1912 struct qla_hw_data *ha = vha->hw; 1911 struct qla_hw_data *ha = vha->hw;
1913 uint32_t handle = LSW(pkt->handle); 1912 uint32_t handle = LSW(pkt->handle);
1914 uint16_t que = MSW(pkt->handle); 1913 uint16_t que = MSW(pkt->handle);
1915 struct req_que *req = ha->req_q_map[que]; 1914 struct req_que *req = ha->req_q_map[que];
1916 1915
1917 if (pkt->entry_status & RF_INV_E_ORDER) 1916 if (pkt->entry_status & RF_INV_E_ORDER)
1918 ql_dbg(ql_dbg_async, vha, 0x502a, 1917 ql_dbg(ql_dbg_async, vha, 0x502a,
1919 "Invalid Entry Order.\n"); 1918 "Invalid Entry Order.\n");
1920 else if (pkt->entry_status & RF_INV_E_COUNT) 1919 else if (pkt->entry_status & RF_INV_E_COUNT)
1921 ql_dbg(ql_dbg_async, vha, 0x502b, 1920 ql_dbg(ql_dbg_async, vha, 0x502b,
1922 "Invalid Entry Count.\n"); 1921 "Invalid Entry Count.\n");
1923 else if (pkt->entry_status & RF_INV_E_PARAM) 1922 else if (pkt->entry_status & RF_INV_E_PARAM)
1924 ql_dbg(ql_dbg_async, vha, 0x502c, 1923 ql_dbg(ql_dbg_async, vha, 0x502c,
1925 "Invalid Entry Parameter.\n"); 1924 "Invalid Entry Parameter.\n");
1926 else if (pkt->entry_status & RF_INV_E_TYPE) 1925 else if (pkt->entry_status & RF_INV_E_TYPE)
1927 ql_dbg(ql_dbg_async, vha, 0x502d, 1926 ql_dbg(ql_dbg_async, vha, 0x502d,
1928 "Invalid Entry Type.\n"); 1927 "Invalid Entry Type.\n");
1929 else if (pkt->entry_status & RF_BUSY) 1928 else if (pkt->entry_status & RF_BUSY)
1930 ql_dbg(ql_dbg_async, vha, 0x502e, 1929 ql_dbg(ql_dbg_async, vha, 0x502e,
1931 "Busy.\n"); 1930 "Busy.\n");
1932 else 1931 else
1933 ql_dbg(ql_dbg_async, vha, 0x502f, 1932 ql_dbg(ql_dbg_async, vha, 0x502f,
1934 "UNKNOWN flag error.\n"); 1933 "UNKNOWN flag error.\n");
1935 1934
1936 /* Validate handle. */ 1935 /* Validate handle. */
1937 if (handle < MAX_OUTSTANDING_COMMANDS) 1936 if (handle < MAX_OUTSTANDING_COMMANDS)
1938 sp = req->outstanding_cmds[handle]; 1937 sp = req->outstanding_cmds[handle];
1939 else 1938 else
1940 sp = NULL; 1939 sp = NULL;
1941 1940
1942 if (sp) { 1941 if (sp) {
1943 /* Free outstanding command slot. */ 1942 /* Free outstanding command slot. */
1944 req->outstanding_cmds[handle] = NULL; 1943 req->outstanding_cmds[handle] = NULL;
1945 1944
1946 /* Bad payload or header */ 1945 /* Bad payload or header */
1947 if (pkt->entry_status & 1946 if (pkt->entry_status &
1948 (RF_INV_E_ORDER | RF_INV_E_COUNT | 1947 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1949 RF_INV_E_PARAM | RF_INV_E_TYPE)) { 1948 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1950 sp->cmd->result = DID_ERROR << 16; 1949 sp->cmd->result = DID_ERROR << 16;
1951 } else if (pkt->entry_status & RF_BUSY) { 1950 } else if (pkt->entry_status & RF_BUSY) {
1952 sp->cmd->result = DID_BUS_BUSY << 16; 1951 sp->cmd->result = DID_BUS_BUSY << 16;
1953 } else { 1952 } else {
1954 sp->cmd->result = DID_ERROR << 16; 1953 sp->cmd->result = DID_ERROR << 16;
1955 } 1954 }
1956 qla2x00_sp_compl(ha, sp); 1955 qla2x00_sp_compl(ha, sp);
1957 1956
1958 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1957 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1959 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1958 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1960 || pkt->entry_type == COMMAND_TYPE_6) { 1959 || pkt->entry_type == COMMAND_TYPE_6) {
1961 ql_log(ql_log_warn, vha, 0x5030, 1960 ql_log(ql_log_warn, vha, 0x5030,
1962 "Error entry - invalid handle.\n"); 1961 "Error entry - invalid handle.\n");
1963 1962
1964 if (IS_QLA82XX(ha)) 1963 if (IS_QLA82XX(ha))
1965 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1964 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1966 else 1965 else
1967 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1966 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1968 qla2xxx_wake_dpc(vha); 1967 qla2xxx_wake_dpc(vha);
1969 } 1968 }
1970 } 1969 }
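The if/else ladder above reports one RF_* diagnostic per error entry. Since the flags are independent bits, a table-driven decode is an equivalent alternative shape; a sketch follows, with the RF_* bit positions invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RF_INV_E_ORDER (1u << 5) /* bit positions are illustrative only */
#define RF_INV_E_COUNT (1u << 4)
#define RF_INV_E_PARAM (1u << 3)
#define RF_INV_E_TYPE  (1u << 2)
#define RF_BUSY        (1u << 1)

static const struct { uint32_t flag; const char *msg; } rf_tab[] = {
        { RF_INV_E_ORDER, "Invalid Entry Order" },
        { RF_INV_E_COUNT, "Invalid Entry Count" },
        { RF_INV_E_PARAM, "Invalid Entry Parameter" },
        { RF_INV_E_TYPE,  "Invalid Entry Type" },
        { RF_BUSY,        "Busy" },
};

static void decode(uint32_t entry_status)
{
        for (size_t i = 0; i < sizeof(rf_tab) / sizeof(rf_tab[0]); i++)
                if (entry_status & rf_tab[i].flag) {
                        puts(rf_tab[i].msg);
                        return; /* first match only, like the ladder */
                }
        puts("UNKNOWN flag error");
}

int main(void)
{
        decode(RF_INV_E_PARAM | RF_BUSY); /* prints the parameter error */
        return 0;
}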
1971 1970
1972 /** 1971 /**
1973 * qla24xx_mbx_completion() - Process mailbox command completions. 1972 * qla24xx_mbx_completion() - Process mailbox command completions.
1974 * @vha: SCSI driver HA context 1973 * @vha: SCSI driver HA context
1975 * @mb0: Mailbox0 register 1974 * @mb0: Mailbox0 register
1976 */ 1975 */
1977 static void 1976 static void
1978 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 1977 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1979 { 1978 {
1980 uint16_t cnt; 1979 uint16_t cnt;
1981 uint16_t __iomem *wptr; 1980 uint16_t __iomem *wptr;
1982 struct qla_hw_data *ha = vha->hw; 1981 struct qla_hw_data *ha = vha->hw;
1983 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1982 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1984 1983
1985 /* Load return mailbox registers. */ 1984 /* Load return mailbox registers. */
1986 ha->flags.mbox_int = 1; 1985 ha->flags.mbox_int = 1;
1987 ha->mailbox_out[0] = mb0; 1986 ha->mailbox_out[0] = mb0;
1988 wptr = (uint16_t __iomem *)&reg->mailbox1; 1987 wptr = (uint16_t __iomem *)&reg->mailbox1;
1989 1988
1990 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 1989 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1991 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 1990 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1992 wptr++; 1991 wptr++;
1993 } 1992 }
1994 1993
1995 if (ha->mcp) { 1994 if (ha->mcp) {
1996 ql_dbg(ql_dbg_async, vha, 0x504d, 1995 ql_dbg(ql_dbg_async, vha, 0x504d,
1997 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); 1996 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1998 } else { 1997 } else {
1999 ql_dbg(ql_dbg_async, vha, 0x504e, 1998 ql_dbg(ql_dbg_async, vha, 0x504e,
2000 "MBX pointer ERROR.\n"); 1999 "MBX pointer ERROR.\n");
2001 } 2000 }
2002 } 2001 }
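The completion snapshot latches mailbox0 (delivered via the host-status word) and then walks the remaining mailbox registers into ha->mailbox_out[] so the waiting mailbox command can read them once mbox_int is set. A trivial sketch of the latch-into-array pattern against a fake register file:

#include <stdint.h>
#include <stdio.h>

#define MBX_COUNT 8

static uint16_t fake_regs[MBX_COUNT] = { 0x4000, 1, 2, 3, 4, 5, 6, 7 };
static uint16_t mailbox_out[MBX_COUNT];

static void mbx_completion(uint16_t mb0)
{
        mailbox_out[0] = mb0; /* mb0 arrives via the host-status word */
        for (unsigned int cnt = 1; cnt < MBX_COUNT; cnt++)
                mailbox_out[cnt] = fake_regs[cnt]; /* RD_REG_WORD(wptr++) */
}

int main(void)
{
        mbx_completion(fake_regs[0]);
        printf("mb0=0x%04x mb1=0x%04x\n", mailbox_out[0], mailbox_out[1]);
        return 0;
}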
2003 2002
2004 /** 2003 /**
2005 * qla24xx_process_response_queue() - Process response queue entries. 2004 * qla24xx_process_response_queue() - Process response queue entries.
2006 * @vha: SCSI driver HA context 2005 * @vha: SCSI driver HA context
2007 * @rsp: response queue 2006 * @rsp: response queue
2007 */ 2006 */
2008 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2007 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2009 struct rsp_que *rsp) 2008 struct rsp_que *rsp)
2010 { 2009 {
2011 struct sts_entry_24xx *pkt; 2010 struct sts_entry_24xx *pkt;
2012 struct qla_hw_data *ha = vha->hw; 2011 struct qla_hw_data *ha = vha->hw;
2013 2012
2014 if (!vha->flags.online) 2013 if (!vha->flags.online)
2015 return; 2014 return;
2016 2015
2017 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2016 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2018 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2017 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2019 2018
2020 rsp->ring_index++; 2019 rsp->ring_index++;
2021 if (rsp->ring_index == rsp->length) { 2020 if (rsp->ring_index == rsp->length) {
2022 rsp->ring_index = 0; 2021 rsp->ring_index = 0;
2023 rsp->ring_ptr = rsp->ring; 2022 rsp->ring_ptr = rsp->ring;
2024 } else { 2023 } else {
2025 rsp->ring_ptr++; 2024 rsp->ring_ptr++;
2026 } 2025 }
2027 2026
2028 if (pkt->entry_status != 0) { 2027 if (pkt->entry_status != 0) {
2029 ql_dbg(ql_dbg_async, vha, 0x5029, 2028 ql_dbg(ql_dbg_async, vha, 0x5029,
2030 "Process error entry.\n"); 2029 "Process error entry.\n");
2031 2030
2032 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2031 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2033 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2032 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2034 wmb(); 2033 wmb();
2035 continue; 2034 continue;
2036 } 2035 }
2037 2036
2038 switch (pkt->entry_type) { 2037 switch (pkt->entry_type) {
2039 case STATUS_TYPE: 2038 case STATUS_TYPE:
2040 qla2x00_status_entry(vha, rsp, pkt); 2039 qla2x00_status_entry(vha, rsp, pkt);
2041 break; 2040 break;
2042 case STATUS_CONT_TYPE: 2041 case STATUS_CONT_TYPE:
2043 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2042 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2044 break; 2043 break;
2045 case VP_RPT_ID_IOCB_TYPE: 2044 case VP_RPT_ID_IOCB_TYPE:
2046 qla24xx_report_id_acquisition(vha, 2045 qla24xx_report_id_acquisition(vha,
2047 (struct vp_rpt_id_entry_24xx *)pkt); 2046 (struct vp_rpt_id_entry_24xx *)pkt);
2048 break; 2047 break;
2049 case LOGINOUT_PORT_IOCB_TYPE: 2048 case LOGINOUT_PORT_IOCB_TYPE:
2050 qla24xx_logio_entry(vha, rsp->req, 2049 qla24xx_logio_entry(vha, rsp->req,
2051 (struct logio_entry_24xx *)pkt); 2050 (struct logio_entry_24xx *)pkt);
2052 break; 2051 break;
2053 case TSK_MGMT_IOCB_TYPE: 2052 case TSK_MGMT_IOCB_TYPE:
2054 qla24xx_tm_iocb_entry(vha, rsp->req, 2053 qla24xx_tm_iocb_entry(vha, rsp->req,
2055 (struct tsk_mgmt_entry *)pkt); 2054 (struct tsk_mgmt_entry *)pkt);
2056 break; 2055 break;
2057 case CT_IOCB_TYPE: 2056 case CT_IOCB_TYPE:
2058 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2057 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2059 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); 2058 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
2060 break; 2059 break;
2061 case ELS_IOCB_TYPE: 2060 case ELS_IOCB_TYPE:
2062 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2063 break; 2062 break;
2064 default: 2063 default:
2065 /* Type Not Supported. */ 2064 /* Type Not Supported. */
2066 ql_dbg(ql_dbg_async, vha, 0x5042, 2065 ql_dbg(ql_dbg_async, vha, 0x5042,
2067 "Received unknown response pkt type %x " 2066 "Received unknown response pkt type %x "
2068 "entry status=%x.\n", 2067 "entry status=%x.\n",
2069 pkt->entry_type, pkt->entry_status); 2068 pkt->entry_type, pkt->entry_status);
2070 break; 2069 break;
2071 } 2070 }
2072 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2071 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2073 wmb(); 2072 wmb();
2074 } 2073 }
2075 2074
2076 /* Adjust ring index */ 2075 /* Adjust ring index */
2077 if (IS_QLA82XX(ha)) { 2076 if (IS_QLA82XX(ha)) {
2078 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2077 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2079 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); 2078 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2080 } else 2079 } else
2081 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2080 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2082 } 2081 }
2083 2082
2084 static void 2083 static void
2085 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2084 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2086 { 2085 {
2087 int rval; 2086 int rval;
2088 uint32_t cnt; 2087 uint32_t cnt;
2089 struct qla_hw_data *ha = vha->hw; 2088 struct qla_hw_data *ha = vha->hw;
2090 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2089 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2091 2090
2092 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 2091 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
2093 return; 2092 return;
2094 2093
2095 rval = QLA_SUCCESS; 2094 rval = QLA_SUCCESS;
2096 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); 2095 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2097 RD_REG_DWORD(&reg->iobase_addr); 2096 RD_REG_DWORD(&reg->iobase_addr);
2098 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 2097 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2099 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 2098 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2100 rval == QLA_SUCCESS; cnt--) { 2099 rval == QLA_SUCCESS; cnt--) {
2101 if (cnt) { 2100 if (cnt) {
2102 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 2101 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2103 udelay(10); 2102 udelay(10);
2104 } else 2103 } else
2105 rval = QLA_FUNCTION_TIMEOUT; 2104 rval = QLA_FUNCTION_TIMEOUT;
2106 } 2105 }
2107 if (rval == QLA_SUCCESS) 2106 if (rval == QLA_SUCCESS)
2108 goto next_test; 2107 goto next_test;
2109 2108
2110 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 2109 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2111 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 2110 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2112 rval == QLA_SUCCESS; cnt--) { 2111 rval == QLA_SUCCESS; cnt--) {
2113 if (cnt) { 2112 if (cnt) {
2114 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 2113 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2115 udelay(10); 2114 udelay(10);
2116 } else 2115 } else
2117 rval = QLA_FUNCTION_TIMEOUT; 2116 rval = QLA_FUNCTION_TIMEOUT;
2118 } 2117 }
2119 if (rval != QLA_SUCCESS) 2118 if (rval != QLA_SUCCESS)
2120 goto done; 2119 goto done;
2121 2120
2122 next_test: 2121 next_test:
2123 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2122 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2124 ql_log(ql_log_info, vha, 0x504c, 2123 ql_log(ql_log_info, vha, 0x504c,
2125 "Additional code -- 0x55AA.\n"); 2124 "Additional code -- 0x55AA.\n");
2126 2125
2127 done: 2126 done:
2128 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2127 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2129 RD_REG_DWORD(&reg->iobase_window); 2128 RD_REG_DWORD(&reg->iobase_window);
2130 } 2129 }
2131 2130
2132 /** 2131 /**
2133 * qla24xx_intr_handler() - Process interrupts for the ISP24xx. 2132 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2134 * @irq: interrupt number 2133 * @irq: interrupt number
2135 * @dev_id: SCSI driver HA context 2134 * @dev_id: SCSI driver HA context
2136 * 2135 *
2137 * Called by system whenever the host adapter generates an interrupt. 2136 * Called by system whenever the host adapter generates an interrupt.
2138 * 2137 *
2139 * Returns handled flag. 2138 * Returns handled flag.
2140 */ 2139 */
2141 irqreturn_t 2140 irqreturn_t
2142 qla24xx_intr_handler(int irq, void *dev_id) 2141 qla24xx_intr_handler(int irq, void *dev_id)
2143 { 2142 {
2144 scsi_qla_host_t *vha; 2143 scsi_qla_host_t *vha;
2145 struct qla_hw_data *ha; 2144 struct qla_hw_data *ha;
2146 struct device_reg_24xx __iomem *reg; 2145 struct device_reg_24xx __iomem *reg;
2147 int status; 2146 int status;
2148 unsigned long iter; 2147 unsigned long iter;
2149 uint32_t stat; 2148 uint32_t stat;
2150 uint32_t hccr; 2149 uint32_t hccr;
2151 uint16_t mb[4]; 2150 uint16_t mb[4];
2152 struct rsp_que *rsp; 2151 struct rsp_que *rsp;
2153 unsigned long flags; 2152 unsigned long flags;
2154 2153
2155 rsp = (struct rsp_que *) dev_id; 2154 rsp = (struct rsp_que *) dev_id;
2156 if (!rsp) { 2155 if (!rsp) {
2157 printk(KERN_INFO 2156 printk(KERN_INFO
2158 "%s(): NULL response queue pointer.\n", __func__); 2157 "%s(): NULL response queue pointer.\n", __func__);
2159 return IRQ_NONE; 2158 return IRQ_NONE;
2160 } 2159 }
2161 2160
2162 ha = rsp->hw; 2161 ha = rsp->hw;
2163 reg = &ha->iobase->isp24; 2162 reg = &ha->iobase->isp24;
2164 status = 0; 2163 status = 0;
2165 2164
2166 if (unlikely(pci_channel_offline(ha->pdev))) 2165 if (unlikely(pci_channel_offline(ha->pdev)))
2167 return IRQ_HANDLED; 2166 return IRQ_HANDLED;
2168 2167
2169 spin_lock_irqsave(&ha->hardware_lock, flags); 2168 spin_lock_irqsave(&ha->hardware_lock, flags);
2170 vha = pci_get_drvdata(ha->pdev); 2169 vha = pci_get_drvdata(ha->pdev);
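/* Service at most 50 events per invocation; the fixed bound caps the
 * time spent inside hardware_lock should the interrupt source misbehave. */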
2171 for (iter = 50; iter--; ) { 2170 for (iter = 50; iter--; ) {
2172 stat = RD_REG_DWORD(&reg->host_status); 2171 stat = RD_REG_DWORD(&reg->host_status);
2173 if (stat & HSRX_RISC_PAUSED) { 2172 if (stat & HSRX_RISC_PAUSED) {
2174 if (unlikely(pci_channel_offline(ha->pdev))) 2173 if (unlikely(pci_channel_offline(ha->pdev)))
2175 break; 2174 break;
2176 2175
2177 hccr = RD_REG_DWORD(&reg->hccr); 2176 hccr = RD_REG_DWORD(&reg->hccr);
2178 2177
2179 ql_log(ql_log_warn, vha, 0x504b, 2178 ql_log(ql_log_warn, vha, 0x504b,
2180 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2179 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2181 hccr); 2180 hccr);
2182 2181
2183 qla2xxx_check_risc_status(vha); 2182 qla2xxx_check_risc_status(vha);
2184 2183
2185 ha->isp_ops->fw_dump(vha, 1); 2184 ha->isp_ops->fw_dump(vha, 1);
2186 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2185 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2187 break; 2186 break;
2188 } else if ((stat & HSRX_RISC_INT) == 0) 2187 } else if ((stat & HSRX_RISC_INT) == 0)
2189 break; 2188 break;
2190 2189
2191 switch (stat & 0xff) { 2190 switch (stat & 0xff) {
2192 case 0x1: 2191 case 0x1:
2193 case 0x2: 2192 case 0x2:
2194 case 0x10: 2193 case 0x10:
2195 case 0x11: 2194 case 0x11:
2196 qla24xx_mbx_completion(vha, MSW(stat)); 2195 qla24xx_mbx_completion(vha, MSW(stat));
2197 status |= MBX_INTERRUPT; 2196 status |= MBX_INTERRUPT;
2198 2197
2199 break; 2198 break;
2200 case 0x12: 2199 case 0x12:
2201 mb[0] = MSW(stat); 2200 mb[0] = MSW(stat);
2202 mb[1] = RD_REG_WORD(&reg->mailbox1); 2201 mb[1] = RD_REG_WORD(&reg->mailbox1);
2203 mb[2] = RD_REG_WORD(&reg->mailbox2); 2202 mb[2] = RD_REG_WORD(&reg->mailbox2);
2204 mb[3] = RD_REG_WORD(&reg->mailbox3); 2203 mb[3] = RD_REG_WORD(&reg->mailbox3);
2205 qla2x00_async_event(vha, rsp, mb); 2204 qla2x00_async_event(vha, rsp, mb);
2206 break; 2205 break;
2207 case 0x13: 2206 case 0x13:
2208 case 0x14: 2207 case 0x14:
2209 qla24xx_process_response_queue(vha, rsp); 2208 qla24xx_process_response_queue(vha, rsp);
2210 break; 2209 break;
2211 default: 2210 default:
2212 ql_dbg(ql_dbg_async, vha, 0x504f, 2211 ql_dbg(ql_dbg_async, vha, 0x504f,
2213 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2212 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2214 break; 2213 break;
2215 } 2214 }
2216 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2215 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2217 RD_REG_DWORD_RELAXED(&reg->hccr); 2216 RD_REG_DWORD_RELAXED(&reg->hccr);
2218 } 2217 }
2219 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2218 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2220 2219
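/* If a mailbox command is sleeping on its completion interrupt
 * (MBX_INTR_WAIT), note that it arrived and wake the waiter. */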
2221 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2220 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2222 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2221 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2223 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2222 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2224 complete(&ha->mbx_intr_comp); 2223 complete(&ha->mbx_intr_comp);
2225 } 2224 }
2226 2225
2227 return IRQ_HANDLED; 2226 return IRQ_HANDLED;
2228 } 2227 }
2229 2228
2230 static irqreturn_t 2229 static irqreturn_t
2231 qla24xx_msix_rsp_q(int irq, void *dev_id) 2230 qla24xx_msix_rsp_q(int irq, void *dev_id)
2232 { 2231 {
2233 struct qla_hw_data *ha; 2232 struct qla_hw_data *ha;
2234 struct rsp_que *rsp; 2233 struct rsp_que *rsp;
2235 struct device_reg_24xx __iomem *reg; 2234 struct device_reg_24xx __iomem *reg;
2236 struct scsi_qla_host *vha; 2235 struct scsi_qla_host *vha;
2237 unsigned long flags; 2236 unsigned long flags;
2238 2237
2239 rsp = (struct rsp_que *) dev_id; 2238 rsp = (struct rsp_que *) dev_id;
2240 if (!rsp) { 2239 if (!rsp) {
2241 printk(KERN_INFO 2240 printk(KERN_INFO
2242 "%s(): NULL response queue pointer.\n", __func__); 2241 "%s(): NULL response queue pointer.\n", __func__);
2243 return IRQ_NONE; 2242 return IRQ_NONE;
2244 } 2243 }
2245 ha = rsp->hw; 2244 ha = rsp->hw;
2246 reg = &ha->iobase->isp24; 2245 reg = &ha->iobase->isp24;
2247 2246
2248 spin_lock_irqsave(&ha->hardware_lock, flags); 2247 spin_lock_irqsave(&ha->hardware_lock, flags);
2249 2248
2250 vha = pci_get_drvdata(ha->pdev); 2249 vha = pci_get_drvdata(ha->pdev);
2251 qla24xx_process_response_queue(vha, rsp); 2250 qla24xx_process_response_queue(vha, rsp);
2252 if (!ha->flags.disable_msix_handshake) { 2251 if (!ha->flags.disable_msix_handshake) {
2253 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2252 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2254 RD_REG_DWORD_RELAXED(&reg->hccr); 2253 RD_REG_DWORD_RELAXED(&reg->hccr);
2255 } 2254 }
2256 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2255 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2257 2256
2258 return IRQ_HANDLED; 2257 return IRQ_HANDLED;
2259 } 2258 }
2260 2259
2261 static irqreturn_t 2260 static irqreturn_t
2262 qla25xx_msix_rsp_q(int irq, void *dev_id) 2261 qla25xx_msix_rsp_q(int irq, void *dev_id)
2263 { 2262 {
2264 struct qla_hw_data *ha; 2263 struct qla_hw_data *ha;
2265 struct rsp_que *rsp; 2264 struct rsp_que *rsp;
2266 struct device_reg_24xx __iomem *reg; 2265 struct device_reg_24xx __iomem *reg;
2267 unsigned long flags; 2266 unsigned long flags;
2268 2267
2269 rsp = (struct rsp_que *) dev_id; 2268 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) { 2269 if (!rsp) {
2271 printk(KERN_INFO 2270 printk(KERN_INFO
2272 "%s(): NULL response queue pointer.\n", __func__); 2271 "%s(): NULL response queue pointer.\n", __func__);
2273 return IRQ_NONE; 2272 return IRQ_NONE;
2274 } 2273 }
2275 ha = rsp->hw; 2274 ha = rsp->hw;
2276 2275
2277 /* Clear the interrupt, if enabled, for this response queue */ 2276 /* Clear the interrupt, if enabled, for this response queue */
2278 if (rsp->options & ~BIT_6) { 2277 if (rsp->options & ~BIT_6) {
2279 reg = &ha->iobase->isp24; 2278 reg = &ha->iobase->isp24;
2280 spin_lock_irqsave(&ha->hardware_lock, flags); 2279 spin_lock_irqsave(&ha->hardware_lock, flags);
2281 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2280 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2282 RD_REG_DWORD_RELAXED(&reg->hccr); 2281 RD_REG_DWORD_RELAXED(&reg->hccr);
2283 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2282 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2284 } 2283 }
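/* Defer the actual response-queue processing to a work item pinned to
 * the CPU matching this queue (rsp->id - 1). */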
2285 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2284 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2286 2285
2287 return IRQ_HANDLED; 2286 return IRQ_HANDLED;
2288 } 2287 }
2289 2288
2290 static irqreturn_t 2289 static irqreturn_t
2291 qla24xx_msix_default(int irq, void *dev_id) 2290 qla24xx_msix_default(int irq, void *dev_id)
2292 { 2291 {
2293 scsi_qla_host_t *vha; 2292 scsi_qla_host_t *vha;
2294 struct qla_hw_data *ha; 2293 struct qla_hw_data *ha;
2295 struct rsp_que *rsp; 2294 struct rsp_que *rsp;
2296 struct device_reg_24xx __iomem *reg; 2295 struct device_reg_24xx __iomem *reg;
2297 int status; 2296 int status;
2298 uint32_t stat; 2297 uint32_t stat;
2299 uint32_t hccr; 2298 uint32_t hccr;
2300 uint16_t mb[4]; 2299 uint16_t mb[4];
2301 unsigned long flags; 2300 unsigned long flags;
2302 2301
2303 rsp = (struct rsp_que *) dev_id; 2302 rsp = (struct rsp_que *) dev_id;
2304 if (!rsp) { 2303 if (!rsp) {
2305 printk(KERN_INFO 2304 printk(KERN_INFO
2306 "%s(): NULL response queue pointer.\n", __func__); 2305 "%s(): NULL response queue pointer.\n", __func__);
2307 return IRQ_NONE; 2306 return IRQ_NONE;
2308 } 2307 }
2309 ha = rsp->hw; 2308 ha = rsp->hw;
2310 reg = &ha->iobase->isp24; 2309 reg = &ha->iobase->isp24;
2311 status = 0; 2310 status = 0;
2312 2311
2313 spin_lock_irqsave(&ha->hardware_lock, flags); 2312 spin_lock_irqsave(&ha->hardware_lock, flags);
2314 vha = pci_get_drvdata(ha->pdev); 2313 vha = pci_get_drvdata(ha->pdev);
2315 do { 2314 do {
2316 stat = RD_REG_DWORD(&reg->host_status); 2315 stat = RD_REG_DWORD(&reg->host_status);
2317 if (stat & HSRX_RISC_PAUSED) { 2316 if (stat & HSRX_RISC_PAUSED) {
2318 if (unlikely(pci_channel_offline(ha->pdev))) 2317 if (unlikely(pci_channel_offline(ha->pdev)))
2319 break; 2318 break;
2320 2319
2321 hccr = RD_REG_DWORD(&reg->hccr); 2320 hccr = RD_REG_DWORD(&reg->hccr);
2322 2321
2323 ql_log(ql_log_info, vha, 0x5050, 2322 ql_log(ql_log_info, vha, 0x5050,
2324 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2323 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2325 hccr); 2324 hccr);
2326 2325
2327 qla2xxx_check_risc_status(vha); 2326 qla2xxx_check_risc_status(vha);
2328 2327
2329 ha->isp_ops->fw_dump(vha, 1); 2328 ha->isp_ops->fw_dump(vha, 1);
2330 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2329 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2331 break; 2330 break;
2332 } else if ((stat & HSRX_RISC_INT) == 0) 2331 } else if ((stat & HSRX_RISC_INT) == 0)
2333 break; 2332 break;
2334 2333
2335 switch (stat & 0xff) { 2334 switch (stat & 0xff) {
2336 case 0x1: 2335 case 0x1:
2337 case 0x2: 2336 case 0x2:
2338 case 0x10: 2337 case 0x10:
2339 case 0x11: 2338 case 0x11:
2340 qla24xx_mbx_completion(vha, MSW(stat)); 2339 qla24xx_mbx_completion(vha, MSW(stat));
2341 status |= MBX_INTERRUPT; 2340 status |= MBX_INTERRUPT;
2342 2341
2343 break; 2342 break;
2344 case 0x12: 2343 case 0x12:
2345 mb[0] = MSW(stat); 2344 mb[0] = MSW(stat);
2346 mb[1] = RD_REG_WORD(&reg->mailbox1); 2345 mb[1] = RD_REG_WORD(&reg->mailbox1);
2347 mb[2] = RD_REG_WORD(&reg->mailbox2); 2346 mb[2] = RD_REG_WORD(&reg->mailbox2);
2348 mb[3] = RD_REG_WORD(&reg->mailbox3); 2347 mb[3] = RD_REG_WORD(&reg->mailbox3);
2349 qla2x00_async_event(vha, rsp, mb); 2348 qla2x00_async_event(vha, rsp, mb);
2350 break; 2349 break;
2351 case 0x13: 2350 case 0x13:
2352 case 0x14: 2351 case 0x14:
2353 qla24xx_process_response_queue(vha, rsp); 2352 qla24xx_process_response_queue(vha, rsp);
2354 break; 2353 break;
2355 default: 2354 default:
2356 ql_dbg(ql_dbg_async, vha, 0x5051, 2355 ql_dbg(ql_dbg_async, vha, 0x5051,
2357 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2356 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2358 break; 2357 break;
2359 } 2358 }
2360 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2359 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2361 } while (0); 2360 } while (0);
2362 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2361 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2363 2362
2364 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2363 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2365 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2364 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2366 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2365 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2367 complete(&ha->mbx_intr_comp); 2366 complete(&ha->mbx_intr_comp);
2368 } 2367 }
2369 return IRQ_HANDLED; 2368 return IRQ_HANDLED;
2370 } 2369 }
2371 2370
2372 /* Interrupt handling helpers. */ 2371 /* Interrupt handling helpers. */
2373 2372
2374 struct qla_init_msix_entry { 2373 struct qla_init_msix_entry {
2375 const char *name; 2374 const char *name;
2376 irq_handler_t handler; 2375 irq_handler_t handler;
2377 }; 2376 };
2378 2377
2379 static struct qla_init_msix_entry msix_entries[3] = { 2378 static struct qla_init_msix_entry msix_entries[3] = {
2380 { "qla2xxx (default)", qla24xx_msix_default }, 2379 { "qla2xxx (default)", qla24xx_msix_default },
2381 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2380 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2382 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2381 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2383 }; 2382 };
2384 2383
2385 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2384 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2386 { "qla2xxx (default)", qla82xx_msix_default }, 2385 { "qla2xxx (default)", qla82xx_msix_default },
2387 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2386 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2388 }; 2387 };
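/* Slots 0 and 1 of either table are bound to the two base-queue vectors
 * in qla24xx_enable_msix(); slot 2 of msix_entries ("multiq") is
 * requested per additional response queue by qla25xx_request_irq(). */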
2389 2388
2390 static void 2389 static void
2391 qla24xx_disable_msix(struct qla_hw_data *ha) 2390 qla24xx_disable_msix(struct qla_hw_data *ha)
2392 { 2391 {
2393 int i; 2392 int i;
2394 struct qla_msix_entry *qentry; 2393 struct qla_msix_entry *qentry;
2395 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2394 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2396 2395
2397 for (i = 0; i < ha->msix_count; i++) { 2396 for (i = 0; i < ha->msix_count; i++) {
2398 qentry = &ha->msix_entries[i]; 2397 qentry = &ha->msix_entries[i];
2399 if (qentry->have_irq) 2398 if (qentry->have_irq)
2400 free_irq(qentry->vector, qentry->rsp); 2399 free_irq(qentry->vector, qentry->rsp);
2401 } 2400 }
2402 pci_disable_msix(ha->pdev); 2401 pci_disable_msix(ha->pdev);
2403 kfree(ha->msix_entries); 2402 kfree(ha->msix_entries);
2404 ha->msix_entries = NULL; 2403 ha->msix_entries = NULL;
2405 ha->flags.msix_enabled = 0; 2404 ha->flags.msix_enabled = 0;
2406 ql_dbg(ql_dbg_init, vha, 0x0042, 2405 ql_dbg(ql_dbg_init, vha, 0x0042,
2407 "Disabled the MSI.\n"); 2406 "Disabled the MSI.\n");
2408 } 2407 }
2409 2408
2410 static int 2409 static int
2411 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2410 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2412 { 2411 {
2413 #define MIN_MSIX_COUNT 2 2412 #define MIN_MSIX_COUNT 2
2414 int i, ret; 2413 int i, ret;
2415 struct msix_entry *entries; 2414 struct msix_entry *entries;
2416 struct qla_msix_entry *qentry; 2415 struct qla_msix_entry *qentry;
2417 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2416 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2418 2417
2419 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2418 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2420 GFP_KERNEL); 2419 GFP_KERNEL);
2421 if (!entries) { 2420 if (!entries) {
2422 ql_log(ql_log_warn, vha, 0x00bc, 2421 ql_log(ql_log_warn, vha, 0x00bc,
2423 "Failed to allocate memory for msix_entry.\n"); 2422 "Failed to allocate memory for msix_entry.\n");
2424 return -ENOMEM; 2423 return -ENOMEM;
2425 } 2424 }
2426 2425
2427 for (i = 0; i < ha->msix_count; i++) 2426 for (i = 0; i < ha->msix_count; i++)
2428 entries[i].entry = i; 2427 entries[i].entry = i;
2429 2428
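/* pci_enable_msix() returns 0 on success, a negative errno on hard
 * failure, or a positive count of vectors actually available; when at
 * least MIN_MSIX_COUNT remain, retry the allocation at that count. */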
2430 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2429 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2431 if (ret) { 2430 if (ret) {
2432 if (ret < MIN_MSIX_COUNT) 2431 if (ret < MIN_MSIX_COUNT)
2433 goto msix_failed; 2432 goto msix_failed;
2434 2433
2435 ql_log(ql_log_warn, vha, 0x00c6, 2434 ql_log(ql_log_warn, vha, 0x00c6,
2436 "MSI-X: Failed to enable support " 2435 "MSI-X: Failed to enable support "
2437 "-- %d/%d\n Retry with %d vectors.\n", 2436 "-- %d/%d\n Retry with %d vectors.\n",
2438 ha->msix_count, ret, ret); 2437 ha->msix_count, ret, ret);
2439 ha->msix_count = ret; 2438 ha->msix_count = ret;
2440 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2439 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2441 if (ret) { 2440 if (ret) {
2442 msix_failed: 2441 msix_failed:
2443 ql_log(ql_log_fatal, vha, 0x00c7, 2442 ql_log(ql_log_fatal, vha, 0x00c7,
2444 "MSI-X: Failed to enable support, " 2443 "MSI-X: Failed to enable support, "
2445 "giving up -- %d/%d.\n", 2444 "giving up -- %d/%d.\n",
2446 ha->msix_count, ret); 2445 ha->msix_count, ret);
2447 goto msix_out; 2446 goto msix_out;
2448 } 2447 }
2449 ha->max_rsp_queues = ha->msix_count - 1; 2448 ha->max_rsp_queues = ha->msix_count - 1;
2450 } 2449 }
2451 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2450 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2452 ha->msix_count, GFP_KERNEL); 2451 ha->msix_count, GFP_KERNEL);
2453 if (!ha->msix_entries) { 2452 if (!ha->msix_entries) {
2454 ql_log(ql_log_fatal, vha, 0x00c8, 2453 ql_log(ql_log_fatal, vha, 0x00c8,
2455 "Failed to allocate memory for ha->msix_entries.\n"); 2454 "Failed to allocate memory for ha->msix_entries.\n");
2456 ret = -ENOMEM; 2455 ret = -ENOMEM;
2457 goto msix_out; 2456 goto msix_out;
2458 } 2457 }
2459 ha->flags.msix_enabled = 1; 2458 ha->flags.msix_enabled = 1;
2460 2459
2461 for (i = 0; i < ha->msix_count; i++) { 2460 for (i = 0; i < ha->msix_count; i++) {
2462 qentry = &ha->msix_entries[i]; 2461 qentry = &ha->msix_entries[i];
2463 qentry->vector = entries[i].vector; 2462 qentry->vector = entries[i].vector;
2464 qentry->entry = entries[i].entry; 2463 qentry->entry = entries[i].entry;
2465 qentry->have_irq = 0; 2464 qentry->have_irq = 0;
2466 qentry->rsp = NULL; 2465 qentry->rsp = NULL;
2467 } 2466 }
2468 2467
2469 /* Enable MSI-X vectors for the base queue */ 2468 /* Enable MSI-X vectors for the base queue */
2470 for (i = 0; i < 2; i++) { 2469 for (i = 0; i < 2; i++) {
2471 qentry = &ha->msix_entries[i]; 2470 qentry = &ha->msix_entries[i];
2472 if (IS_QLA82XX(ha)) { 2471 if (IS_QLA82XX(ha)) {
2473 ret = request_irq(qentry->vector, 2472 ret = request_irq(qentry->vector,
2474 qla82xx_msix_entries[i].handler, 2473 qla82xx_msix_entries[i].handler,
2475 0, qla82xx_msix_entries[i].name, rsp); 2474 0, qla82xx_msix_entries[i].name, rsp);
2476 } else { 2475 } else {
2477 ret = request_irq(qentry->vector, 2476 ret = request_irq(qentry->vector,
2478 msix_entries[i].handler, 2477 msix_entries[i].handler,
2479 0, msix_entries[i].name, rsp); 2478 0, msix_entries[i].name, rsp);
2480 } 2479 }
2481 if (ret) { 2480 if (ret) {
2482 ql_log(ql_log_fatal, vha, 0x00cb, 2481 ql_log(ql_log_fatal, vha, 0x00cb,
2483 "MSI-X: unable to register handler -- %x/%d.\n", 2482 "MSI-X: unable to register handler -- %x/%d.\n",
2484 qentry->vector, ret); 2483 qentry->vector, ret);
2485 qla24xx_disable_msix(ha); 2484 qla24xx_disable_msix(ha);
2486 ha->mqenable = 0; 2485 ha->mqenable = 0;
2487 goto msix_out; 2486 goto msix_out;
2488 } 2487 }
2489 qentry->have_irq = 1; 2488 qentry->have_irq = 1;
2490 qentry->rsp = rsp; 2489 qentry->rsp = rsp;
2491 rsp->msix = qentry; 2490 rsp->msix = qentry;
2492 } 2491 }
2493 2492
2494 /* Enable MSI-X vector for response queue update for queue 0 */ 2493 /* Enable MSI-X vector for response queue update for queue 0 */
2495 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2494 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2496 ha->mqenable = 1; 2495 ha->mqenable = 1;
2497 ql_dbg(ql_dbg_multiq, vha, 0xc005, 2496 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2498 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2497 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2499 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2498 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2500 ql_dbg(ql_dbg_init, vha, 0x0055, 2499 ql_dbg(ql_dbg_init, vha, 0x0055,
2501 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2500 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2502 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2501 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2503 2502
2504 msix_out: 2503 msix_out:
2505 kfree(entries); 2504 kfree(entries);
2506 return ret; 2505 return ret;
2507 } 2506 }
2508 2507
2509 int 2508 int
2510 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 2509 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2511 { 2510 {
2512 int ret; 2511 int ret;
2513 device_reg_t __iomem *reg = ha->iobase; 2512 device_reg_t __iomem *reg = ha->iobase;
2514 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2513 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2515 2514
2516 /* If possible, enable MSI-X. */ 2515 /* If possible, enable MSI-X. */
2517 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2516 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2518 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) 2517 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2519 goto skip_msi; 2518 goto skip_msi;
2520 2519
2521 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2520 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2522 (ha->pdev->subsystem_device == 0x7040 || 2521 (ha->pdev->subsystem_device == 0x7040 ||
2523 ha->pdev->subsystem_device == 0x7041 || 2522 ha->pdev->subsystem_device == 0x7041 ||
2524 ha->pdev->subsystem_device == 0x1705)) { 2523 ha->pdev->subsystem_device == 0x1705)) {
2525 ql_log(ql_log_warn, vha, 0x0034, 2524 ql_log(ql_log_warn, vha, 0x0034,
2526 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 2525 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2527 ha->pdev->subsystem_vendor, 2526 ha->pdev->subsystem_vendor,
2528 ha->pdev->subsystem_device); 2527 ha->pdev->subsystem_device);
2529 goto skip_msi; 2528 goto skip_msi;
2530 } 2529 }
2531 2530
2532 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2531 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2533 ql_log(ql_log_warn, vha, 0x0035, 2532 ql_log(ql_log_warn, vha, 0x0035,
2534 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2533 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2535 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2534 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2536 goto skip_msix; 2535 goto skip_msix;
2537 } 2536 }
2538 2537
2539 ret = qla24xx_enable_msix(ha, rsp); 2538 ret = qla24xx_enable_msix(ha, rsp);
2540 if (!ret) { 2539 if (!ret) {
2541 ql_dbg(ql_dbg_init, vha, 0x0036, 2540 ql_dbg(ql_dbg_init, vha, 0x0036,
2542 "MSI-X: Enabled (0x%X, 0x%X).\n", 2541 "MSI-X: Enabled (0x%X, 0x%X).\n",
2543 ha->chip_revision, ha->fw_attributes); 2542 ha->chip_revision, ha->fw_attributes);
2544 goto clear_risc_ints; 2543 goto clear_risc_ints;
2545 } 2544 }
2546 ql_log(ql_log_info, vha, 0x0037, 2545 ql_log(ql_log_info, vha, 0x0037,
2547 "MSI-X Falling back-to MSI mode -%d.\n", ret); 2546 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2548 skip_msix: 2547 skip_msix:
2549 2548
2550 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2549 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2551 !IS_QLA8001(ha)) 2550 !IS_QLA8001(ha))
2552 goto skip_msi; 2551 goto skip_msi;
2553 2552
2554 ret = pci_enable_msi(ha->pdev); 2553 ret = pci_enable_msi(ha->pdev);
2555 if (!ret) { 2554 if (!ret) {
2556 ql_dbg(ql_dbg_init, vha, 0x0038, 2555 ql_dbg(ql_dbg_init, vha, 0x0038,
2557 "MSI: Enabled.\n"); 2556 "MSI: Enabled.\n");
2558 ha->flags.msi_enabled = 1; 2557 ha->flags.msi_enabled = 1;
2559 } else 2558 } else
2560 ql_log(ql_log_warn, vha, 0x0039, 2559 ql_log(ql_log_warn, vha, 0x0039,
2561 "MSI-X; Falling back-to INTa mode -- %d.\n", ret); 2560 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2562 skip_msi: 2561 skip_msi:
2563 2562
2564 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2563 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2565 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2564 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2566 QLA2XXX_DRIVER_NAME, rsp); 2565 QLA2XXX_DRIVER_NAME, rsp);
2567 if (ret) { 2566 if (ret) {
2568 ql_log(ql_log_warn, vha, 0x003a, 2567 ql_log(ql_log_warn, vha, 0x003a,
2569 "Failed to reserve interrupt %d already in use.\n", 2568 "Failed to reserve interrupt %d already in use.\n",
2570 ha->pdev->irq); 2569 ha->pdev->irq);
2571 goto fail; 2570 goto fail;
2572 } 2571 }
2573 2572
2574 clear_risc_ints: 2573 clear_risc_ints:
2575 2574
2576 /* 2575 /*
2577 * FIXME: Noted that 8014s were being dropped during NK testing. 2576 * FIXME: Noted that 8014s were being dropped during NK testing.
2578 * Timing deltas during MSI-X/INTa transitions? 2577 * Timing deltas during MSI-X/INTa transitions?
2579 */ 2578 */
2580 if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) 2579 if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2581 goto fail; 2580 goto fail;
2582 spin_lock_irq(&ha->hardware_lock); 2581 spin_lock_irq(&ha->hardware_lock);
2583 if (IS_FWI2_CAPABLE(ha)) { 2582 if (IS_FWI2_CAPABLE(ha)) {
2584 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2583 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2585 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); 2584 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2586 } else { 2585 } else {
2587 WRT_REG_WORD(&reg->isp.semaphore, 0); 2586 WRT_REG_WORD(&reg->isp.semaphore, 0);
2588 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT); 2587 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2589 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); 2588 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2590 } 2589 }
2591 spin_unlock_irq(&ha->hardware_lock); 2590 spin_unlock_irq(&ha->hardware_lock);
2592 2591
2593 fail: 2592 fail:
2594 return ret; 2593 return ret;
2595 } 2594 }
2596 2595
2597 void 2596 void
2598 qla2x00_free_irqs(scsi_qla_host_t *vha) 2597 qla2x00_free_irqs(scsi_qla_host_t *vha)
2599 { 2598 {
2600 struct qla_hw_data *ha = vha->hw; 2599 struct qla_hw_data *ha = vha->hw;
2601 struct rsp_que *rsp = ha->rsp_q_map[0]; 2600 struct rsp_que *rsp = ha->rsp_q_map[0];
2602 2601
2603 if (ha->flags.msix_enabled) 2602 if (ha->flags.msix_enabled)
2604 qla24xx_disable_msix(ha); 2603 qla24xx_disable_msix(ha);
2605 else if (ha->flags.msi_enabled) { 2604 else if (ha->flags.msi_enabled) {
2606 free_irq(ha->pdev->irq, rsp); 2605 free_irq(ha->pdev->irq, rsp);
2607 pci_disable_msi(ha->pdev); 2606 pci_disable_msi(ha->pdev);
2608 } else 2607 } else
2609 free_irq(ha->pdev->irq, rsp); 2608 free_irq(ha->pdev->irq, rsp);
2610 } 2609 }
2611 2610
2612 2611
2613 int qla25xx_request_irq(struct rsp_que *rsp) 2612 int qla25xx_request_irq(struct rsp_que *rsp)
2614 { 2613 {
2615 struct qla_hw_data *ha = rsp->hw; 2614 struct qla_hw_data *ha = rsp->hw;
2616 struct qla_init_msix_entry *intr = &msix_entries[2]; 2615 struct qla_init_msix_entry *intr = &msix_entries[2];
2617 struct qla_msix_entry *msix = rsp->msix; 2616 struct qla_msix_entry *msix = rsp->msix;
2618 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2617 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2619 int ret; 2618 int ret;
2620 2619
2621 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2620 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2622 if (ret) { 2621 if (ret) {
2623 ql_log(ql_log_fatal, vha, 0x00e6, 2622 ql_log(ql_log_fatal, vha, 0x00e6,
2624 "MSI-X: Unable to register handler -- %x/%d.\n", 2623 "MSI-X: Unable to register handler -- %x/%d.\n",
2625 msix->vector, ret); 2624 msix->vector, ret);
2626 return ret; 2625 return ret;
2627 } 2626 }
2628 msix->have_irq = 1; 2627 msix->have_irq = 1;
2629 msix->rsp = rsp; 2628 msix->rsp = rsp;
2630 return ret; 2629 return ret;
2631 } 2630 }
2632 2631